prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
---|---|---|
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks.discovery import ark_view
@pytest.fixture(scope="module")
def vcr_config():
return {
"filter_headers": [("User-Agent", None)],
"filter_query_parameters": [
("period1", "1598220000"),
("period2", "1635980400"),
],
}
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"val",
["Buy", "Sell", "Mocked Value"],
)
def test_lambda_direction_color_red_green(val, recorder):
result_txt = ark_view.lambda_direction_color_red_green(val=val)
recorder.capture(result_txt)
@pytest.mark.vcr
@pytest.mark.record_stdout
@pytest.mark.parametrize(
"kwargs_dict, use_color",
[
({"num": 2}, True),
({"num": 2}, False),
({"num": 2, "sort_col": "open"}, False),
({"num": 2, "buys_only": True}, False),
({"num": 2, "sells_only": True}, False),
({"num": 2, "fund": "ARKK"}, False),
],
)
def test_ark_orders_view(kwargs_dict, mocker, use_color):
yf_download = ark_view.ark_model.yf.download
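# Comment added for clarity (intent inferred from the test setup): the wrapper below forces
# yfinance to download single-threaded so that replaying the recorded VCR cassettes stays deterministic.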
def mock_yf_download(*args, **kwargs):
kwargs["threads"] = False
return yf_download(*args, **kwargs)
mocker.patch("yfinance.download", side_effect=mock_yf_download)
mocker.patch.object(
target=ark_view.rich_config, attribute="USE_COLOR", new=use_color
)
ark_view.ark_orders_view(**kwargs_dict)
@pytest.mark.vcr(record_mode="none")
@pytest.mark.record_stdout
def test_ark_orders_view_empty_df(mocker):
mocker.patch(
"openbb_terminal.stocks.discovery.ark_view.ark_model.get_ark_orders",
return_value= | pd.DataFrame() | pandas.DataFrame |
##### file path
# input
path_df_D = "tianchi_fresh_comp_train_user.csv"
path_df_part_1 = "df_part_1.csv"
path_df_part_2 = "df_part_2.csv"
path_df_part_3 = "df_part_3.csv"
path_df_part_1_tar = "df_part_1_tar.csv"
path_df_part_2_tar = "df_part_2_tar.csv"
path_df_part_1_uic_label = "df_part_1_uic_label.csv"
path_df_part_2_uic_label = "df_part_2_uic_label.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# output
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
import pandas as pd
import numpy as np
##========================================================##
##======================== Part 3 ========================##
##========================================================##
###########################################
'''Step 1.1 feature data set U of df_part_3
(1)
u_b1_count_in_6
u_b2_count_in_6
u_b3_count_in_6
u_b4_count_in_6
u_b_count_in_6
(2)
u_b1_count_in_3
u_b2_count_in_3
u_b3_count_in_3
u_b4_count_in_3
u_b_count_in_3
(3)
u_b1_count_in_1
u_b2_count_in_1
u_b3_count_in_1
u_b4_count_in_1
u_b_count_in_1
(4)
u_b4_rate (in_6)
u_b4_diff_hours (in_6)
'''
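# Aside (added for clarity; the toy frame below is hypothetical and not part of the pipeline):
# the counting trick used throughout these steps works because cumcount() numbers the rows
# within each group, so the 'last' row of a group carries (group size - 1) and adding 1
# recovers the per-group count. Minimal sketch:
_toy = pd.DataFrame({'user_id': [1, 1, 1, 2], 'behavior_type': [1, 1, 4, 1]})
_toy['cumcount'] = _toy.groupby(['user_id', 'behavior_type']).cumcount()
_toy_counts = _toy.drop_duplicates(['user_id', 'behavior_type'], keep='last')['cumcount'] + 1  # -> 2, 1, 1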
# loading data
path_df = open(path_df_part_3, 'r')
try:
df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
path_df.close()
# u_b_count_in_6
df_part_3['cumcount'] = df_part_3.groupby(['user_id', 'behavior_type']).cumcount()
df_part_3_u_b_count_in_6 = df_part_3.drop_duplicates(['user_id', 'behavior_type'], 'last')[
['user_id', 'behavior_type', 'cumcount']]
df_part_3_u_b_count_in_6 = pd.get_dummies(df_part_3_u_b_count_in_6['behavior_type']).join(
df_part_3_u_b_count_in_6[['user_id', 'cumcount']])
df_part_3_u_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_u_b_count_in_6['u_b1_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_1'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b2_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_2'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b3_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_3'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b4_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_4'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6 = df_part_3_u_b_count_in_6.groupby('user_id').agg({'u_b1_count_in_6': np.sum,
'u_b2_count_in_6': np.sum,
'u_b3_count_in_6': np.sum,
'u_b4_count_in_6': np.sum})
df_part_3_u_b_count_in_6.reset_index(inplace=True)
df_part_3_u_b_count_in_6['u_b_count_in_6'] = df_part_3_u_b_count_in_6[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6']].apply(lambda x: x.sum(),
axis=1)
# u_b_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')]
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['user_id', 'behavior_type']).cumcount()
df_part_3_u_b_count_in_3 = df_part_3_in_3.drop_duplicates(['user_id', 'behavior_type'], 'last')[
['user_id', 'behavior_type', 'cumcount']]
df_part_3_u_b_count_in_3 = pd.get_dummies(df_part_3_u_b_count_in_3['behavior_type']).join(
df_part_3_u_b_count_in_3[['user_id', 'cumcount']])
df_part_3_u_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_u_b_count_in_3['u_b1_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_1'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b2_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_2'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b3_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_3'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b4_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_4'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3 = df_part_3_u_b_count_in_3.groupby('user_id').agg({'u_b1_count_in_3': np.sum,
'u_b2_count_in_3': np.sum,
'u_b3_count_in_3': np.sum,
'u_b4_count_in_3': np.sum})
df_part_3_u_b_count_in_3.reset_index(inplace=True)
df_part_3_u_b_count_in_3['u_b_count_in_3'] = df_part_3_u_b_count_in_3[['u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3']].apply(lambda x: x.sum(),
axis=1)
# u_b_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['user_id', 'behavior_type']).cumcount()
df_part_3_u_b_count_in_1 = df_part_3_in_1.drop_duplicates(['user_id', 'behavior_type'], 'last')[
['user_id', 'behavior_type', 'cumcount']]
df_part_3_u_b_count_in_1 = pd.get_dummies(df_part_3_u_b_count_in_1['behavior_type']).join(
df_part_3_u_b_count_in_1[['user_id', 'cumcount']])
df_part_3_u_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_u_b_count_in_1['u_b1_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_1'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b2_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_2'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b3_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_3'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b4_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_4'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1 = df_part_3_u_b_count_in_1.groupby('user_id').agg({'u_b1_count_in_1': np.sum,
'u_b2_count_in_1': np.sum,
'u_b3_count_in_1': np.sum,
'u_b4_count_in_1': np.sum})
df_part_3_u_b_count_in_1.reset_index(inplace=True)
df_part_3_u_b_count_in_1['u_b_count_in_1'] = df_part_3_u_b_count_in_1[['u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1']].apply(lambda x: x.sum(),
axis=1)
# merge the result of count_in_6, count_in_3, count_in_1
df_part_3_u_b_count = pd.merge(df_part_3_u_b_count_in_6,
df_part_3_u_b_count_in_3, on=['user_id'], how='left').fillna(0)
df_part_3_u_b_count = pd.merge(df_part_3_u_b_count,
df_part_3_u_b_count_in_1, on=['user_id'], how='left').fillna(0)
df_part_3_u_b_count[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1']] = df_part_3_u_b_count[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1']].astype(int)
# u_b4_rate
df_part_3_u_b_count['u_b4_rate'] = df_part_3_u_b_count['u_b4_count_in_6'] / df_part_3_u_b_count['u_b_count_in_6']
# u_b4_diff_time
df_part_3 = df_part_3.sort_values(by=['user_id', 'time'])
df_part_3_u_b4_time = df_part_3[df_part_3['behavior_type'] == 4].drop_duplicates(['user_id'], 'first')[
['user_id', 'time']]
df_part_3_u_b4_time.columns = ['user_id', 'b4_first_time']
df_part_3_u_b_time = df_part_3.drop_duplicates(['user_id'], 'first')[['user_id', 'time']]
df_part_3_u_b_time.columns = ['user_id', 'b_first_time']
df_part_3_u_b_b4_time = pd.merge(df_part_3_u_b_time, df_part_3_u_b4_time, on=['user_id'])
df_part_3_u_b_b4_time['u_b4_diff_time'] = df_part_3_u_b_b4_time['b4_first_time'] - df_part_3_u_b_b4_time['b_first_time']
df_part_3_u_b_b4_time = df_part_3_u_b_b4_time[['user_id', 'u_b4_diff_time']]
df_part_3_u_b_b4_time['u_b4_diff_hours'] = df_part_3_u_b_b4_time['u_b4_diff_time'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
# generating feature set U
f_U_part_3 = pd.merge(df_part_3_u_b_count,
df_part_3_u_b_b4_time,
on=['user_id'], how='left')[['user_id',
'u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1',
'u_b4_rate',
'u_b4_diff_hours']]
# write to csv file
f_U_part_3 = f_U_part_3.round({'u_b4_rate': 3})
f_U_part_3.to_csv(path_df_part_3_U, index=False)
###########################################
'''Step 1.2 feature data set I of df_part_3
(1)
i_u_count_in_6
i_u_count_in_3
i_u_count_in_1
(2)
i_b1_count_in_6
i_b2_count_in_6
i_b3_count_in_6
i_b4_count_in_6
i_b_count_in_6
i_b1_count_in_3
i_b2_count_in_3
i_b3_count_in_3
i_b4_count_in_3
i_b_count_in_3
i_b1_count_in_1
i_b2_count_in_1
i_b3_count_in_1
i_b4_count_in_1
i_b_count_in_1
(3)
i_b4_rate (in_6)
i_b4_diff_hours (in_6)
'''
# loading data
path_df = open(path_df_part_3, 'r')
try:
df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
path_df.close()
# i_u_count_in_6
df_part_3_in_6 = df_part_3.drop_duplicates(['item_id', 'user_id'])
df_part_3_in_6['i_u_count_in_6'] = df_part_3_in_6.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_6 = df_part_3_in_6.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_6']]
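# Sanity-check aside (added; not part of the original script): the same per-item distinct-user
# count can be obtained more directly with nunique(), and should agree with the cumcount-based
# i_u_count_in_6 computed above.
_i_u_check = df_part_3.groupby('item_id')['user_id'].nunique()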
# i_u_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].drop_duplicates(['item_id', 'user_id'])
df_part_3_in_3['i_u_count_in_3'] = df_part_3_in_3.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_3 = df_part_3_in_3.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_3']]
# i_u_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')].drop_duplicates(['item_id', 'user_id'])
df_part_3_in_1['i_u_count_in_1'] = df_part_3_in_1.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_1 = df_part_3_in_1.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_1']]
# merge for generation of i_u_count
df_part_3_i_u_count = pd.merge(df_part_3_i_u_count_in_6,
df_part_3_i_u_count_in_3,
on=['item_id'], how='left').fillna(0)
df_part_3_i_u_count = pd.merge(df_part_3_i_u_count,
df_part_3_i_u_count_in_1,
on=['item_id'], how='left').fillna(0)
df_part_3_i_u_count[['i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1']] = df_part_3_i_u_count[['i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1']].astype(int)
# i_b_count_in_6
df_part_3['cumcount'] = df_part_3.groupby(['item_id', 'behavior_type']).cumcount()
df_part_3_i_b_count_in_6 = df_part_3.drop_duplicates(['item_id', 'behavior_type'], 'last')[
['item_id', 'behavior_type', 'cumcount']]
df_part_3_i_b_count_in_6 = pd.get_dummies(df_part_3_i_b_count_in_6['behavior_type']).join(
df_part_3_i_b_count_in_6[['item_id', 'cumcount']])
df_part_3_i_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_i_b_count_in_6['i_b1_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_1'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b2_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_2'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b3_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_3'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b4_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_4'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6 = df_part_3_i_b_count_in_6[['item_id',
'i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6']]
df_part_3_i_b_count_in_6 = df_part_3_i_b_count_in_6.groupby('item_id').agg({'i_b1_count_in_6': np.sum,
'i_b2_count_in_6': np.sum,
'i_b3_count_in_6': np.sum,
'i_b4_count_in_6': np.sum})
df_part_3_i_b_count_in_6.reset_index(inplace=True)
df_part_3_i_b_count_in_6['i_b_count_in_6'] = df_part_3_i_b_count_in_6['i_b1_count_in_6'] + \
df_part_3_i_b_count_in_6['i_b2_count_in_6'] + \
df_part_3_i_b_count_in_6['i_b3_count_in_6'] + \
df_part_3_i_b_count_in_6['i_b4_count_in_6']
# i_b_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')]
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['item_id', 'behavior_type']).cumcount()
df_part_3_i_b_count_in_3 = df_part_3_in_3.drop_duplicates(['item_id', 'behavior_type'], 'last')[
['item_id', 'behavior_type', 'cumcount']]
df_part_3_i_b_count_in_3 = pd.get_dummies(df_part_3_i_b_count_in_3['behavior_type']).join(
df_part_3_i_b_count_in_3[['item_id', 'cumcount']])
df_part_3_i_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_i_b_count_in_3['i_b1_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_1'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b2_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_2'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b3_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_3'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b4_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_4'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3 = df_part_3_i_b_count_in_3[['item_id',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3']]
df_part_3_i_b_count_in_3 = df_part_3_i_b_count_in_3.groupby('item_id').agg({'i_b1_count_in_3': np.sum,
'i_b2_count_in_3': np.sum,
'i_b3_count_in_3': np.sum,
'i_b4_count_in_3': np.sum})
df_part_3_i_b_count_in_3.reset_index(inplace=True)
df_part_3_i_b_count_in_3['i_b_count_in_3'] = df_part_3_i_b_count_in_3['i_b1_count_in_3'] + \
df_part_3_i_b_count_in_3['i_b2_count_in_3'] + \
df_part_3_i_b_count_in_3['i_b3_count_in_3'] + \
df_part_3_i_b_count_in_3['i_b4_count_in_3']
# i_b_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['item_id', 'behavior_type']).cumcount()
df_part_3_i_b_count_in_1 = df_part_3_in_1.drop_duplicates(['item_id', 'behavior_type'], 'last')[
['item_id', 'behavior_type', 'cumcount']]
df_part_3_i_b_count_in_1 = pd.get_dummies(df_part_3_i_b_count_in_1['behavior_type']).join(
df_part_3_i_b_count_in_1[['item_id', 'cumcount']])
df_part_3_i_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_i_b_count_in_1['i_b1_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_1'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b2_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_2'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b3_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_3'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b4_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_4'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1 = df_part_3_i_b_count_in_1[['item_id',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1']]
df_part_3_i_b_count_in_1 = df_part_3_i_b_count_in_1.groupby('item_id').agg({'i_b1_count_in_1': np.sum,
'i_b2_count_in_1': np.sum,
'i_b3_count_in_1': np.sum,
'i_b4_count_in_1': np.sum})
df_part_3_i_b_count_in_1.reset_index(inplace=True)
df_part_3_i_b_count_in_1['i_b_count_in_1'] = df_part_3_i_b_count_in_1['i_b1_count_in_1'] + \
df_part_3_i_b_count_in_1['i_b2_count_in_1'] + \
df_part_3_i_b_count_in_1['i_b3_count_in_1'] + \
df_part_3_i_b_count_in_1['i_b4_count_in_1']
# merge for generation of i_b_count
df_part_3_i_b_count = pd.merge(df_part_3_i_b_count_in_6,
df_part_3_i_b_count_in_3,
on=['item_id'], how='left').fillna(0)
df_part_3_i_b_count = pd.merge(df_part_3_i_b_count,
df_part_3_i_b_count_in_1,
on=['item_id'], how='left').fillna(0)
df_part_3_i_b_count[['i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1']] = df_part_3_i_b_count[['i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1']].astype(int)
# i_b4_rate
df_part_3_i_b_count['i_b4_rate'] = df_part_3_i_b_count['i_b4_count_in_6'] / df_part_3_i_b_count['i_b_count_in_6']
# i_b4_diff_time
df_part_3 = df_part_3.sort_values(by=['item_id', 'time'])
df_part_3_i_b4_time = df_part_3[df_part_3['behavior_type'] == 4].drop_duplicates(['item_id'], 'first')[
['item_id', 'time']]
df_part_3_i_b4_time.columns = ['item_id', 'b4_first_time']
df_part_3_i_b_time = df_part_3.drop_duplicates(['item_id'], 'first')[['item_id', 'time']]
df_part_3_i_b_time.columns = ['item_id', 'b_first_time']
df_part_3_i_b_b4_time = pd.merge(df_part_3_i_b_time, df_part_3_i_b4_time, on=['item_id'])
df_part_3_i_b_b4_time['i_b4_diff_time'] = df_part_3_i_b_b4_time['b4_first_time'] - df_part_3_i_b_b4_time['b_first_time']
df_part_3_i_b_b4_time['i_b4_diff_hours'] = df_part_3_i_b_b4_time['i_b4_diff_time'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_i_b_b4_time = df_part_3_i_b_b4_time[['item_id', 'i_b4_diff_hours']]
# generating feature set I
f_I_part_3 = pd.merge(df_part_3_i_b_count,
df_part_3_i_b_b4_time,
on=['item_id'], how='left')
f_I_part_3 = pd.merge(f_I_part_3,
df_part_3_i_u_count,
on=['item_id'], how='left')[['item_id',
'i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1',
'i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1',
'i_b4_rate',
'i_b4_diff_hours']]
# write to csv file
f_I_part_3 = f_I_part_3.round({'i_b4_rate': 3})
f_I_part_3.to_csv(path_df_part_3_I, index=False)
###########################################
'''Step 1.3 feature data set C of df_part_3
(1)
c_u_count_in_6
c_u_count_in_3
c_u_count_in_1
(2)
c_b1_count_in_6
c_b2_count_in_6
c_b3_count_in_6
c_b4_count_in_6
c_b_count_in_6
c_b1_count_in_3
c_b2_count_in_3
c_b3_count_in_3
c_b4_count_in_3
c_b_count_in_3
c_b1_count_in_1
c_b2_count_in_1
c_b3_count_in_1
c_b4_count_in_1
c_b_count_in_1
(3)
c_b4_rate (in_6)
c_b4_diff_hours (in_6)
'''
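# Aside (added; illustrative alternative only): the per-category behaviour counts built below
# could also be produced in one shot with a crosstab; the cumcount/get_dummies approach is kept
# for consistency with the U and I feature blocks above.
_c_b_check = pd.crosstab(df_part_3['item_category'], df_part_3['behavior_type'])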
# loading data
path_df = open(path_df_part_3, 'r')
try:
df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
path_df.close()
# c_u_count_in_6
df_part_3_in_6 = df_part_3.drop_duplicates(['item_category', 'user_id'])
df_part_3_in_6['c_u_count_in_6'] = df_part_3_in_6.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_6 = df_part_3_in_6.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_6']]
# c_u_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].drop_duplicates(
['item_category', 'user_id'])
df_part_3_in_3['c_u_count_in_3'] = df_part_3_in_3.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_3 = df_part_3_in_3.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_3']]
# c_u_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')].drop_duplicates(
['item_category', 'user_id'])
df_part_3_in_1['c_u_count_in_1'] = df_part_3_in_1.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_1 = df_part_3_in_1.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_1']]
df_part_3_c_u_count = pd.merge(df_part_3_c_u_count_in_6, df_part_3_c_u_count_in_3, on=['item_category'],
how='left').fillna(0)
df_part_3_c_u_count = pd.merge(df_part_3_c_u_count, df_part_3_c_u_count_in_1, on=['item_category'], how='left').fillna(
0)
df_part_3_c_u_count[['c_u_count_in_6',
'c_u_count_in_3',
'c_u_count_in_1']] = df_part_3_c_u_count[['c_u_count_in_6',
'c_u_count_in_3',
'c_u_count_in_1']].astype(int)
# c_b_count_in_6
df_part_3['cumcount'] = df_part_3.groupby(['item_category', 'behavior_type']).cumcount()
df_part_3_c_b_count_in_6 = df_part_3.drop_duplicates(['item_category', 'behavior_type'], 'last')[
['item_category', 'behavior_type', 'cumcount']]
df_part_3_c_b_count_in_6 = | pd.get_dummies(df_part_3_c_b_count_in_6['behavior_type']) | pandas.get_dummies |
# coding: utf-8
# # Content
# __1. Exploratory Visualization__
# __2. Data Cleaning__
# __3. Feature Engineering__
# __4. Modeling & Evaluation__
# __5. Ensemble Methods__
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
get_ipython().run_line_magic('matplotlib', 'inline')
plt.style.use('ggplot')
# In[2]:
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import RobustScaler, StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline, make_pipeline
from scipy.stats import skew
from sklearn.decomposition import PCA, KernelPCA
from sklearn.preprocessing import Imputer
# In[3]:
from sklearn.model_selection import cross_val_score, GridSearchCV, KFold
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.linear_model import ElasticNet, SGDRegressor, BayesianRidge
from sklearn.kernel_ridge import KernelRidge
from xgboost import XGBRegressor
# In[4]:
pd.set_option('max_colwidth',200)
pd.set_option('display.width',200)
pd.set_option('display.max_columns',500)
pd.set_option('display.max_rows',1000)
# In[7]:
train=pd.read_csv('E:/Workspace/HousePrices/train.csv')
test=pd.read_csv('E:/Workspace/HousePrices/test.csv')
# In[8]:
# train = pd.read_csv('../input/train.csv')
# test = pd.read_csv('../input/test.csv')
# # Exploratory Visualization
# + __It seems that the prices of recently built houses are higher, so later I'll use a LabelEncoder for the three "Year" features.__
# In[9]:
plt.figure(figsize=(15,8))
sns.boxplot(train.YearBuilt, train.SalePrice)
# + __As discussed in other kernels, the two bottom-right points with extremely large GrLivArea are likely to be outliers, so we delete them.__
# In[10]:
plt.figure(figsize=(12,6))
plt.scatter(x=train.GrLivArea, y=train.SalePrice)
plt.xlabel("GrLivArea", fontsize=13)
plt.ylabel("SalePrice", fontsize=13)
plt.ylim(0,800000)
# In[11]:
train.drop(train[(train["GrLivArea"]>4000)&(train["SalePrice"]<300000)].index,inplace=True)
# In[12]:
full=pd.concat([train,test], ignore_index=True)
# In[13]:
full.drop(['Id'],axis=1, inplace=True)
full.shape
# # Data Cleaning
# ### Missing Data
# In[14]:
aa = full.isnull().sum()
aa[aa>0].sort_values(ascending=False)
# + __Let's first impute the missing values of LotFrontage based on the median of LotArea and Neighborhood. Since LotArea is a continuous feature, we use qcut to divide it into 10 parts.__
# In[15]:
full.groupby(['Neighborhood'])[['LotFrontage']].agg(['mean','median','count'])
# In[16]:
full["LotAreaCut"] = pd.qcut(full.LotArea,10)
# In[17]:
full.groupby(['LotAreaCut'])[['LotFrontage']].agg(['mean','median','count'])
# In[18]:
full['LotFrontage']=full.groupby(['LotAreaCut','Neighborhood'])['LotFrontage'].transform(lambda x: x.fillna(x.median()))
# In[19]:
# Since some combinations of LotAreaCut and Neighborhood are not available, we fall back to LotAreaCut alone.
full['LotFrontage']=full.groupby(['LotAreaCut'])['LotFrontage'].transform(lambda x: x.fillna(x.median()))
# + __Then we fill in the other missing values according to data_description.__
# In[20]:
cols=["MasVnrArea", "BsmtUnfSF", "TotalBsmtSF", "GarageCars", "BsmtFinSF2", "BsmtFinSF1", "GarageArea"]
for col in cols:
full[col].fillna(0, inplace=True)
# In[21]:
cols1 = ["PoolQC" , "MiscFeature", "Alley", "Fence", "FireplaceQu", "GarageQual", "GarageCond", "GarageFinish", "GarageYrBlt", "GarageType", "BsmtExposure", "BsmtCond", "BsmtQual", "BsmtFinType2", "BsmtFinType1", "MasVnrType"]
for col in cols1:
full[col].fillna("None", inplace=True)
# In[22]:
# fill in with mode
cols2 = ["MSZoning", "BsmtFullBath", "BsmtHalfBath", "Utilities", "Functional", "Electrical", "KitchenQual", "SaleType","Exterior1st", "Exterior2nd"]
for col in cols2:
full[col].fillna(full[col].mode()[0], inplace=True)
# + __And there is no missing data except for the value we want to predict!__
# In[23]:
full.isnull().sum()[full.isnull().sum()>0]
# # Feature Engineering
# + __Convert some numerical features into categorical features. It's better to use LabelEncoder and get_dummies for these features.__
# In[24]:
NumStr = ["MSSubClass","BsmtFullBath","BsmtHalfBath","HalfBath","BedroomAbvGr","KitchenAbvGr","MoSold","YrSold","YearBuilt","YearRemodAdd","LowQualFinSF","GarageYrBlt"]
for col in NumStr:
full[col]=full[col].astype(str)
# + __Now I want to do a long list of value-mapping. __
# + __I was influenced by the insight that we should build as many features as possible and trust the model to choose the right ones. So I decided to group SalePrice by each feature and sort the result by mean and median. Here is an example:__
# In[25]:
full.groupby(['MSSubClass'])[['SalePrice']].agg(['mean','median','count'])
# + __So basically I'll do__
# '180' : 1
# '30' : 2 '45' : 2
# '190' : 3, '50' : 3, '90' : 3,
# '85' : 4, '40' : 4, '160' : 4
# '70' : 5, '20' : 5, '75' : 5, '80' : 5, '150' : 5
# '120': 6, '60' : 6
# + __Different people may have different views on how to map these values, so just follow your instinct =^_^=__
# __Below I also prefix the new ordinal features with a small "o" so that the original features are kept for get_dummies in a moment.__
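# + __(Aside added for illustration, not in the original notebook: an ordinal mapping of this kind
# can also be derived programmatically by ranking the per-class mean SalePrice, as sketched below
# with the throwaway name `_msclass_order`. The hand-written maps that follow encode the author's
# own judgement on top of that ordering.)__
# In[ ]:
_msclass_order = full.groupby('MSSubClass')['SalePrice'].mean().rank(method='dense')
# e.g. full.MSSubClass.map(_msclass_order) would give an automatic "oMSSubClass"-style feature.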
# In[26]:
def map_values():
full["oMSSubClass"] = full.MSSubClass.map({'180':1,
'30':2, '45':2,
'190':3, '50':3, '90':3,
'85':4, '40':4, '160':4,
'70':5, '20':5, '75':5, '80':5, '150':5,
'120': 6, '60':6})
full["oMSZoning"] = full.MSZoning.map({'C (all)':1, 'RH':2, 'RM':2, 'RL':3, 'FV':4})
full["oNeighborhood"] = full.Neighborhood.map({'MeadowV':1,
'IDOTRR':2, 'BrDale':2,
'OldTown':3, 'Edwards':3, 'BrkSide':3,
'Sawyer':4, 'Blueste':4, 'SWISU':4, 'NAmes':4,
'NPkVill':5, 'Mitchel':5,
'SawyerW':6, 'Gilbert':6, 'NWAmes':6,
'Blmngtn':7, 'CollgCr':7, 'ClearCr':7, 'Crawfor':7,
'Veenker':8, 'Somerst':8, 'Timber':8,
'StoneBr':9,
'NoRidge':10, 'NridgHt':10})
full["oCondition1"] = full.Condition1.map({'Artery':1,
'Feedr':2, 'RRAe':2,
'Norm':3, 'RRAn':3,
'PosN':4, 'RRNe':4,
'PosA':5 ,'RRNn':5})
full["oBldgType"] = full.BldgType.map({'2fmCon':1, 'Duplex':1, 'Twnhs':1, '1Fam':2, 'TwnhsE':2})
full["oHouseStyle"] = full.HouseStyle.map({'1.5Unf':1,
'1.5Fin':2, '2.5Unf':2, 'SFoyer':2,
'1Story':3, 'SLvl':3,
'2Story':4, '2.5Fin':4})
full["oExterior1st"] = full.Exterior1st.map({'BrkComm':1,
'AsphShn':2, 'CBlock':2, 'AsbShng':2,
'WdShing':3, 'Wd Sdng':3, 'MetalSd':3, 'Stucco':3, 'HdBoard':3,
'BrkFace':4, 'Plywood':4,
'VinylSd':5,
'CemntBd':6,
'Stone':7, 'ImStucc':7})
full["oMasVnrType"] = full.MasVnrType.map({'BrkCmn':1, 'None':1, 'BrkFace':2, 'Stone':3})
full["oExterQual"] = full.ExterQual.map({'Fa':1, 'TA':2, 'Gd':3, 'Ex':4})
full["oFoundation"] = full.Foundation.map({'Slab':1,
'BrkTil':2, 'CBlock':2, 'Stone':2,
'Wood':3, 'PConc':4})
full["oBsmtQual"] = full.BsmtQual.map({'Fa':2, 'None':1, 'TA':3, 'Gd':4, 'Ex':5})
full["oBsmtExposure"] = full.BsmtExposure.map({'None':1, 'No':2, 'Av':3, 'Mn':3, 'Gd':4})
full["oHeating"] = full.Heating.map({'Floor':1, 'Grav':1, 'Wall':2, 'OthW':3, 'GasW':4, 'GasA':5})
full["oHeatingQC"] = full.HeatingQC.map({'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})
full["oKitchenQual"] = full.KitchenQual.map({'Fa':1, 'TA':2, 'Gd':3, 'Ex':4})
full["oFunctional"] = full.Functional.map({'Maj2':1, 'Maj1':2, 'Min1':2, 'Min2':2, 'Mod':2, 'Sev':2, 'Typ':3})
full["oFireplaceQu"] = full.FireplaceQu.map({'None':1, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})
full["oGarageType"] = full.GarageType.map({'CarPort':1, 'None':1,
'Detchd':2,
'2Types':3, 'Basment':3,
'Attchd':4, 'BuiltIn':5})
full["oGarageFinish"] = full.GarageFinish.map({'None':1, 'Unf':2, 'RFn':3, 'Fin':4})
full["oPavedDrive"] = full.PavedDrive.map({'N':1, 'P':2, 'Y':3})
full["oSaleType"] = full.SaleType.map({'COD':1, 'ConLD':1, 'ConLI':1, 'ConLw':1, 'Oth':1, 'WD':1,
'CWD':2, 'Con':3, 'New':3})
full["oSaleCondition"] = full.SaleCondition.map({'AdjLand':1, 'Abnorml':2, 'Alloca':2, 'Family':2, 'Normal':3, 'Partial':4})
return "Done!"
# In[27]:
map_values()
# In[28]:
# drop two unwanted columns
full.drop("LotAreaCut",axis=1,inplace=True)
full.drop(['SalePrice'],axis=1,inplace=True)
# ## Pipeline
# + __Next we can build a pipeline. It's convenient to experiment with different feature combinations once you've got a pipeline.__
# + __Label Encoding three "Year" features.__
# In[29]:
class labelenc(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self,X,y=None):
return self
def transform(self,X):
lab=LabelEncoder()
X["YearBuilt"] = lab.fit_transform(X["YearBuilt"])
X["YearRemodAdd"] = lab.fit_transform(X["YearRemodAdd"])
X["GarageYrBlt"] = lab.fit_transform(X["GarageYrBlt"])
return X
# + __Apply log1p to the skewed features, then get_dummies.__
# In[30]:
class skew_dummies(BaseEstimator, TransformerMixin):
def __init__(self,skew=0.5):
self.skew = skew
def fit(self,X,y=None):
return self
def transform(self,X):
X_numeric=X.select_dtypes(exclude=["object"])
skewness = X_numeric.apply(lambda x: skew(x))
skewness_features = skewness[abs(skewness) >= self.skew].index
X[skewness_features] = np.log1p(X[skewness_features])
X = pd.get_dummies(X)
return X
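# (Aside added for illustration; nothing here feeds the pipeline.) To see which columns the
# transformer above would log-transform at skew >= 1, the skewness table can be inspected directly:
# In[ ]:
_skewness = full.select_dtypes(exclude=["object"]).apply(lambda x: skew(x))
_skewed_cols = _skewness[abs(_skewness) >= 1].index.tolist()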
# In[31]:
# build pipeline
pipe = Pipeline([
('labenc', labelenc()),
('skew_dummies', skew_dummies(skew=1)),
])
# In[32]:
# save the original data for later use
full2 = full.copy()
# In[33]:
data_pipe = pipe.fit_transform(full2)
# In[34]:
data_pipe.shape
# In[35]:
data_pipe.head()
# + __Use RobustScaler since there may still be other outliers.__
# In[36]:
scaler = RobustScaler()
# In[37]:
n_train=train.shape[0]
X = data_pipe[:n_train]
test_X = data_pipe[n_train:]
y= train.SalePrice
X_scaled = scaler.fit(X).transform(X)
y_log = np.log(train.SalePrice)
test_X_scaled = scaler.transform(test_X)
# ## Feature Selection
# + __I have to confess, the feature engineering above is not enough, so we need more.__
# + __Combining different features is usually a good idea, but we have no idea which features we should choose. Luckily, some models can perform feature selection; here I use Lasso, but you are free to choose Ridge, RandomForest or GradientBoostingTree.__
# In[38]:
lasso=Lasso(alpha=0.001)
lasso.fit(X_scaled,y_log)
# In[39]:
FI_lasso = pd.DataFrame({"Feature Importance":lasso.coef_}, index=data_pipe.columns)
# In[40]:
FI_lasso.sort_values("Feature Importance",ascending=False)
# In[41]:
FI_lasso[FI_lasso["Feature Importance"]!=0].sort_values("Feature Importance").plot(kind="barh",figsize=(15,25))
plt.xticks(rotation=90)
plt.show()
# + __Based on the "Feature Importance" plot and some trial and error, I decided to add some features to the pipeline.__
# In[42]:
class add_feature(BaseEstimator, TransformerMixin):
def __init__(self,additional=1):
self.additional = additional
def fit(self,X,y=None):
return self
def transform(self,X):
if self.additional==1:
X["TotalHouse"] = X["TotalBsmtSF"] + X["1stFlrSF"] + X["2ndFlrSF"]
X["TotalArea"] = X["TotalBsmtSF"] + X["1stFlrSF"] + X["2ndFlrSF"] + X["GarageArea"]
else:
X["TotalHouse"] = X["TotalBsmtSF"] + X["1stFlrSF"] + X["2ndFlrSF"]
X["TotalArea"] = X["TotalBsmtSF"] + X["1stFlrSF"] + X["2ndFlrSF"] + X["GarageArea"]
X["+_TotalHouse_OverallQual"] = X["TotalHouse"] * X["OverallQual"]
X["+_GrLivArea_OverallQual"] = X["GrLivArea"] * X["OverallQual"]
X["+_oMSZoning_TotalHouse"] = X["oMSZoning"] * X["TotalHouse"]
X["+_oMSZoning_OverallQual"] = X["oMSZoning"] + X["OverallQual"]
X["+_oMSZoning_YearBuilt"] = X["oMSZoning"] + X["YearBuilt"]
X["+_oNeighborhood_TotalHouse"] = X["oNeighborhood"] * X["TotalHouse"]
X["+_oNeighborhood_OverallQual"] = X["oNeighborhood"] + X["OverallQual"]
X["+_oNeighborhood_YearBuilt"] = X["oNeighborhood"] + X["YearBuilt"]
X["+_BsmtFinSF1_OverallQual"] = X["BsmtFinSF1"] * X["OverallQual"]
X["-_oFunctional_TotalHouse"] = X["oFunctional"] * X["TotalHouse"]
X["-_oFunctional_OverallQual"] = X["oFunctional"] + X["OverallQual"]
X["-_LotArea_OverallQual"] = X["LotArea"] * X["OverallQual"]
X["-_TotalHouse_LotArea"] = X["TotalHouse"] + X["LotArea"]
X["-_oCondition1_TotalHouse"] = X["oCondition1"] * X["TotalHouse"]
X["-_oCondition1_OverallQual"] = X["oCondition1"] + X["OverallQual"]
X["Bsmt"] = X["BsmtFinSF1"] + X["BsmtFinSF2"] + X["BsmtUnfSF"]
X["Rooms"] = X["FullBath"]+X["TotRmsAbvGrd"]
X["PorchArea"] = X["OpenPorchSF"]+X["EnclosedPorch"]+X["3SsnPorch"]+X["ScreenPorch"]
X["TotalPlace"] = X["TotalBsmtSF"] + X["1stFlrSF"] + X["2ndFlrSF"] + X["GarageArea"] + X["OpenPorchSF"]+X["EnclosedPorch"]+X["3SsnPorch"]+X["ScreenPorch"]
return X
# + __By using a pipeline, you can quickly experiment with different feature combinations.__
# In[43]:
pipe = Pipeline([
('labenc', labelenc()),
('add_feature', add_feature(additional=2)),
('skew_dummies', skew_dummies(skew=1)),
])
# ## PCA
# + __In my case, doing PCA is very important. It gave me a relatively big boost on the leaderboard. At first I didn't believe PCA could help, but
# in retrospect the reason may be that the features I built are highly correlated, which leads to multicollinearity. PCA can decorrelate these features.__
# + __So I'll use approximately the same number of dimensions in PCA as in the original data, since the aim here is not dimension reduction.__
# In[44]:
full_pipe = pipe.fit_transform(full)
# In[45]:
full_pipe.shape
# In[46]:
n_train=train.shape[0]
X = full_pipe[:n_train]
test_X = full_pipe[n_train:]
y= train.SalePrice
X_scaled = scaler.fit(X).transform(X)
y_log = np.log(train.SalePrice)
test_X_scaled = scaler.transform(test_X)
# In[47]:
pca = PCA(n_components=410)
# In[48]:
X_scaled=pca.fit_transform(X_scaled)
test_X_scaled = pca.transform(test_X_scaled)
# In[49]:
X_scaled.shape, test_X_scaled.shape
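# (Aside added: a quick sanity check, since the aim is decorrelation rather than dimension
# reduction -- the retained variance should be close to 1.)
# In[ ]:
pca.explained_variance_ratio_.sum()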
# # Modeling & Evaluation
# In[50]:
# define cross validation strategy
def rmse_cv(model,X,y):
rmse = np.sqrt(-cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=5))
return rmse
# + __We choose 13 models and use 5-fold cross-validation to evaluate them.__
# Models include:
#
# + LinearRegression
# + Ridge
# + Lasso
# + Random Forest
# + Gradient Boosting Tree
# + Support Vector Regression
# + Linear Support Vector Regression
# + ElasticNet
# + Stochastic Gradient Descent
# + BayesianRidge
# + KernelRidge
# + ExtraTreesRegressor
# + XgBoost
# In[51]:
models = [LinearRegression(),Ridge(),Lasso(alpha=0.01,max_iter=10000),RandomForestRegressor(),GradientBoostingRegressor(),SVR(),LinearSVR(),
ElasticNet(alpha=0.001,max_iter=10000),SGDRegressor(max_iter=1000,tol=1e-3),BayesianRidge(),KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5),
ExtraTreesRegressor(),XGBRegressor()]
# In[113]:
names = ["LR", "Ridge", "Lasso", "RF", "GBR", "SVR", "LinSVR", "Ela","SGD","Bay","Ker","Extra","Xgb"]
for name, model in zip(names, models):
score = rmse_cv(model, X_scaled, y_log)
print("{}: {:.6f}, {:.4f}".format(name,score.mean(),score.std()))
# + __Next we do some hyperparameter tuning. First we define a grid search helper.__
# In[52]:
class grid():
def __init__(self,model):
self.model = model
def grid_get(self,X,y,param_grid):
grid_search = GridSearchCV(self.model,param_grid,cv=5, scoring="neg_mean_squared_error")
grid_search.fit(X,y)
print(grid_search.best_params_, np.sqrt(-grid_search.best_score_))
grid_search.cv_results_['mean_test_score'] = np.sqrt(-grid_search.cv_results_['mean_test_score'])
print( | pd.DataFrame(grid_search.cv_results_) | pandas.DataFrame |
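# (Aside added: hypothetical usage of the `grid` helper above; the parameter values are
# illustrative only and not taken from the original notebook.)
# grid(Lasso()).grid_get(X_scaled, y_log, {'alpha': [0.0004, 0.0006, 0.0009], 'max_iter': [10000]})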
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 12:22:30 2019
@author: nk7g14
Currently, this only queries objects found in the XMM-Newton Serendipitous
Source Catalog (XMMSSC) https://heasarc.gsfc.nasa.gov/W3Browse/xmm-newton/xmmssc.html
We hope to however extended it to all observations as would be found in the
master catalogue. #TODO
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def XMM_GetStartAndEndTimes(self):
start_time = np.array(self.XMM_OBS_LIST['TIME'])
end_time = np.array(self.XMM_OBS_LIST['END_TIME'])
start_end = pd.DataFrame()
start_end['START_TIME'] = start_time
start_end['END_TIME'] = end_time
return start_end
def XMM_GetFlux_PN(self):
pn_flux = pd.DataFrame()
pn1 = np.array(self.XMM_OBS_LIST['PN_1_FLUX'])
pn1_err = np.array(self.XMM_OBS_LIST['PN_1_FLUX_ERROR'])
pn2 = np.array(self.XMM_OBS_LIST['PN_2_FLUX'])
pn2_err = np.array(self.XMM_OBS_LIST['PN_2_FLUX_ERROR'])
pn3 = np.array(self.XMM_OBS_LIST['PN_3_FLUX'])
pn3_err = np.array(self.XMM_OBS_LIST['PN_3_FLUX_ERROR'])
pn4 = np.array(self.XMM_OBS_LIST['PN_4_FLUX'])
pn4_err = np.array(self.XMM_OBS_LIST['PN_4_FLUX_ERROR'])
pn5 = np.array(self.XMM_OBS_LIST['PN_5_FLUX'])
pn5_err = np.array(self.XMM_OBS_LIST['PN_5_FLUX_ERROR'])
pn8 = np.array(self.XMM_OBS_LIST['PN_8_FLUX'])
pn8_err = np.array(self.XMM_OBS_LIST['PN_8_FLUX_ERROR'])
pn9 = np.array(self.XMM_OBS_LIST['PN_9_FLUX'])
pn9_err = np.array(self.XMM_OBS_LIST['PN_9_FLUX_ERROR'])
pn_flux['PN_1_FLUX'] = pn1
pn_flux['PN_1_FLUX_ERROR'] = pn1_err
pn_flux['PN_2_FLUX'] = pn2
pn_flux['PN_2_FLUX_ERROR'] = pn2_err
pn_flux['PN_3_FLUX'] = pn3
pn_flux['PN_3_FLUX_ERROR'] = pn3_err
pn_flux['PN_4_FLUX'] = pn4
pn_flux['PN_4_FLUX_ERROR'] = pn4_err
pn_flux['PN_5_FLUX'] = pn5
pn_flux['PN_5_FLUX_ERROR'] = pn5_err
pn_flux['PN_8_FLUX'] = pn8
pn_flux['PN_8_FLUX_ERROR'] = pn8_err
pn_flux['PN_9_FLUX'] = pn9
pn_flux['PN_9_FLUX_ERROR'] = pn9_err
return pn_flux
def XMM_GetFlux_MOS1(self):
mos1_flux = pd.DataFrame()
mos1_1 = np.array(self.XMM_OBS_LIST['M1_1_FLUX'])
mos1_1_err = np.array(self.XMM_OBS_LIST['M1_1_FLUX_ERROR'])
mos1_2 = np.array(self.XMM_OBS_LIST['M1_2_FLUX'])
mos1_2_err = np.array(self.XMM_OBS_LIST['M1_2_FLUX_ERROR'])
mos1_3 = np.array(self.XMM_OBS_LIST['M1_3_FLUX'])
mos1_3_err = np.array(self.XMM_OBS_LIST['M1_3_FLUX_ERROR'])
mos1_4 = np.array(self.XMM_OBS_LIST['M1_4_FLUX'])
mos1_4_err = np.array(self.XMM_OBS_LIST['M1_4_FLUX_ERROR'])
mos1_5 = np.array(self.XMM_OBS_LIST['M1_5_FLUX'])
mos1_5_err = np.array(self.XMM_OBS_LIST['M1_5_FLUX_ERROR'])
mos1_8 = np.array(self.XMM_OBS_LIST['M1_8_FLUX'])
mos1_8_err = np.array(self.XMM_OBS_LIST['M1_8_FLUX_ERROR'])
mos1_9 = np.array(self.XMM_OBS_LIST['M1_9_FLUX'])
mos1_9_err = np.array(self.XMM_OBS_LIST['M1_9_FLUX_ERROR'])
mos1_flux['M1_1_FLUX'] = mos1_1
mos1_flux['M1_1_FLUX_ERROR'] = mos1_1_err
mos1_flux['M1_2_FLUX'] = mos1_2
mos1_flux['M1_2_FLUX_ERROR'] = mos1_2_err
mos1_flux['M1_3_FLUX'] = mos1_3
mos1_flux['M1_3_FLUX_ERROR'] = mos1_3_err
mos1_flux['M1_4_FLUX'] = mos1_4
mos1_flux['M1_4_FLUX_ERROR'] = mos1_4_err
mos1_flux['M1_5_FLUX'] = mos1_5
mos1_flux['M1_5_FLUX_ERROR'] = mos1_5_err
mos1_flux['M1_8_FLUX'] = mos1_8
mos1_flux['M1_8_FLUX_ERROR'] = mos1_8_err
mos1_flux['M1_9_FLUX'] = mos1_9
mos1_flux['M1_9_FLUX_ERROR'] = mos1_9_err
return mos1_flux
def XMM_GetFlux_MOS2(self):
mos2_flux = pd.DataFrame()
mos2_1 = np.array(self.XMM_OBS_LIST['M2_1_FLUX'])
mos2_1_err = np.array(self.XMM_OBS_LIST['M2_1_FLUX_ERROR'])
mos2_2 = np.array(self.XMM_OBS_LIST['M2_2_FLUX'])
mos2_2_err = np.array(self.XMM_OBS_LIST['M2_2_FLUX_ERROR'])
mos2_3 = np.array(self.XMM_OBS_LIST['M2_3_FLUX'])
mos2_3_err = np.array(self.XMM_OBS_LIST['M2_3_FLUX_ERROR'])
mos2_4 = np.array(self.XMM_OBS_LIST['M2_4_FLUX'])
mos2_4_err = np.array(self.XMM_OBS_LIST['M2_4_FLUX_ERROR'])
mos2_5 = np.array(self.XMM_OBS_LIST['M2_5_FLUX'])
mos2_5_err = np.array(self.XMM_OBS_LIST['M2_5_FLUX_ERROR'])
mos2_8 = np.array(self.XMM_OBS_LIST['M2_8_FLUX'])
mos2_8_err = np.array(self.XMM_OBS_LIST['M2_8_FLUX_ERROR'])
mos2_9 = np.array(self.XMM_OBS_LIST['M2_9_FLUX'])
mos2_9_err = np.array(self.XMM_OBS_LIST['M2_9_FLUX_ERROR'])
mos2_flux['M2_1_FLUX'] = mos2_1
mos2_flux['M2_1_FLUX_ERROR'] = mos2_1_err
mos2_flux['M2_2_FLUX'] = mos2_2
mos2_flux['M2_2_FLUX_ERROR'] = mos2_2_err
mos2_flux['M2_3_FLUX'] = mos2_3
mos2_flux['M2_3_FLUX_ERROR'] = mos2_3_err
mos2_flux['M2_4_FLUX'] = mos2_4
mos2_flux['M2_4_FLUX_ERROR'] = mos2_4_err
mos2_flux['M2_5_FLUX'] = mos2_5
mos2_flux['M2_5_FLUX_ERROR'] = mos2_5_err
mos2_flux['M2_8_FLUX'] = mos2_8
mos2_flux['M2_8_FLUX_ERROR'] = mos2_8_err
mos2_flux['M2_9_FLUX'] = mos2_9
mos2_flux['M2_9_FLUX_ERROR'] = mos2_9_err
return mos2_flux
def XMM_GetFlux(self):
'''
For this mission, the fluxes for XMM are given for the basic bands:
Flux band 1 = 0.2 - 0.5 keV (soft)
Flux band 2 = 0.5 - 1.0 keV (soft)
Flux band 3 = 1.0 - 2.0 keV (soft)
Flux band 4 = 2.0 - 4.5 keV (hard)
Flux band 5 = 4.5 - 12.0 keV (hard)
We also have the broad energy bands given by:
Flux band 6 = 0.2 - 2.0 keV (N/A)
Flux band 7 = 2.0 - 12.0 keV (N/A)
Flux band 8 = 0.2 - 12.0 keV
Flux band 9 = 0.5 - 4.5 keV
'''
flux_pn = self.XMM_GetFlux_PN()
flux_mos1 = self.XMM_GetFlux_MOS1()
flux_mos2 = self.XMM_GetFlux_MOS2()
mapping = [flux_pn, flux_mos1, flux_mos2]
flux_df = pd.concat(mapping, axis=1)
return flux_df
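# Hypothetical helper (not in the original module): the flux-band definitions from the docstring
# above as a lookup table, e.g. for labelling light-curve plots. Energies are in keV.
XMM_FLUX_BANDS = {
    1: (0.2, 0.5),
    2: (0.5, 1.0),
    3: (1.0, 2.0),
    4: (2.0, 4.5),
    5: (4.5, 12.0),
    6: (0.2, 2.0),
    7: (2.0, 12.0),
    8: (0.2, 12.0),
    9: (0.5, 4.5),
}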
def XMM_GetLightcurve(self):
flux_df = self.XMM_GetFlux()
start_end = self.XMM_GetStartAndEndTimes()
lightcurve = | pd.concat((start_end, flux_df), axis=1) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 27 12:01:19 2021
@author: leila
"""
import numpy as np
import pandas as pd
import random
#import matplotlib.pyplot as plt
#import csv
import datetime
from sklearn.model_selection import train_test_split
#from sklearn.model_selection import KFold
#from sklearn.model_selection import cross_val_score
#from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split,cross_val_score, KFold
from sklearn.metrics import accuracy_score #,classification_report,confusion_matrix
from sklearn.ensemble import RandomForestClassifier
students = pd.read_csv('Ready.csv')
#students = pd.read_csv(r'/Users/leila/Desktop/HPO/ABC/Ready.csv')
X=students.drop('graduated',axis=1)
y=students['graduated']
cols =list(X.select_dtypes(include=['object']).columns)
cols_rest=list(X.select_dtypes(exclude=['object']).columns)
test0=students[cols]
test1=students[cols_rest]
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
Xtest = sc_X.fit_transform(test1)
Xtest= | pd.DataFrame(Xtest, columns=cols_rest) | pandas.DataFrame |
import pandas as pd
from lyrics_function import get_genres, get_missing_genres
from lyrics_function import get_song_lyrics
import os
import unicodedata
from tqdm import tqdm
GENIUS_API_TOKEN = '<KEY>'
#====================================#
# CLEANING & FORMATTIING FUNCTIONS #
#====================================#
def strip_accents(text):
'''replaces characters with accents with utf-8 characters'''
text = unicodedata.normalize('NFD', text)\
.encode('ascii', 'ignore')\
.decode("utf-8")
return text
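# Illustrative example (comment added): strip_accents("Beyoncé") returns "Beyonce".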
def format_track(df, col='track', new_col_name="genius_track"):
'''formats the track columns to contain track names
in a genius API compliant format'''
# replaces accented character strings with utf-8 characters
df[new_col_name] = df[col].apply(strip_accents)
# removes character strings inside parentheses
df[new_col_name] = df[new_col_name].str.replace(r"\([^()]*\)", '')
# removes any string after a hyphen
df[new_col_name] = df[new_col_name].str.replace(r"\-.*", '')
# removes any punctuation
df[new_col_name] = df[new_col_name].str.replace(r'[^\w\s]', '')
# removes any spaces at the beginning or end of strings
df[new_col_name] = df[new_col_name].str.strip()
# only keeps the first letter in the string capitalized -- rest are lowercased
df[new_col_name] = df[new_col_name].str.capitalize()
return df
def format_artist(df, col='artist', new_col_name="genius_artist"):
'''formats the artist columns to contain only the first artist's name
in a genius API compliant format'''
# replaces accented character strings with utf-8 characters
df[new_col_name] = df[col].apply(strip_accents)
# removes character strings inside parentheses
df[new_col_name] = df[new_col_name].str.replace(r"\([^()]*\)", '')
# removes any string after a hyphen
df[new_col_name] = df[new_col_name].str.replace(r"\-.*", '')
# splits into a list if there is more than one artist
df[new_col_name] = df[new_col_name].str.split(",").str[0]
df[new_col_name] = df[new_col_name].str.strip()
# removes any punctuation
df[new_col_name] = df[new_col_name].str.replace(r'[^\w\s]', '')
# only keeps the first letter in the string capitalized -- rest are lowercased
df[new_col_name] = df[new_col_name].str.capitalize()
return df
#====================================#
# Getting Lyrics from Top Songs #
#====================================#
# import from lyrics_functions file
#====================================#
# Putting It All Together #
#====================================#
# getting all of the weekly data into one big data frame
df = | pd.DataFrame() | pandas.DataFrame |
"""
Lasso_regulation_program
- train_data = data from the 20th South Korean general election (2016),
d = Deobureo Minjoo Party (Democratic Party of Korea)
s = Saenuri Party
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import font_manager, rc
from pandas import Series
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso, Ridge
# Use a Korean font (Malgun Gothic) so Korean labels render correctly in plots
font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
rc('font', family=font_name)
# Training & Test Data Load
train_data = pd.read_csv('./2016DTM7.csv')
# Drop rows with missing values
train_data = train_data.dropna()
# Arrange Data Set
x = train_data.drop(['d'], axis=1)
X = train_data.drop(['s'], axis=1)
Y = train_data.loc[:, ['d']]
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1) # Split Data Set
predictors = x_train.columns # Save tarin_X attributes
"""
Lasso Regression
lassoReg = Coefficient List
predictors = Attributes List
coef = DataFrame with Attributes & Coefficient
pre_coef = Except Zero Coefficient Value List
"""
lassoReg = Lasso(alpha=0.00000005) # Call Lasso Regression Function
lassoReg.fit(x_train, y_train) # Fit Data in Lasso function
coef = | Series(lassoReg.coef_, predictors) | pandas.Series |
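# The docstring above also mentions pre_coef; a sketch of how it could be computed
# (assumption -- this line is not present in the truncated snippet):
# pre_coef = coef[coef != 0]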
# coding=utf-8
# Author: <NAME>
# Date: Jan 13, 2020
#
# Description: Reads all available gene information (network, FPKM, DGE, etc) and extracts features for ML.
#
#
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import networkx as nx
from utils import get_network_layer, ensurePathExists
import argparse
from itertools import product, chain
def ours_or_literature_phenotype(r):
if pd.notnull(r['Our DM pheno code']):
return r['Our DM pheno code']
elif pd.notnull(r['Others DM pheno code']):
return r['Others DM pheno code']
else:
return np.nan
def direct_or_indirect_phenotype(r):
if pd.notnull(r['direct-phenotype']):
return r['direct-phenotype']
elif pd.notnull(r['indirect-phenotype']):
return 'indirect'
else:
return np.nan
if __name__ == '__main__':
#
# Args
#
parser = argparse.ArgumentParser()
parser.add_argument("--celltype", default='spermatocyte', type=str, choices=['spermatocyte', 'enterocyte'], help="Cell type. Must be either 'spermatocyte' or 'enterocyte'. Defaults to spermatocyte")
parser.add_argument('--layer', default='DM', type=str, choices=['HS', 'MM', 'DM'], help="Layer/Species.")
args = parser.parse_args()
#
celltype = args.celltype # spermatocyte or enterocyte
layer = species = args.layer
layers = ['HS', 'MM', 'DM']
network = 'thr' # 'thr'
threshold = 0.5
threshold_str = str(threshold).replace('.', 'p')
#
#
print('Reading {celltype:s}-{network:s}-{threshold:s} Network'.format(celltype=celltype, network=network, threshold=threshold_str))
path_net = '../../04-network/results/network/{celltype:s}/'.format(celltype=celltype)
rGfile_gpickle = path_net + 'net-{celltype:s}-{network:s}-{threshold:s}.gpickle'.format(celltype=celltype, network=network, threshold=threshold_str)
G = nx.read_gpickle(rGfile_gpickle)
#
# Load Multilayer Graph - Extract Layer Graph
#
print('Extracting {layer:s} SubGraph'.format(layer=layer))
Gt = get_network_layer(G, layer)
#
# Backbone data
#
print('Reading backbone')
path_backbone = "../../04-network/results/network-closure/{celltype:s}/".format(celltype=celltype)
rBfile = path_backbone + "net-closure-{celltype:s}-{network:s}-{threshold:s}-{layer:s}.gpickle".format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
B = nx.read_gpickle(rBfile)
is_metric = {(i, j) for i, j, d in B.edges(data=True) if d.get('is_metric') is True}
Bm = B.edge_subgraph(is_metric).copy()
is_ultrametric = {(i, j) for i, j, d in B.edges(data=True) if d.get('is_ultrametric') is True}
Bum = Bm.edge_subgraph(is_ultrametric).copy()
#
# (ortho)Backbone data
#
if celltype == 'spermatocyte':
print('Reading ortho-backbone')
path_ortho_backbone = "../../04-network/results/network-closure-ortho/{celltype:s}/".format(celltype=celltype)
rOfile = path_ortho_backbone + "net-closure-ortho-{celltype:s}-{network:s}-{threshold:s}-{layer:s}.gpickle".format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
OB = nx.read_gpickle(rOfile)
is_metric_ortho = nx.get_edge_attributes(OB, name='is_metric_ortho')
nx.set_edge_attributes(Gt, name='is_metric_ortho', values=is_metric_ortho)
is_metric_ortho_string = 'is_metric_ortho' + ''.join(['-{other_layer:s}'.format(other_layer=other_layer) for other_layer in layers if other_layer != layer])
is_ortho_metric_edges = [(i, j) for i, j, d in OB.edges(data=True) if d.get('is_metric_ortho') == is_metric_ortho_string]
set_ortho_metric_nodes = set(list(chain(*is_ortho_metric_edges)))
is_ortho_metric_nodes = {n: n in set_ortho_metric_nodes for n in Gt.nodes()}
nx.set_node_attributes(Gt, name='is_metric_ortho', values=is_ortho_metric_nodes)
#
# Node data to DataFrame
#
df = pd.DataFrame.from_dict(dict(Gt.nodes(data=True)), orient='index')
#
# Load DGE
#
print('Load DEG data')
path_dge = '../../02-core_genes/results/DE/'
rfdeg = path_dge + '{species:s}-DE_genes.csv.gz'.format(celltype=celltype, species=species)
dfdeg = pd.read_csv(rfdeg, index_col=0)
#
dfdeg = dfdeg.loc[dfdeg.index.isin(df.index), :]
# Set DEG variables
if species == 'DM':
df['Middle_vs_Apical'] = dfdeg['Middle_vs_Apical']
df['Middle_vs_Apical'].fillna(False, inplace=True)
df['Basal_vs_Middle'] = dfdeg['Basal_vs_Middle']
df['Basal_vs_Middle'].fillna(False, inplace=True)
#
df['logFC_MiddleApical'] = dfdeg['logFC_MiddleApical']
df['logFC_MiddleApical'].fillna(0, inplace=True)
#
df['logFC_BasalMiddle'] = dfdeg['logFC_BasalMiddle']
df['logFC_BasalMiddle'].fillna(0, inplace=True)
else:
df['Cyte_vs_Gonia'] = dfdeg['Cyte_vs_Gonia']
df['Cyte_vs_Gonia'].fillna(False, inplace=True)
df['Tid_vs_Cyte'] = dfdeg['Tid_vs_Cyte']
df['Tid_vs_Cyte'].fillna(False, inplace=True)
#
df['logFC_CyteGonia'] = dfdeg['logFC_CyteGonia']
df['logFC_CyteGonia'].fillna(0, inplace=True)
#
df['logFC_TidCyte'] = dfdeg['logFC_TidCyte']
df['logFC_TidCyte'].fillna(0, inplace=True)
#
# Load mdlc-mutant DGE
#
rMDLCFile = '../../01-diff-gene-exp/results/mdlc/{layer:s}-DGE-mdlc_vs_control.csv'.format(layer=layer)
dfM = pd.read_csv(rMDLCFile, index_col=0, usecols=['id', 'gene', 'logFC', 'logCPM', 'F', 'PValue', 'FDR'])
# Filter only DGE significant
dfMs = dfM.loc[(dfM['logFC'].abs() > 1) & (dfM['FDR'] <= 0.05) & (dfM['logCPM'] >= 1), :].copy()
dfMs_up = dfMs.loc[(dfMs['logFC'] > 0), :]
dfMs_dw = dfMs.loc[(dfMs['logFC'] < 0), :]
def map_up_down(x):
if x in dfMs_up.index:
return 'up'
elif x in dfMs_dw.index:
return 'down'
else:
return 'no-change'
df['mdlc-mutant-up/down'] = df.index.map(map_up_down)
df['logFC_mdlc-mutant'] = dfM['logFC']
df['logFC_mdlc-mutant'].fillna(0, inplace=True)
#
# Load mdlc-mutant splicing-defects
#
print('Adding mdlc Splicing Defects results')
rMDLCFile = '../../01-diff-gene-exp/results/mdlc/{layer:s}-IntronRetention-mdlc_vs_control.csv'.format(layer=layer)
dfI = pd.read_csv(rMDLCFile, index_col=0, usecols=['id', 'gene'])
df['mdlc-mutant-splidef'] = df.index.map(lambda x: x in dfI.index)
#
# Load FPKM
#
print('Load FPKM data')
path_fpkm = '../../02-core_genes/results/FPKM/'
df_HS_fpkm = pd.read_csv(path_fpkm + 'HS/HS-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype))
df_MM_fpkm = pd.read_csv(path_fpkm + 'MM/MM-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype))
df_DM_fpkm = pd.read_csv(path_fpkm + 'DM/DM-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype))
if species == 'DM':
dffpkm = df_DM_fpkm.set_index('id_gene')
elif species == 'MM':
dffpkm = df_MM_fpkm.set_index('id_gene')
elif species == 'HS':
dffpkm = df_HS_fpkm.set_index('id_gene')
# Keep only genes in the network.
#dffpkm = dffpkm.loc[dffpkm.index.isin(df.index), :]
#
# Identify conserved genes
#
print('Identify Conserved Genes')
dict_string_gene_HS = df_HS_fpkm.set_index('id_string')['id_gene'].to_dict()
dict_string_gene_MM = df_MM_fpkm.set_index('id_string')['id_gene'].to_dict()
dict_string_gene_DM = df_DM_fpkm.set_index('id_string')['id_gene'].to_dict()
path_meta = '../../02-core_genes/results/meta-genes/'
dfM = pd.read_csv(path_meta + 'meta-{celltype:s}-genes.csv.gz'.format(celltype=celltype), index_col='id_eggnog', usecols=['id_eggnog', 'id_string_HS', 'id_string_MM', 'id_string_DM'])
dfM['id_string_HS'] = dfM['id_string_HS'].apply(lambda x: x.split(',') if not pd.isnull(x) else [])
dfM['id_string_MM'] = dfM['id_string_MM'].apply(lambda x: x.split(',') if not pd.isnull(x) else [])
dfM['id_string_DM'] = dfM['id_string_DM'].apply(lambda x: x.split(',') if not | pd.isnull(x) | pandas.isnull |
from argh import arg
import os
from functools import partial
import pandas as pd
from typing import List
import hashlib
from functools import partial
from tqdm import tqdm
tqdm.pandas()
def calculate_improvement(df, current_row):
ensemble_size = current_row["ensemble_size"]
image = current_row["image"]
clazz = current_row["class"]
if ensemble_size == 1:
return None
assert ensemble_size > 1
assert ensemble_size <= df["ensemble_size"].max()
previous_row = df[(df["image"] == image) & (df["class"] == clazz) &
(df["ensemble_size"] == ensemble_size - 1)]
assert len(previous_row) == 1
previous_value = previous_row["value"]
value = current_row["value"]
improvement = (value - previous_value)
return improvement.iloc[0]
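# CLI entry point: loads the single-model metrics for the first job hash, then the
# metrics of each incrementally larger ensemble (identified by the md5 of its sorted
# member hashes), and concatenates everything into one table for the full ensemble.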
@arg('job_hashes', nargs='+')
def combine_ensemble(base_dir: str, job_hashes: List[str]):
metrics_file = os.path.join(base_dir, "metrics.csv")
print("Loading {}".format(metrics_file))
metrics = pd.read_csv(metrics_file)
metrics = metrics.rename(columns={"job_hash": "ensemble_hash"})
metrics = metrics[metrics["ensemble_hash"] == job_hashes[0]]
metrics = metrics[[
"image", "class", "threshold", "metric", "value", "ensemble_hash"
]]
metrics["ensemble_size"] = 1
results = [metrics]
for idx in range(1, len(job_hashes)):
hashes = job_hashes[:idx + 1]
hashes.sort()
ensemble_hash = hashlib.md5("".join(hashes).encode()).hexdigest()
metrics_file = os.path.join(base_dir, "ensembles", ensemble_hash,
"metrics.csv")
print("Loading {}".format(metrics_file))
metrics = pd.read_csv(metrics_file)
metrics["ensemble_size"] = len(hashes)
metrics["ensemble_hash"] = ensemble_hash
results.append(metrics)
job_hashes.sort()
ensemble_hash = hashlib.md5("".join(job_hashes).encode()).hexdigest()
outfile = os.path.join(base_dir, "ensembles", ensemble_hash,
"combined_metrics.csv")
print("Writing {}".format(outfile))
    df = pd.concat(results)
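    # Plausible continuation (assumption): derive per-row improvements with the
    # calculate_improvement helper defined above and persist the combined table
    # to the outfile announced by the print statement.
    df["improvement"] = df.progress_apply(partial(calculate_improvement, df), axis=1)
    df.to_csv(outfile, index=False)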
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
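# Fixtures: a small deterministic 5-day close series (tiled into a 3-column frame)
# plus a larger random frame, tiled wide, for heavier multi-column runs.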
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
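# Low-level Numba tests: order execution (nb.execute_order_nb) and
# call-sequence construction (nb.build_call_seq_nb).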
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
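    # Malformed states/orders must raise; NaN-driven cases are ignored (status=1) and
    # infeasible ones are rejected (status=2), as asserted via status/status_info below.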
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
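    # Valid orders: verify the resulting cash/position/debt/free_cash and the fill for
    # each size type (Amount, TargetAmount, Value, TargetValue, TargetPercent, Percent),
    # including fees, fixed fees, slippage and lock_cash handling.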
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
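# Thin wrappers that fix the `direction` argument so the same close/size inputs can be
# replayed as two-sided, long-only and short-only portfolios.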
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
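    # lock_cash=True caps order sizes so that (shared) free cash cannot go negative;
    # the lock_cash=False variants below intentionally drive free cash negative for contrast.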
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
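    # Grouping-related behaviour: group_by assigns columns to groups, cash_sharing pools
    # capital within a group, and call_seq controls the per-row execution order.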
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
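# The plain from_signals_* helpers above pin the trade direction via the `direction`
# argument, while the from_ls_signals_* helpers appear to exercise the explicit
# (entries, exits, short_entries, short_exits) calling convention instead, so the
# parametrized tests below can cover both ways of expressing the same signals.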
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index= | pd.Int64Index([0, 1], dtype='int64') | pandas.Int64Index |
import json
import random
from collections import OrderedDict, Counter
from itertools import groupby
import copy
import pandas as pd
from django.shortcuts import render
from django.db.models import Count
from django.db.models.functions import Concat
from django.http import JsonResponse
from django.core.exceptions import ObjectDoesNotExist
from .models import (BDB_senses, Source,
Target, Alignment, StrongsM2M,
Notes, Words, Collocations,
BDB_strongs, BDB, Dodson
)
# UTILS
HEBREW_ACCENTS = '''
\u0591
\u0592
\u0593
\u0594
\u0595
\u0596
\u0597
\u0598
\u0599
\u0592
\u059A
\u059B
\u059C
\u059D
\u059E
\u059F
\u05A0
\u05A1
\u05A2
\u05A3
\u05A4
\u05A5
\u05A6
\u05A7
\u05A8
\u05A9
\u05AA
\u05AB
\u05AC
\u05AD
\u05AE
\u05AF
'''.split()
HEBREW_VOWELS = '''
\u05B0
\u05B1
\u05B2
\u05B3
\u05B4
\u05B5
\u05B6
\u05B7
\u05B8
\u05B9
\u05BA
\u05BB
\u05C7
'''.split()
HEBREW_PUNKT = '''
\u05BC
\u05BD
\u05BF
\u05C0
\u05C3
\u05C4
\u05C5
\u05C6
\u05EF
\u05F3
\u05F4
'''.split()
HEBREW_MAQEF = '''
\u05BE
'''.split()
HEBREW_SIN_SHIN_DOTS = '''
\u05C1
\u05C2
'''.split()
HEBREW_CONSONANTS = '''
\u05D0
\u05D1
\u05D2
\u05D4
\u05D5
\u05D6
\u05D7
\u05D8
\u05D9
\u05DA
\u05DB
\u05DC
\u05DD
\u05DE
\u05DF
\u05E0
\u05E1
\u05E2
\u05E3
\u05E4
\u05E5
\u05E6
\u05E7
\u05E8
\u05E9
\u05EA
'''.split()
HEBREW_WITH_DAGESH_OR_DOT = '''
\uFB2A
\uFB2B
\uFB2C
\uFB2D
\uFB2E
\uFB32
\uFB2F
\uFB30
\uFB31
\uFB33
\uFB34
\uFB35
\uFB36
\uFB37
\uFB38
\uFB39
\uFB3A
\uFB3B
\uFB3C
\uFB3D
\uFB3E
\uFB3F
\uFB40
\uFB41
\uFB42
\uFB43
\uFB44
\uFB45
\uFB46
\uFB47
\uFB48
\uFB49
'''.split()
HEBREW_ADVANCED = '''
\uFB1D
\uFB1E
\uFB1F
\uFB20
\uFB21
\uFB22
\uFB23
\uFB24
\uFB25
\uFB26
\uFB27
\uFB28
\uFB29
\uFB4A
\uFB4B
\uFB4C
\uFB4D
\uFB4E
\uFB4F
'''.split()
COLOR_SCALE = {1:'darken-4',
2:'darken-3',
3:'darken-2',
4:'lighten-1',
5:'lighten-2',
6:'lighten-3',
7:'lighten-4',
8:'lighten-5',
9:'lighten-5',
10:'lighten-5',
}
for i in range(11,5000): COLOR_SCALE[i] = 'lighten-5'
ICON_SCALE = {
1:'looks_one',
2:'looks_two',
3:'looks_3',
4:'looks_4',
5:'looks_5',
6:'looks_6' }
for i in range(7,5000): ICON_SCALE[i] = ''
def get_font(entry_id):
if entry_id[0]=='A':
font='hb'
elif entry_id[0]=='H':
font='hb'
elif entry_id[0]=='G':
font='gk'
else:
font=None
return font
def expand_window(li, window=5):
'''
>>> expand_window([20])
{15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}
    >>> expand_window([20], window=2)
    {18, 19, 20, 21, 22}
'''
output = []
for itm in li:
for i in range(-window,window+1):
output.append(itm+i)
return set(output)
def build_concordance(token_ids, window_tokens, highlights=[], window=5):
'''
    >>> build_concordance([1,2], {1:'In', 2:'the', 3:'beginning'})
    ['<span id="1" class="hl">In</span>', '<span id="2" class="hl">the</span>', 'beginning']
    >>> build_concordance([1,2], {1:'In', 2:'the', 3:'beginning'}, highlights=[3])
    ['<span id="1" class="hl">In</span>', '<span id="2" class="hl">the</span>', '<span id="3" class="hl">beginning</span>']
'''
concordance = []
for i in range(min(token_ids)-window, max(token_ids)+window+1):
try:
token = window_tokens[i]
if i in token_ids or i in highlights:
token = '<span id="' + str(i) + '" class="hl">' + token + '</span>'
if token: # some cases are None
concordance.append(token)
except:
continue
return concordance
def remove_accents(input_string):
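    '''
    Strip Hebrew accent (cantillation) marks, leaving vowels and consonants intact.
    '''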
for char in HEBREW_ACCENTS:
input_string = input_string.replace(char, '')
return input_string
# VIEWS
def discovery(request):
'''
Fake some vertical data
'''
pass
def verticalize(request):
'''
Add a vertical annotation
'''
pass
def view_forms(request, entry_id):
'''
The current solution is suboptimal. What is needed instead is an SQL
query that gets the frequency of each morph+token combination
as well as a list of its alignments.
'''
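    # A rough sketch of the single-query approach described above (illustrative only;
    # it is not wired into the view and the exact aggregation may need adjusting):
    # forms = (Source.objects
    #          .filter(strongs_no_prefix=entry_id)
    #          .values('morph', 'token')
    #          .annotate(frequency=Count('id'))
    #          .order_by('-frequency'))
    # The aligned target blocks could then be gathered per (morph, token) pair,
    # e.g. with ArrayAgg('alignment__target_blocks', distinct=True) on PostgreSQL.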
source = Source.objects.filter(strongs_no_prefix=entry_id).prefetch_related('target_set')
lemma = source[0].lemma
font = get_font(entry_id)
# alternative: /api/source/?book=&chapter=&verse=&strongs_no_prefix=&lemma=%D7%99%D6%B8%D7%9C%D6%B7%D7%93&token=&query={token,alignments{target_blocks}}
# forms = source.values('morph', 'token').annotate(frequency=Count(['morph'])).order_by('-frequency')
forms = set(source.values_list('id', 'morph', 'token', 'alignment__target_blocks'))
# Create a form because namedtuples are immutable
class Form():
def __init__(self, id, morph, token, alg):
self.id = id
self.morph = morph
self.token = token
self.alg = alg
forms = [Form(*itm) for itm in forms]
# change the formatting per instance
for row in forms:
try:
row.alg = row.alg.strip()
        except AttributeError:  # alg is None when the row has no alignment
pass
row.token = remove_accents(row.token.strip())
# compute the frequencies
frequencies = Counter((row.morph, row.token) for row in forms)
# add the alignments
output = {}
for itm,freq in frequencies.items():
morph, token = itm
algs = []
for row in forms:
if row.morph == morph and row.token == token and row.alg:
algs.append(row.alg)
output[(morph, token)] = (freq, ', '.join(set(algs)))
# sort the output
output = sorted(output.items(), key=lambda item: item[1], reverse=True)
return render(request, 'lexicon/view_forms.html', {'lemma':lemma,
'entry':entry_id,
'forms':output,
'font':font,
# 'target_blocks':target_blocks,
})
def view_resources(request, entry_id):
source = Source.objects.filter(strongs_no_prefix=entry_id)
lemma = source[0].lemma
words_ids = source.filter(words__isnull=False).distinct().values('words__id')
words = Words.objects.filter(pk__in=words_ids)
related_words = StrongsM2M.objects.filter(number=entry_id).values_list('related_number')
strongs = Source.objects.filter(strongs_no_prefix__in=related_words).values_list('strongs_no_prefix', 'lemma').distinct()
related_words = dict(strongs)
# prefetch related is essential to keep the number of queries small
notes = Notes.objects.filter(source__strongs_no_prefix=entry_id).prefetch_related('source', 'source__target_set')
font = get_font(entry_id)
return render(request, 'lexicon/view_resources.html', {'lemma':lemma,
'words':words,
'related_words':related_words,
'notes':notes,
'font':font,
'entry':entry_id})
def view_dictionary(request, entry_id):
lemma = Source.objects.filter(strongs_no_prefix=entry_id)[0].lemma
font = get_font(entry_id)
if entry_id.startswith('H'):
bdb_entries_ids = BDB_strongs.objects.filter(strongs=entry_id).values('bdb')
bdb_entries = BDB.objects.filter(bdb__in=bdb_entries_ids)
return render(request, 'lexicon/view_dictionary.html', {'bdb_entries': bdb_entries,
'entry':entry_id,
'lemma':lemma,
'font':font,})
if entry_id.startswith('G'):
dodson = Dodson.objects.filter(strongs=entry_id)
return render(request, 'lexicon/view_dictionary.html', {'dodson': dodson,
'entry':entry_id,
'lemma':lemma,
'font':font,})
def view_parsed_dictionary(request, entry_id):
lemma = Source.objects.filter(strongs_no_prefix=entry_id)[0].lemma
font = 'hb'
if entry_id.startswith('H'):
bdb_entries_ids = BDB_strongs.objects.filter(strongs=entry_id).values('bdb')
bdb_entries = BDB.objects.filter(bdb__in=bdb_entries_ids).prefetch_related('bdbsensetosource_set', 'bdbsensetosource_set__source')
else:
bdb_entries = ''
return render(request, 'lexicon/view_parsed_dictionary.html', {'bdb_entries': bdb_entries,
'entry':entry_id,
'lemma':lemma,
'font':font,})
def view_collocates(request, lemma):
try:
node = Collocations.objects.get(node=lemma)
lemma = node.node
collocates = json.loads(node.context.replace("'", '"'))
except ObjectDoesNotExist:
node = ''
collocates = ''
entry = Source.objects.filter(lemma=lemma).first().strongs_no_prefix
font = get_font(entry)
return render(request, 'lexicon/view_collocates.html', {'node':node,
'collocates': collocates,
'lemma':lemma,
'font':font,
'entry':entry,})
def view_cooccurrences(request, main_entry, sec_entry):
'''
    Search for a Strong's number AND another Strong's number in its immediate vicinity.
    Open question: when do you link to the alignment data?
    This only searches the SOURCE, not the TARGET, just yet.
'''
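    # The response is a JSON list of HTML concordance strings, one per co-occurrence,
    # with both lemmas wrapped in <span ... class="hl"> tags (illustrative shape only):
    # ['... <span id="123" class="hl">...</span> ... <span id="127" class="hl">...</span> ...']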
# main = Source.objects.filter(strongs_no_prefix=main_entry)
def clean(input_string):
return input_string.replace('_', ' ')
main = Source.objects.filter(lemma=clean(main_entry))
main_ids = [i[0] for i in main.values_list('id')]
main_w_context = expand_window(main_ids)
# secondary = Source.objects.filter(strongs_no_prefix=sec_entry)
secondary = Source.objects.filter(lemma=clean(sec_entry))
sec_ids = [i[0] for i in secondary.values_list('id')]
sec_w_context = expand_window(sec_ids)
main_window_tokens = dict(Source.objects.filter(id__in=main_w_context).annotate(full_token=Concat('token_prefix','token')).values_list('id', 'full_token'))
highlights = []
for word in sec_ids:
if word in main_w_context:
highlights.append(word)
lines = []
for word in main_ids:
if word in sec_w_context:
lines.append([word])
output = []
for line in lines:
output.append(''.join(build_concordance(line, main_window_tokens, highlights=highlights)))
return JsonResponse(output, safe=False)
def demo_entry(request):
return render(request, 'lexicon/demo_entry.html')
def view_occurrences(request, entry_id):
try:
lemma = Source.objects.filter(strongs_no_prefix=entry_id)[0].lemma
except:
lemma = None
font = get_font(entry_id)
tokens = Source.objects.filter(strongs_no_prefix=entry_id)
return render(request, 'lexicon/view_occurrences.html', {
'entry':entry_id,
'lemma': lemma,
'font': font,
'tokens': tokens,
})
def view_entry(request, entry_id):
try:
lemma = Source.objects.filter(strongs_no_prefix=entry_id)[0].lemma
except:
lemma = None
font = get_font(entry_id)
result = Alignment.objects.filter(source__strongs_no_prefix=entry_id).values('id', 'alg_id', 'source__book', 'source__chapter', 'source__verse', 'source', 'source__token', 'source__morph', 'target', 'target__target_token', 'roots', 'source_blocks', 'target_blocks')
source_ids = [itm['source'] for itm in result]
source_ids_w_window = expand_window(source_ids)
source_window_tokens = dict(Source.objects.filter(id__in=source_ids_w_window).annotate(full_token=Concat('token_prefix','token')).values_list('id', 'full_token'))
target_ids = [itm['target'] for itm in result]
target_ids_w_window = expand_window(target_ids)
target_window_tokens = dict(Target.objects.filter(id__in=target_ids_w_window).annotate(full_token=Concat('target_token_prefix','target_token')).values_list('id', 'full_token'))
# goal: alg_id, source_id, source_blocks, [target_id1, target_id2], [target_blocks1, target_blocks2], source_concordance, target_concordance
# this assumes only a single source_id, even if multiple source words are part of the alignment
    # this is a 'condensed' result as it merges multiple [target_ids, ...] into single lists
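    # Illustrative shape of one condensed row (values are made up):
    # {'alg_id': 'GEN.1.1.5', 'book': 'GEN', 'chapter': 1, 'verse': 1,
    #  'source': [42], 'source_blocks': '...', 'source__morph': '...',
    #  'target': [3170, 3171], 'target__target_token': ['the', 'beginning'],
    #  'target_blocks': 'the beginning', 'roots': '...'}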
condensed_result = []
for idx,grp in groupby(result, lambda datum: datum['alg_id']):
output = {}
output['alg_id'] = idx
for idx,itm in enumerate(grp):
if idx == 0:
# for the first item we do some extra's
output['id'] = itm['id']
output['alg_id'] = itm['alg_id']
#TODO add the reference
output['book'] = itm['source__book']
output['chapter'] = itm['source__chapter']
output['verse'] = itm['source__verse']
output['source'] = [itm['source']] # list because build_concordance needs a list
output['source_blocks'] = itm['source_blocks']
output['source__morph'] = itm['source__morph']
output['target'] = [itm['target']]
output['target__target_token'] = [itm['target__target_token']] # list
output['target_blocks'] = itm['target_blocks']
output['roots'] = itm['roots']
else:
output['target'] = output['target'] + [itm['target']] # list
output['target__target_token'] = output['target__target_token'] + [itm['target__target_token']] # list
condensed_result.append(output)
# now add concordances
algs_w_concordances = []
for itm in condensed_result:
itm['source_concordance'] = ''.join(build_concordance(itm['source'], source_window_tokens, window=4))
itm['target_concordance'] = ''.join(build_concordance(itm['target'], target_window_tokens, window=8))
algs_w_concordances.append(itm)
# quickfix to start working with the data
df = | pd.DataFrame(algs_w_concordances) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
import numpy as np
from unittest import TestCase
import pandas as pd
from tsfresh.feature_selection.selection import select_features
class SelectionTestCase(TestCase):
def test_assert_list(self):
self.assertRaises(TypeError, select_features, X=pd.DataFrame(index=range(2)), y=[1,2,3])
def test_assert_one_row_X(self):
X = pd.DataFrame([1], index=[1])
y = pd.Series([1], index=[1])
self.assertRaises(ValueError, select_features, X=X, y=y)
def test_assert_different_index(self):
X = pd.DataFrame(list(range(3)), index=[1, 2, 3])
y = pd.Series(range(3), index=[1, 3, 4])
self.assertRaises(ValueError, select_features, X=X, y=y)
def test_assert_shorter_y(self):
X = | pd.DataFrame([1, 2], index=[1, 2]) | pandas.DataFrame |
#!/usr/bin/python
import os
import numpy as np
import pandas as pd
from scipy.signal import savgol_filter
import json
import time
import darts
from darts import TimeSeries
from darts.models import RNNModel
from sktime.performance_metrics.forecasting import mean_absolute_percentage_error
import dysts
from dysts.flows import *
from dysts.base import *
from dysts.utils import *
from dysts.analysis import *
hyperparams = {
"input_chunk_length": 50,
"output_chunk_length": 1,
"model": "LSTM",
"n_rnn_layers": 2,
"random_state": 0
}
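# These settings are presumably passed straight to darts' RNNModel later in the
# script (e.g. RNNModel(**hyperparams)); exact keyword support can vary between
# darts versions, so treat this as an assumption rather than a confirmed call.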
results = dict()
# cwd = os.getcwd()
cwd = os.path.dirname(os.path.realpath(__file__))
output_path = cwd + "/results/importance_sampling.json"
print("Saving data to: ", output_path)
full_epoch_count = 400
forecast_length = 200
transient_length = 2
n_iters = 5
epoch_count = 30
n_ic = 10 # model retraining is not currently working in darts
traj_len = 150
show_progress = False
print(f"{n_ic} points sampled per iteration, with trajectory length {traj_len}, for a total of {n_iters} iterations of length {epoch_count}")
print(n_ic * traj_len * n_iters * epoch_count)
print(1000 * full_epoch_count) # 1000 timepoints in an epoch
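# The two prints above compare training budgets: the importance-sampling loop sees
# n_ic * traj_len sampled points per epoch for n_iters * epoch_count epochs in total,
# while full training sees roughly 1000 timepoints per epoch for full_epoch_count epochs.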
for equation_ind, equation_name in enumerate(get_attractor_list()):
np.random.seed(0)
print(f"{equation_name} {equation_ind}", flush=True)
results[equation_name] = dict()
equation = getattr(dysts.flows, equation_name)()
if hasattr(equation, "delay"):
if equation.delay:
continue
sol = equation.make_trajectory(1200, resample=True)
y_train, y_test = sol[:-forecast_length, 0], sol[-forecast_length:, 0]
y_train_ts = TimeSeries.from_dataframe( | pd.DataFrame(y_train) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# a mix of nan and None results in 'mixed'
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
class TestNumberScalar(object):
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
import os
import yaml
import json
import pandas as pd
import matplotlib.pyplot as plt
from pylab import rcParams
import seaborn as sns
import numpy as np
from sklearn.linear_model import LinearRegression
import glob
import time
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: convertYaml2PandasDataframeT20
# This function converts yaml files to Pandas dataframe and saves as CSV
#
###########################################################################################
def convertYaml2PandasDataframeT20(infile,source,dest):
'''
Convert and save a T20 yaml file as a pandas dataframe
Description
This function converts a T20 Yaml file from the source directory to a pandas data frame.
The data frame is then stored as a .csv file. The saved file is of the format
team1-team2-date.csv, e.g. Kolkata Knight Riders-Sunrisers Hyderabad-2016-05-22.csv
Usage
convertYaml2PandasDataframeT20(yamlFile,sourceDir=".",targetDir=".")
Arguments
yamlFile
The yaml file to be converted to dataframe and saved
sourceDir
The source directory of the yaml file
targetDir
The target directory in which the data frame is stored as a CSV file
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
convertAllYaml2PandasDataframesT20
Examples
# In the example below ../yamldir is the source directory of the yaml file
convertYaml2PandasDataframeT20("225171.yaml",".","../data")
'''
os.chdir(source)
os.path.join(source,infile)
# Read Yaml file and convert to json
print('Converting file:',infile)
with open(infile) as f:
a=yaml.safe_load(f)  # safe_load works with recent PyYAML versions, where load() requires a Loader
# 1st innings
deliveries=a['innings'][0]['1st innings']['deliveries']
#Create empty dataframe for team1
team1=pd.DataFrame()
# Loop through all the deliveries of 1st innings and append each row to dataframe
for i in range(len(deliveries)):
df = pd.DataFrame(deliveries[i])
b= df.T
team1=pd.concat([team1,b])
# Rename batsman to striker/non-striker as there is another column batsman who scored runs
team1=team1.rename(columns={'batsman':'striker'})
# All extras column names
extras=[0,'wides','byes','legbyes','noballs','penalty']
if 'extras' in team1: #Check if extras are there
# Get the columns in extras for team1
b=team1.extras.apply(pd.Series).columns
# Find the missing extras columns
diff= list(set(extras) - set(b))
print('Team1:diff:',diff)
# Rename extras dict column as there is another column extras which comes from runs_dict
team1=team1.rename(columns={'extras':'extras_dict'})
#Create new columns by splitting dictionary columns - extras and runs
team1=pd.concat([team1,team1['extras_dict'].apply(pd.Series)], axis=1)
# Add the missing columns
for col in diff:
print("team1:",col)
team1[col]=0
team1=team1.drop(columns=0)
else:
print('Team1:Extras not present')
# Rename runs columns to runs_dict
if 'runs' in team1: #Check if runs in team1
team1=team1.rename(columns={'runs':'runs_dict'})
team1=pd.concat([team1,team1['runs_dict'].apply(pd.Series)], axis=1)
else:
print('Team1:Runs not present')
if 'wicket' in team1: #Check if wicket present
# Rename wicket as wicket_dict dict column as there is another wicket column
team1=team1.rename(columns={'wicket':'wicket_dict'})
team1=pd.concat([team1,team1['wicket_dict'].apply(pd.Series)], axis=1)
else:
print('Team1: Wicket not present')
team1['team']=a['innings'][0]['1st innings']['team']
team1=team1.reset_index(inplace=False)
#Rename index to delivery
team1=team1.rename(columns={'index':'delivery'})
# 2nd innings - Check if the 2nd inning was played
if len(a['innings']) > 1: # Team2 played
deliveries=a['innings'][1]['2nd innings']['deliveries']
#Create empty dataframe for team1
team2=pd.DataFrame()
# Loop through all the deliveries of 2nd innings
for i in range(len(deliveries)):
df = pd.DataFrame(deliveries[i])
b= df.T
team2=pd.concat([team2,b])
# Rename batsman to striker/non-striker as there is another column batsman who scored runs
team2=team2.rename(columns={'batsman':'striker'})
# Get the columns in extras for team1
if 'extras' in team2: #Check if extras in team2
b=team2.extras.apply(pd.Series).columns
diff= list(set(extras) - set(b))
print('Team2:diff:',diff)
# Rename extras dict column as there is another column extras which comes from runs_dict
team2=team2.rename(columns={'extras':'extras_dict'})
#Create new columns by splitting dictionary columns - extras and runs
team2=pd.concat([team2,team2['extras_dict'].apply(pd.Series)], axis=1)
# Add the missing columns
for col in diff:
print("team2:",col)
team2[col]=0
team2=team2.drop(columns=0)
else:
print('Team2:Extras not present')
# Rename runs columns to runs_dict
if 'runs' in team2:
team2=team2.rename(columns={'runs':'runs_dict'})
team2=pd.concat([team2,team2['runs_dict'].apply(pd.Series)], axis=1)
else:
print('Team2:Runs not present')
if 'wicket' in team2:
# Rename wicket as wicket_dict column as there is another column wicket
team2=team2.rename(columns={'wicket':'wicket_dict'})
team2=pd.concat([team2,team2['wicket_dict'].apply(pd.Series)], axis=1)
else:
print('Team2:wicket not present')
team2['team']=a['innings'][1]['2nd innings']['team']
team2=team2.reset_index(inplace=False)
#Rename index to delivery
team2=team2.rename(columns={'index':'delivery'})
else: # Create empty columns for team2 so that the complete DF as all columns
team2 = pd.DataFrame()
cols=['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']
team2 = team2.reindex(columns=cols)
#Check for missing columns. It is possible that no wickets were lost in the entire innings
cols=['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']
# Team1 - missing columns
msngCols=list(set(cols) - set(team1.columns))
print('Team1-missing columns:', msngCols)
for col in msngCols:
print("Adding:team1:",col)
team1[col]=0
# Team2 - missing columns
msngCols=list(set(cols) - set(team2.columns))
print('Team2-missing columns:', msngCols)
for col in msngCols:
print("Adding:team2:",col)
team2[col]=0
# Now both team1 and team2 should have the same columns. Concatenate
team1=team1[['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']]
team2=team2[['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']]
df=pd.concat([team1,team2])
#Fill NA's with 0s
df=df.fillna(0)
# Fill in INFO
print("Length of info field=",len(a['info']))
#City
try:
df['city']=a['info']['city']
except:
df['city'] =0
#Date
df['date']=a['info']['dates'][0]
#Gender
df['gender']=a['info']['gender']
#Match type
df['match_type']=a['info']['match_type']
# Neutral venue
try:
df['neutral_venue'] = a['info']['neutral_venue']
except KeyError as error:
df['neutral_venue'] = 0
#Outcome - Winner
try:
df['winner']=a['info']['outcome']['winner']
# Get the win type - runs, wickets etc
df['winType']=list(a['info']['outcome']['by'].keys())[0]
print("Wintype=",list(a['info']['outcome']['by'].keys())[0])
#Get the value of wintype
winType=list(a['info']['outcome']['by'].keys())[0]
print("Win value=",list(a['info']['outcome']['by'].keys())[0] )
# Get the win margin - runs,wickets etc
df['winMargin']=a['info']['outcome']['by'][winType]
print("win margin=", a['info']['outcome']['by'][winType])
except:
df['winner']=0
df['winType']=0
df['winMargin']=0
# Outcome - Tie
try:
df['result']=a['info']['outcome']['result']
df['resultHow']=list(a['info']['outcome'].keys())[0]
df['resultTeam'] = a['info']['outcome']['eliminator']
print(a['info']['outcome']['result'])
print(list(a['info']['outcome'].keys())[0])
print(a['info']['outcome']['eliminator'])
except:
df['result']=0
df['resultHow']=0
df['resultTeam']=0
try:
df['non_boundary'] = a['info']['non_boundary']
except KeyError as error:
df['non_boundary'] = 0
try:
df['ManOfMatch']=a['info']['player_of_match'][0]
except:
df['ManOfMatch']=0
# Identify the winner
df['overs']=a['info']['overs']
df['team1']=a['info']['teams'][0]
df['team2']=a['info']['teams'][1]
df['tossWinner']=a['info']['toss']['winner']
df['tossDecision']=a['info']['toss']['decision']
df['venue']=a['info']['venue']
# Rename column 'striker' to batsman
# Rename column 'batsman' to runs as it signifies runs scored by batsman
df=df.rename(columns={'batsman':'runs'})
df=df.rename(columns={'striker':'batsman'})
if (type(a['info']['dates'][0]) == str):
outfile=a['info']['teams'][0]+ '-' + a['info']['teams'][1] + '-' +a['info']['dates'][0] + '.csv'
else:
outfile=a['info']['teams'][0]+ '-' + a['info']['teams'][1] + '-' +a['info']['dates'][0].strftime('%Y-%m-%d') + '.csv'
destFile=os.path.join(dest,outfile)
print(destFile)
df.to_csv(destFile,index=False)
print("Dataframe shape=",df.shape)
return df, outfile
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: convertAllYaml2PandasDataframesT20
# This function converts all yaml files to Pandas dataframes and saves as CSV
#
###########################################################################################
def convertAllYaml2PandasDataframesT20(source,dest):
'''
Convert and save all Yaml files to pandas dataframes and save as CSV
Description
This function coverts all Yaml files from source directory to data frames. The data frames are
then stored as .csv. The saved files are of the format team1-team2-date.RData For
e.g. England-India-2008-04-06.RData etc
Usage
convertAllYaml2PandasDataframesT20(sourceDir=".",targetDir=".")
Arguments
sourceDir
The source directory of the yaml files
targetDir
The target directory in which the data frames are stored as CSV files
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
convertYaml2PandasDataframeT20
Examples
# In the example below ../yamldir is the source dir for the yaml files
convertAllYaml2PandasDataframesT20("../yamldir","../data")
'''
files = os.listdir(source)
for index, file in enumerate(files):
print("\n\nFile no=",index)
if file.endswith(".yaml"):
df, filename = convertYaml2PandasDataframeT20(file, source, dest)
#print(filename)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getRuns
# This function gets the runs scored by batsmen
#
###########################################################################################
def getRuns(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
# Determine number of deliveries faced and runs scored
runs=df1[['batsman','runs']].groupby(['batsman'],sort=False,as_index=False).agg(['count','sum'])
# Drop level 0
runs.columns = runs.columns.droplevel(0)
runs=runs.reset_index(inplace=False)
runs.columns=['batsman','balls','runs']
return(runs)
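##########################################################################################
# Usage sketch (added for illustration; the toy dataframe below is hypothetical and only
# mimics the columns produced by convertYaml2PandasDataframeT20). It shows what getRuns
# aggregates: deliveries recorded and runs scored per batsman.
###########################################################################################
def _exampleGetRuns():
    toy = pd.DataFrame({'batsman': ['A', 'A', 'B'],
                        'runs': [4, 1, 6],
                        'extras': [0, 0, 0],
                        'total': [4, 1, 6],
                        'non_boundary': [0, 0, 0]})
    # Expected: A -> 2 balls, 5 runs; B -> 1 ball, 6 runs
    return getRuns(toy)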
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getFours
# This function gets the fours scored by batsmen
#
###########################################################################################
def getFours(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
# Get number of 4s. Check if it is boundary (non_boundary=0)
m=df1.loc[(df1.runs >=4) & (df1.runs <6) & (df1.non_boundary==0)]
# Count the number of 4s
noFours= m[['batsman','runs']].groupby('batsman',sort=False,as_index=False).count()
noFours.columns=['batsman','4s']
return(noFours)
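##########################################################################################
# Usage sketch (illustration only, hypothetical toy data): getFours counts deliveries on
# which the batsman scored 4 or 5 runs with non_boundary == 0.
###########################################################################################
def _exampleGetFours():
    toy = pd.DataFrame({'batsman': ['A', 'A', 'B', 'B'],
                        'runs': [4, 6, 4, 1],
                        'extras': [0, 0, 0, 0],
                        'total': [4, 6, 4, 1],
                        'non_boundary': [0, 0, 0, 0]})
    # Expected: one 4 for A (the 6 is excluded), one 4 for B
    return getFours(toy)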
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getSixes
# This function gets the sixes scored by batsmen
#
###########################################################################################
def getSixes(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
df2= df1.loc[(df1.runs ==6)]
sixes= df2[['batsman','runs']].groupby('batsman',sort=False,as_index=False).count()
sixes.columns=['batsman','6s']
return(sixes)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getExtras
# This function gets the extras for the team
#
###########################################################################################
def getExtras(df):
df3= df[['total','wides', 'noballs', 'legbyes', 'byes', 'penalty', 'extras']]
a=df3.sum().astype(int)
#Convert series to dataframe
extras=a.to_frame().T
return(extras)
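##########################################################################################
# Usage sketch (illustration only, hypothetical toy data): getExtras returns a single-row
# dataframe with the column-wise totals of the extras-related columns.
###########################################################################################
def _exampleGetExtras():
    toy = pd.DataFrame({'total': [1, 4, 2],
                        'wides': [1, 0, 0],
                        'noballs': [0, 0, 1],
                        'legbyes': [0, 0, 0],
                        'byes': [0, 0, 0],
                        'penalty': [0, 0, 0],
                        'extras': [1, 0, 1]})
    # Expected: one row with total=7, wides=1, noballs=1, extras=2
    return getExtras(toy)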
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBattingScorecardMatch
# This function returns the team batting scorecard
#
###########################################################################################
def teamBattingScorecardMatch (match,theTeam):
'''
Team batting scorecard of a team in a match
Description
This function computes and returns the batting scorecard (runs, fours, sixes, balls played) for the team
Usage
teamBattingScorecardMatch(match,theTeam)
Arguments
match
The match for which the score card is required e.g.
theTeam
Team for which scorecard required
Value
scorecard A data frame with the batting scorecard
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBatsmenPartnershipMatch
teamBowlingScorecardMatch
teamBatsmenVsBowlersMatch
Examples
x1,y1=teamBattingScorecardMatch(kkr_sh,"Kolkata Knight Riders")
print(x1)
print(y1)
'''
scorecard=pd.DataFrame()
if(match.size != 0):
team=match.loc[match['team'] == theTeam]
else:
return(scorecard,-1)
a1= getRuns(team)
b1= getFours(team)
c1= getSixes(team)
# Merge columns
d1=pd.merge(a1, b1, how='outer', on='batsman')
e=pd.merge(d1,c1,how='outer', on='batsman')
e=e.fillna(0)
e['4s']=e['4s'].astype(int)
e['6s']=e['6s'].astype(int)
e['SR']=(e['runs']/e['balls']) *100
scorecard = e[['batsman','runs','balls','4s','6s','SR']]
extras=getExtras(match)
return(scorecard,extras)
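##########################################################################################
# Usage sketch (illustration only; the file name below is hypothetical): the match-level
# functions are normally fed a dataframe read back from a CSV written by
# convertYaml2PandasDataframeT20, e.g.
#   kkr_sh = pd.read_csv('Kolkata Knight Riders-Sunrisers Hyderabad-2016-05-22.csv')
#   scorecard, extras = teamBattingScorecardMatch(kkr_sh, 'Kolkata Knight Riders')
##########################################################################################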
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getRunsConceded
# This function gets the runs conceded by bowler
#
###########################################################################################
def getRunsConceded(df):
# Note the column batsman has the runs scored by batsman
df1=df[['bowler','runs','wides', 'noballs']]
df2=df1.groupby('bowler').sum()
# Only wides and no balls included in runs conceded
df2['runs']=(df2['runs']+df2['wides']+df2['noballs']).astype(int)
df3 = df2['runs']
return(df3)
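##########################################################################################
# Usage sketch (illustration only, hypothetical toy data): getRunsConceded sums, per
# bowler, the batsman runs plus wides and noballs.
###########################################################################################
def _exampleGetRunsConceded():
    toy = pd.DataFrame({'bowler': ['X', 'X', 'Y'],
                        'runs': [1, 4, 0],
                        'wides': [1, 0, 0],
                        'noballs': [0, 0, 0]})
    # Expected: X conceded 6 (including the wide), Y conceded 0
    return getRunsConceded(toy)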
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getOvers
# This function gets the overs for bowlers
#
###########################################################################################
def getOvers(df):
df1=df[['bowler','delivery']]
df2=(df1.groupby('bowler').count()/6).astype(int)
df2.columns=['overs']
return(df2)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getMaidens
# This function gets the maiden overs for bowlers
#
###########################################################################################
def getMaidens(df):
df1=df[['bowler','delivery','runs','wides', 'noballs']]
# Get the over
df1['over']=df1.delivery.astype(int)
# Runs conceded includes wides and noballs
df1['runsConceded']=df1['runs'] + df1['wides'] + df1['noballs']
df2=df1[['bowler','over','runsConceded']]
# Compute runs in each over by bowler
df3=df2.groupby(['bowler','over']).sum()
df4=df3.reset_index(inplace=False)
# If maiden set as 1 else as 0
df4.loc[df4.runsConceded !=0,'maiden']=0
df4.loc[df4.runsConceded ==0,'maiden']=1
# Sum the maidens
df5=df4[['bowler','maiden']].groupby('bowler').sum()
return(df5)
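##########################################################################################
# Usage sketch (illustration only, hypothetical toy data): getMaidens groups deliveries
# into overs (integer part of 'delivery') and flags overs with zero runs conceded.
###########################################################################################
def _exampleGetMaidens():
    toy = pd.DataFrame({'bowler': ['X'] * 6 + ['Y'] * 6,
                        'delivery': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6,
                                     1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
                        'runs': [0, 0, 0, 0, 0, 0, 1, 0, 4, 0, 0, 0],
                        'wides': [0] * 12,
                        'noballs': [0] * 12})
    # Expected: X bowled a maiden (over 0), Y did not (over 1 went for 5)
    return getMaidens(toy)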
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getWickets
# This function gets the wickets for bowlers
#
###########################################################################################
def getWickets(df):
df1=df[['bowler','kind', 'player_out', 'fielders']]
# Check if the team took wickets. Then this column will be a string
if isinstance(df1.player_out.iloc[0],str):
df2= df1[df1.player_out !='0']
df3 = df2[['bowler','player_out']].groupby('bowler').count()
else: # Did not take wickets. Set wickets as 0
df3 = df1[['bowler','player_out']].groupby('bowler').count()
df3['player_out']=0 # Set wickets as 0
return(df3)
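##########################################################################################
# Usage sketch (illustration only, hypothetical toy data): getWickets counts, per bowler,
# the deliveries on which player_out is not the string '0'.
###########################################################################################
def _exampleGetWickets():
    toy = pd.DataFrame({'bowler': ['X', 'X', 'Y'],
                        'kind': ['caught', '0', '0'],
                        'player_out': ['A', '0', '0'],
                        'fielders': ['B', '0', '0']})
    # Expected: X credited with 1 wicket; Y has none and drops out of the grouped count
    return getWickets(toy)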
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlingScorecardMatch
# This function gets the bowling scorecard
#
###########################################################################################
def teamBowlingScorecardMatch (match,theTeam):
'''
Compute and return the bowling scorecard of a team in a match
Description
This function computes and returns the bowling scorecard of a team in a match
Usage
teamBowlingScorecardMatch(match,theTeam)
Arguments
match
The match between the teams
theTeam
Team for which bowling performance is required
Value
l A data frame with the bowling performance of the team in the match
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketMatch
teamBowlersVsBatsmenMatch
teamBattingScorecardMatch
Examples
m=teamBowlingScorecardMatch(kkr_sh,"Kolkata Knight Riders")
print(m)
'''
team=match.loc[match.team== theTeam]
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
return(g1)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBatsmenPartnershipMatch
# This function gets the batting partnerships
#
###########################################################################################
def teamBatsmenPartnershipMatch(match,theTeam,opposition,plot=True,savePic=False, dir1=".",picFile="pic1.png"):
'''
Team batting partnerships of batsmen in a match
Description
This function plots the partnerships of batsmen in a match against an opposition or it can return the data frame
Usage
teamBatsmenPartnershipMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The match between the teams
theTeam
The team for which the batting partnerships are sought
opposition
The opposition team
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
df The data frame of the batsmen partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBattingScorecardMatch
teamBowlingWicketKindMatch
teamBatsmenVsBowlersMatch
matchWormChart
Examples
teamBatsmenPartnershipMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
m=teamBatsmenPartnershipMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=False)
print(m)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['batsman','runs','non_striker']]
if plot == True:
df3=df2.groupby(['batsman','non_striker']).sum().unstack().fillna(0)
rcParams['figure.figsize'] = 10, 6
df3.plot(kind='bar',stacked=True)
plt.xlabel('Batsman')
plt.ylabel('Runs')
plt.title(theTeam + ' -batting partnership- vs ' + opposition)
plt.text(4, 30,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBatsmenPartnershipMatch
# This function gives the performances of batsmen vs bowlers
#
###########################################################################################
def teamBatsmenVsBowlersMatch(match,theTeam,opposition, plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Team batsmen against bowlers in a match
Description
This function plots the performance of batsmen versus bowlers in a match or it can return the data frame
Usage
teamBatsmenVsBowlersMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The match between the teams
theTeam
The team for which the batsmen vs bowlers performance is sought
opposition
The opposition team
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
b The data frame of the batsmen vs bowlers performance
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketKindMatch
teamBowlingWicketMatch
Examples
teamBatsmenVsBowlersMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['batsman','runs','bowler']]
if plot == True:
df3=df2.groupby(['batsman','bowler']).sum().unstack().fillna(0)
df3.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Batsman')
plt.ylabel('Runs')
plt.title(theTeam + ' -Batsman vs Bowler- in match against ' + opposition)
plt.text(4, 30,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlingWicketKindMatch
# This function gives the wicket kind for bowlers
#
###########################################################################################
def teamBowlingWicketKindMatch(match,theTeam,opposition, plot=True,savePic=False, dir1=".",picFile="pic1.png"):
'''
Compute and plot the wicket kinds by bowlers in match
Description
This function computes and returns the kind of wickets (caught, bowled etc) taken by bowlers in a match between 2 teams
Usage
teamBowlingWicketKindMatch(match,theTeam,opposition,plot=TRUE)
Arguments
match
The match between the teams
theTeam
Team for which bowling performance is required
opposition
The opposition team
plot
If plot= TRUE the dataframe will be plotted else a data frame will be returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or data frame A data frame with the wicket kinds taken by the bowlers in the match
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketMatch
teamBowlingWicketRunsMatch
teamBowlersVsBatsmenMatch
Examples
teamBowlingWicketKindMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
m=teamBowlingWicketKindMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=False)
print(m)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df3=df2[df2.player_out != '0']
if plot == True:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','kind']).count().unstack().fillna(0)
df4.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Bowler')
plt.ylabel('Wickets')
plt.title(theTeam + ' -Wicketkind vs Runs- given against ' + opposition)
plt.text(4, 30,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile))
else:
plt.show()
plt.gcf().clear()
else:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','kind']).count().reset_index(inplace=False)
return(df4)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlingWicketMatch
# This function gives the wickets for bowlers
#
###########################################################################################
def teamBowlingWicketMatch(match,theTeam,opposition, plot=True,savePic=False, dir1=".",picFile="pic1.png"):
'''
Compute and plot wickets by bowlers in match
Description
This function computes and returns the wickets taken by bowlers in a match between 2 teams
Usage
teamBowlingWicketMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The match between the teams
theTeam
Team for which bowling performance is required
opposition
The opposition team
plot
If plot= TRUE the dataframe will be plotted else a data frame will be returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or data frame A data frame with the wickets taken by the bowlers in the match
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketMatch
teamBowlingWicketRunsMatch
teamBowlersVsBatsmenMatch
Examples
teamBowlingWicketMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df3=df2[df2.player_out != '0']
if plot == True:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','player_out']).count().unstack().fillna(0)
df4.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Bowler')
plt.ylabel('Wickets')
plt.title(theTeam + ' -No of Wickets vs Runs conceded- against ' + opposition)
plt.text(1, 1,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','player_out']).count().reset_index(inplace=False)
return(df4)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlersVsBatsmenMatch
# This function gives the bowlers vs batsmen and runs conceded
#
###########################################################################################
def teamBowlersVsBatsmenMatch (match,theTeam,opposition, plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Team bowlers vs batsmen in a match
Description
This function computes performance of bowlers of a team against an opposition in a match
Usage
teamBowlersVsBatsmenMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The data frame of the match. This can be obtained with the call for e.g a <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")
theTeam
The team against which the performance is required
opposition
The opposition team
plot
This parameter specifies if a plot is required, If plot=FALSE then a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or dataframe If plot=TRUE there is no return value. If plot=FALSE then the dataframe is returned
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBattingScorecardMatch
teamBowlingWicketKindMatch
matchWormChart
Examples
teamBowlersVsBatsmenMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['batsman','runs','bowler']]
if plot == True:
df3=df2.groupby(['batsman','bowler']).sum().unstack().fillna(0)
df3.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Batsman')
plt.ylabel('Runs')
plt.title(theTeam + ' -Bowler vs Batsman- against ' + opposition)
plt.text(4, 20,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: matchWormChart
# This function draws the match worm chart
#
###########################################################################################
def matchWormChart(match,team1,team2,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot the match worm graph
Description
This function plots the match worm graph between 2 teams in a match
Usage
    matchWormChart(match,team1,team2)
Arguments
match
The dataframe of the match
team1
The 1st team of the match
team2
the 2nd team in the match
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
none
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBatsmenVsBowlersMatch
teamBowlingWicketKindMatch
Examples
## Not run:
#Get the match details
a <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")
# Plot tne match worm plot
matchWormChart(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad")
'''
df1=match.loc[match.team==team1]
df2=match.loc[match.team==team2]
    # Take copies so the cumulative run columns can be added without SettingWithCopy warnings
    df3=df1[['delivery','total']].copy()
    df3['cumsum']=df3.total.cumsum()
    df4 = df2[['delivery','total']].copy()
    df4['cumsum'] = df4.total.cumsum()
df31 = df3[['delivery','cumsum']]
df41 = df4[['delivery','cumsum']]
#plt.plot(df3.delivery.values,df3.cumsum.values)
df51= pd.merge(df31,df41,how='outer', on='delivery').dropna()
df52=df51.set_index('delivery')
df52.columns = [team1,team2]
df52.plot()
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Delivery')
plt.ylabel('Runs')
plt.title('Match worm chart ' + team1 + ' vs ' + team2)
plt.text(10, 10,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if plot == True:
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
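##########################################################################################
# Usage sketch (illustrative): chains the match-level functions defined above for a single
# converted match. The CSV file name and the two teams below are placeholders/assumptions;
# substitute a deliveries file produced by the YAML-to-CSV conversion step.
###########################################################################################
def _demoSingleMatchAnalysis(matchFile="Kolkata Knight Riders-Sunrisers Hyderabad.csv",
                             team1="Kolkata Knight Riders", team2="Sunrisers Hyderabad"):
    # Load one converted match (ball-by-ball deliveries data frame)
    match = pd.read_csv(matchFile)
    # Wickets by bowlers in this match (see teamBowlingWicketMatch above)
    teamBowlingWicketMatch(match, team1, team2, plot=True)
    # Bowler vs batsman runs in this match
    teamBowlersVsBatsmenMatch(match, team1, team2, plot=True)
    # Worm chart of the two innings
    matchWormChart(match, team1, team2)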
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: getAllMatchesBetweenTeams
# This function gets all the matches between 2 IPL teams
#
###########################################################################################
def getAllMatchesBetweenTeams(team1,team2,dir=".",save=False,odir="."):
'''
Get data on all matches between 2 opposing teams
Description
    This function gets all the data on matches between two opposing IPL teams. The combined
    data frame can be saved by the user and reused in the functions that analyse all matches
    between these teams.
Usage
getAllMatchesBetweenTeams(team1,team2,dir=".",save=FALSE)
Arguments
team1
One of the team in consideration e.g (KKR, CSK etc)
team2
The other team for which matches are needed e.g( MI, GL)
dir
The directory which has the RData files of matches between teams
save
Default=False. This parameter indicates whether the combined data frame
needs to be saved or not. It is recommended to save this large dataframe as
    its creation takes several seconds depending on the number of matches
Value
    matches - The combined data frame (returned when save=False; when save=True it is written to a CSV file instead)
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
plotWinsbyTossDecision
teamBowlersVsBatsmenOppnAllMatches
'''
# Create the 2 combinations
t1 = team1 +'-' + team2 + '*.csv'
t2 = team2 + '-' + team1 + '*.csv'
path1= os.path.join(dir,t1)
path2 = os.path.join(dir,t2)
files = glob.glob(path1) + glob.glob(path2)
print(len(files))
# Save as CSV only if there are matches between the 2 teams
if len(files) !=0:
df = pd.DataFrame()
for file in files:
df1 = pd.read_csv(file)
df=pd.concat([df,df1])
if save==True:
dest= team1 +'-' + team2 + '-allMatches.csv'
output=os.path.join(odir,dest)
df.to_csv(output)
else:
return(df)
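##########################################################################################
# Usage sketch (illustrative): builds the combined head-to-head data frame in memory. The
# directory below is a placeholder and should contain the per-match CSV files produced by
# the conversion step; with save=False the combined data frame is returned directly.
###########################################################################################
def _demoHeadToHeadFrame(dir1="./ipl_csv", team1="Chennai Super Kings", team2="Mumbai Indians"):
    # Combine every CSK-MI match found in dir1 into one data frame
    matches = getAllMatchesBetweenTeams(team1, team2, dir=dir1, save=False)
    return matches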
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: saveAllMatchesBetween2IPLTeams
# This function saves all the matches between allIPL teams
#
###########################################################################################
def saveAllMatchesBetween2IPLTeams(dir1,odir="."):
'''
Saves all matches between 2 IPL teams as dataframe
Description
This function saves all matches between 2 IPL teams as a single dataframe in the
current directory
Usage
saveAllMatchesBetween2IPLTeams(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenVsBowlersOppnAllMatches
'''
teams = ["Chennai Super Kings","Deccan Chargers","Delhi Daredevils",
"Kings XI Punjab", 'Kochi Tuskers Kerala',"Kolkata Knight Riders",
"Mumbai Indians", "Pune Warriors","Rajasthan Royals",
"Royal Challengers Bangalore","Sunrisers Hyderabad","Gujarat Lions",
"Rising Pune Supergiants"]
for team1 in teams:
for team2 in teams:
if team1 != team2:
print("Team1=",team1,"team2=", team2)
getAllMatchesBetweenTeams(team1,team2,dir=dir1,save=True,odir=odir)
time.sleep(2) #Sleep before next save
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBatsmenPartnershiOppnAllMatches
# This function gets the partnerships for a team in all matches
#
###########################################################################################
def teamBatsmenPartnershiOppnAllMatches(matches,theTeam,report="summary",top=5):
'''
    Team batting partnership against an opposition in all IPL matches
Description
This function computes the performance of batsmen against all bowlers of an oppositions in
all matches. This function returns a dataframe
Usage
teamBatsmenPartnershiOppnAllMatches(matches,theTeam,report="summary")
Arguments
matches
All the matches of the team against the oppositions
theTeam
    The team for which the batting partnerships are sought
report
If the report="summary" then the list of top batsmen with the highest partnerships
is displayed. If report="detailed" then the detailed break up of partnership is returned
as a dataframe
top
The number of players to be displayed from the top
Value
partnerships The data frame of the partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenVsBowlersOppnAllMatchesPlot
teamBatsmenPartnershipOppnAllMatchesChart
'''
df1 = matches[matches.team == theTeam]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
if report == 'summary':
return(df5)
elif report == 'detailed':
return(df6)
else:
print("Invalid option")
return
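##########################################################################################
# Usage sketch (illustrative): 'matches' is assumed to be the head-to-head data frame
# returned by getAllMatchesBetweenTeams() or read back from the saved allMatches CSV.
###########################################################################################
def _demoPartnershipReports(matches, theTeam="Chennai Super Kings"):
    # Top batsmen by total partnership runs
    summary = teamBatsmenPartnershiOppnAllMatches(matches, theTeam, report="summary", top=5)
    # Break-up of those partnerships by partner
    detailed = teamBatsmenPartnershiOppnAllMatches(matches, theTeam, report="detailed", top=5)
    return summary, detailed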
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBatsmenPartnershipOppnAllMatchesChart
# This function plots the partnerships for a team in all matches
#
###########################################################################################
def teamBatsmenPartnershipOppnAllMatchesChart(matches,main,opposition,plot=True,top=5,partnershipRuns=20,savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot of team partnership in all IPL matches against an opposition
Description
This function plots the batting partnership of a team againt all oppositions in all
matches This function also returns a dataframe with the batting partnerships
Usage
teamBatsmenPartnershipOppnAllMatchesChart(matches,main,opposition, plot=TRUE,top=5,partnershipRuns=20))
Arguments
matches
All the matches of the team against all oppositions
main
    The main team for which the batting partnerships are sought
    opposition
    The opposition team for which the batting partnerships are sought
    plot
    Whether the partnerships have to be rendered as a plot. If plot=FALSE the data frame is returned
top
The number of players from the top to be included in chart
partnershipRuns
The minimum number of partnership runs to include for the chart
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenPartnershiplOppnAllMatches
saveAllMatchesBetween2IPLTeams
teamBatsmenVsBowlersAllOppnAllMatchesPlot
teamBatsmenVsBowlersOppnAllMatches
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','non_striker','partnershipRuns']]
    # Keep only partnerships above the 'partnershipRuns' cutoff, otherwise there are too many bars
df8 = df7[df7['partnershipRuns'] > partnershipRuns]
df9=df8.groupby(['batsman','non_striker'])['partnershipRuns'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='non_striker',index='batsman').fillna(0)
if plot == True:
df9.plot(kind='bar',stacked=True,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
plt.title('Partnership runs between ' + main + '-' + opposition)
plt.xlabel('Batsman')
plt.ylabel('Partnership runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
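##########################################################################################
# Usage sketch (illustrative): saves the partnership chart to a PNG instead of displaying
# it. The opposition, output directory and file name below are placeholders/assumptions.
###########################################################################################
def _demoPartnershipChartToFile(matches, main="Chennai Super Kings", opposition="Mumbai Indians"):
    teamBatsmenPartnershipOppnAllMatchesChart(matches, main, opposition, plot=True,
                                              top=5, partnershipRuns=20,
                                              savePic=True, dir1=".",
                                              picFile="partnerships.png")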
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBatsmenVsBowlersOppnAllMatches
# This function plots the performance of batsmen against bowlers
#
###########################################################################################
def teamBatsmenVsBowlersOppnAllMatches(matches,main,opposition,plot=True,top=5,runsScored=20,savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
    This function computes the performance of batsmen against the bowlers of an opposition in all matches
Usage
teamBatsmenVsBowlersOppnAllMatches(matches,main,opposition,plot=TRUE,top=5,runsScored=20)
Arguments
matches
All the matches of the team against one specific opposition
main
    The team for which the batting partnerships are sought
opposition
The opposition team
plot
If plot=True then a plot will be displayed else a data frame will be returned
top
The number of players to be plotted or returned as a dataframe. The default is 5
runsScored
    The cutoff limit for runs scored against a bowler
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or dataframe
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenVsBowlersOppnAllMatchesPlot
teamBatsmenPartnershipOppnAllMatchesChart
teamBatsmenVsBowlersOppnAllMatches
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','bowler','runs']]
# Runs scored by bowler
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
df3.columns = ['batsman','bowler','runsScored']
# Need to pick the 'top' number of bowlers
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('runsScored',ascending=False)
df4.columns = ['batsman','totalRunsScored']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','bowler','runsScored']]
    # Keep only batsman-bowler pairs above the 'runsScored' cutoff, otherwise there are too many bars
df8 = df7[df7['runsScored'] >runsScored]
df9=df8.groupby(['batsman','bowler'])['runsScored'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
plt.title('Runs against bowlers ' + main + '-' + opposition)
plt.xlabel('Batsman')
plt.ylabel('Runs scored')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBattingScorecardOppnAllMatches
# This function computes the batting scorecard for all matches
#
###########################################################################################
def teamBattingScorecardOppnAllMatches(matches,main,opposition):
'''
Team batting scorecard of a team in all matches against an opposition
Description
    This function computes and returns the batting scorecard (runs, fours, sixes, balls played)
for the team in all matches against an opposition
Usage
teamBattingScorecardOppnAllMatches(matches,main,opposition)
Arguments
matches
    the data frame of all matches between a team and an opposition obtained with the call getAllMatchesBetweenTeams()
main
    The main team for which the scorecard is required
opposition
The opposition team
Value
scorecard The scorecard of all the matches
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenPartnershipAllOppnAllMatches
teamBowlingWicketKindOppositionAllMatches
'''
team=matches.loc[matches.team== main]
a1= getRuns(team)
b1= getFours(team)
c1= getSixes(team)
# Merge columns
d1=pd.merge(a1, b1, how='outer', on='batsman')
e=pd.merge(d1,c1,how='outer', on='batsman')
e=e.fillna(0)
e['4s']=e['4s'].astype(int)
e['6s']=e['6s'].astype(int)
e['SR']=(e['runs']/e['balls']) *100
scorecard = e[['batsman','runs','balls','4s','6s','SR']].sort_values('runs',ascending=False)
return(scorecard)
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBattingScorecardOppnAllMatches
# This function computes the batting scorecard for all matches
#
###########################################################################################
def teamBowlingScorecardOppnAllMatches(matches,main,opposition):
'''
Team bowling scorecard opposition all matches
Description
    This function computes and returns the bowling dataframe of the best bowlers:
    deliveries, maidens, overs, wickets against an IPL opposition in all matches
Usage
teamBowlingScorecardOppnAllMatches(matches,main,opposition)
Arguments
matches
The matches of the team against all oppositions and all matches
main
Team for which bowling performance is required
opposition
The opposing IPL team
Value
    l A data frame with the bowling performance in all matches against the opposition
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingWicketKindOppositionAllMatches
teamBatsmenVsBowlersOppnAllMatches
plotWinsbyTossDecision
'''
team=matches.loc[matches.team== main]
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
g2 = g1.sort_values('wicket',ascending=False)
return(g2)
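##########################################################################################
# Usage sketch (illustrative): 'matches' is assumed to be a head-to-head data frame. The
# two scorecard functions above return plain data frames, so they can be printed or saved.
###########################################################################################
def _demoOppositionScorecards(matches, main="Chennai Super Kings", opposition="Mumbai Indians"):
    batting = teamBattingScorecardOppnAllMatches(matches, main, opposition)
    bowling = teamBowlingScorecardOppnAllMatches(matches, main, opposition)
    print(batting.head(10))
    print(bowling.head(10))
    return batting, bowling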
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBowlingWicketKindOppositionAllMatches
# This function plots the performance of bowlers and the kind of wickets
#
###########################################################################################
def teamBowlingWicketKindOppositionAllMatches(matches,main,opposition,plot=True,top=5,wickets=2,savePic=False, dir1=".",picFile="pic1.png"):
'''
Team bowlers wicket kind against an opposition in all matches
Description
This function computes performance of bowlers of a team and the wicket kind against
an opposition in all matches against the opposition
Usage
    teamBowlingWicketKindOppositionAllMatches(matches,main,opposition,plot=TRUE,top=5,wickets=2)
    Arguments
    matches
    The data frame of all matches between a team and the opposition
main
The team for which the performance is required
opposition
The opposing team
plot
If plot=True then a plot is displayed else a dataframe is returned
top
The top number of players to be considered
wickets
The minimum number of wickets as cutoff
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or dataframe The return depends on the value of the plot
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
plotWinsByRunOrWickets
teamBowlersVsBatsmenOppnAllMatches
'''
df1=matches.loc[matches.team== main]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df2=df2[df2.player_out != '0']
# Number of wickets taken by bowler
df3=df2.groupby(['bowler','kind']).count().reset_index(inplace=False)
df3.columns = ['bowler','kind','wickets']
# Need to pick the 'top' number of bowlers by wickets
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('wickets',ascending=False)
df4.columns = ['bowler','totalWickets']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','kind','wickets']]
    # Keep only bowlers above the 'wickets' cutoff, otherwise there are too many bars
df8 = df7[df7['wickets'] >wickets]
df9=df8.groupby(['bowler','kind'])['wickets'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Wicket kind by bowlers of ' + main + '-' + opposition)
plt.xlabel('Bowler')
plt.ylabel('Total wickets')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBowlersVsBatsmenOppnAllMatches
# This function plots the performance of the bowlers against batsmen
#
###########################################################################################
def teamBowlersVsBatsmenOppnAllMatches(matches,main,opposition,plot=True,top=5,runsConceded=10, savePic=False, dir1=".",picFile="pic1.png"):
'''
Team bowlers vs batsmen against an opposition in all matches
Description
This function computes performance of bowlers of a team against an opposition in all
matches against the opposition
Usage
teamBowlersVsBatsmenOppnAllMatches(matches,main,opposition,plot=True,top=5,runsConceded=10))
Arguments
matches
    The data frame of all matches between a team and the opposition.
main
The main team against which the performance is required
opposition
The opposition team against which the performance is require
plot
If true plot else return dataframe
top
The number of rows to be returned. 5 by default
    runsConceded
    The minimum number of runs conceded to use as cutoff
    savePic
    If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
dataframe The dataframe with all performances
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenPartnershipOppnAllMatches
teamBowlersVsBatsmenOppnAllMatchesRept
'''
df1=matches.loc[matches.team== main]
df2= df1[['bowler','batsman','runs']]
    # Runs conceded by each bowler to each batsman
    df3=df2.groupby(['bowler','batsman']).sum().reset_index(inplace=False)
    df3.columns = ['bowler','batsman','runsConceded']
    # Need to pick the 'top' number of bowlers by runs conceded
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('runsConceded',ascending=False)
df4.columns = ['bowler','totalRunsConceded']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','batsman','runsConceded']]
    # Keep only bowler-batsman pairs above the 'runsConceded' cutoff, otherwise there are too many bars
df8 = df7[df7['runsConceded'] >runsConceded]
df9=df8.groupby(['bowler','batsman'])['runsConceded'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Runs conceded by bowlers of ' + main + '-' + opposition)
plt.xlabel('Bowler')
plt.ylabel('Total runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
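##########################################################################################
# Usage sketch (illustrative): plots the wicket kinds and the bowler-vs-batsman runs for
# one opposition pairing, using the two functions defined above.
###########################################################################################
def _demoOppositionBowlingPlots(matches, main="Chennai Super Kings", opposition="Mumbai Indians"):
    teamBowlingWicketKindOppositionAllMatches(matches, main, opposition, plot=True, top=5, wickets=2)
    teamBowlersVsBatsmenOppnAllMatches(matches, main, opposition, plot=True, top=5, runsConceded=10)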
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: plotWinLossBetweenTeams
# This function plots the number of wins and losses in teams
#
###########################################################################################
def plotWinLossBetweenTeams(matches,team1,team2,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot wins for each team
Description
    This function computes and plots the number of wins for each team in all their encounters.
    The plot includes the number of wins by each team and the matches with no result
Usage
plotWinLossBetweenTeams(matches)
Arguments
matches
The dataframe with all matches between 2 IPL teams
team1
The 1st team
team2
The 2nd team
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
teamBattingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
getAllMatchesBetweenTeams
'''
a=matches[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
b=a.groupby('winner').count().reset_index(inplace=False)
b.columns = ['winner','number']
sns.barplot(x='winner',y='number',data=b)
plt.xlabel('Winner')
plt.ylabel('Number')
plt.title("Wins vs losses " + team1 + "-"+ team2)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: plotWinsByRunOrWickets
# This function plots how the win for the team was whether by runs or wickets
#
###########################################################################################
def plotWinsByRunOrWickets(matches,team1,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot whether the wins for the team was by runs or wickets
Description
    This function computes and plots the number of wins by runs vs the number of wins
    by wickets
Usage
plotWinsByRunOrWickets(matches,team1)
Arguments
matches
The dataframe with all matches between 2 IPL teams
team1
The team for which the plot has to be done
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
    Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
getAllMatchesBetweenTeams
'''
# Get the number of matches won
df= matches.loc[matches.winner == team1]
a=df[['date','winType']].groupby(['date','winType']).count().reset_index(inplace=False)
b=a.groupby('winType').count().reset_index(inplace=False)
b.columns = ['winType','number']
sns.barplot(x='winType',y='number',data=b)
plt.xlabel('Win Type - Runs or wickets')
plt.ylabel('Number')
plt.title("Win type for team -" + team1 )
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
            plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: plotWinsbyTossDecision
# This function plots the number of wins/losses for team based on its toss decision
#
###########################################################################################
def plotWinsbyTossDecision(matches,team1,tossDecision='bat', plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
    Plot the wins and losses for the team based on its toss decision
    Description
    This function computes and plots the number of wins and losses when the team won the
    toss and made the given toss decision (bat or field)
Usage
plotWinsbyTossDecision(matches,team1,tossDecision='bat')
Arguments
matches
The dataframe with all matches between 2 IPL teams
team1
The team for which the plot has to be done
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
teamBowlingWicketKindOppositionAllMatches
'''
df=matches.loc[(matches.tossDecision==tossDecision) & (matches.tossWinner==team1)]
a=df[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
b=a.groupby('winner').count().reset_index(inplace=False)
b.columns = ['winner','number']
sns.barplot(x='winner',y='number',data=b)
plt.xlabel('Winner ' + 'when toss decision was to :' + tossDecision)
plt.ylabel('Number')
plt.title('Wins vs losses for ' + team1 + ' when toss decision was to ' + tossDecision )
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
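##########################################################################################
# Usage sketch (illustrative): the three win/loss plots above all operate on the same
# head-to-head data frame and only differ in how the wins are broken up.
###########################################################################################
def _demoHeadToHeadWinLossPlots(matches, team1="Chennai Super Kings", team2="Mumbai Indians"):
    plotWinLossBetweenTeams(matches, team1, team2, plot=True)
    plotWinsByRunOrWickets(matches, team1, plot=True)
    plotWinsbyTossDecision(matches, team1, tossDecision='bat', plot=True)
    plotWinsbyTossDecision(matches, team1, tossDecision='field', plot=True)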
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: getAllMatchesAllOpposition
# This function gets all the matches between a IPL team and all opposition
#
###########################################################################################
def getAllMatchesAllOpposition(team1,dir=".",save=False,odir="."):
'''
Get data on all matches against all opposition
Description
    This function gets all the matches played by a particular IPL team
    against all other oppositions. It constructs a single large dataframe of
    all these matches. The result can be saved by the user and reused in the
    functions that analyse all matches against all oppositions.
Usage
getAllMatchesAllOpposition(team,dir=".",save=FALSE)
Arguments
team
The team for which all matches and all opposition has to be obtained e.g. India, Pakistan
dir
The directory in which the saved .RData files exist
save
    Default=False. This parameter indicates whether the combined data frame needs to be saved or not. It is recommended to save this large dataframe as its creation takes several seconds depending on the number of matches
Value
match The combined data frame
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
saveAllMatchesAllOppositionIPLT20
teamBatsmenPartnershiAllOppnAllMatches
'''
# Create the 2 combinations
t1 = '*' + team1 +'*.csv'
path= os.path.join(dir,t1)
files = glob.glob(path)
print(len(files))
# Save as CSV only if there are matches between the 2 teams
if len(files) !=0:
df = pd.DataFrame()
for file in files:
df1 = pd.read_csv(file)
df=pd.concat([df,df1])
if save==True:
dest= team1 + '-allMatchesAllOpposition.csv'
output=os.path.join(odir,dest)
df.to_csv(output)
else:
return(df)
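##########################################################################################
# Usage sketch (illustrative): builds the all-opposition data frame for one team in memory.
# The directory is a placeholder and should hold the converted per-match CSV files.
###########################################################################################
def _demoAllOppositionFrame(dir1="./ipl_csv", team="Kolkata Knight Riders"):
    allMatches = getAllMatchesAllOpposition(team, dir=dir1, save=False)
    return allMatches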
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: saveAllMatchesAllOppositionIPLT20
# This function saves all the matches between all IPL team and all opposition
#
###########################################################################################
def saveAllMatchesAllOppositionIPLT20(dir1,odir="."):
'''
Saves matches against all IPL teams as dataframe and CSV for an IPL team
Description
    This function saves all IPL matches against all oppositions as a single
    dataframe per team in the output directory
Usage
saveAllMatchesAllOppositionIPLT20(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
convertYaml2PandasDataframeT20
teamBattingScorecardMatch
'''
teams = ["Chennai Super Kings","Deccan Chargers","Delhi Daredevils",
"Kings XI Punjab", 'Kochi Tuskers Kerala',"Kolkata Knight Riders",
"Mumbai Indians", "Pune Warriors","Rajasthan Royals",
"Royal Challengers Bangalore","Sunrisers Hyderabad","Gujarat Lions",
"Rising Pune Supergiants"]
for team in teams:
print("Team=",team)
getAllMatchesAllOpposition(team,dir=dir1,save=True,odir=odir)
time.sleep(2) #Sleep before next save
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBatsmenPartnershiAllOppnAllMatches
# This function computes the partnerships of an IPK team against all other IPL teams
#
###########################################################################################
def teamBatsmenPartnershiAllOppnAllMatches(matches,theTeam,report="summary",top=5):
'''
    Team batting partnership against all oppositions in all IPL matches
Description
    This function computes the performance of batsmen against all bowlers of all oppositions in
all matches. This function returns a dataframe
Usage
teamBatsmenPartnershiAllOppnAllMatches(matches,theTeam,report="summary")
Arguments
matches
All the matches of the team against the oppositions
theTeam
    The team for which the batting partnerships are sought
report
If the report="summary" then the list of top batsmen with the highest partnerships
is displayed. If report="detailed" then the detailed break up of partnership is returned
as a dataframe
top
The number of players to be displayed from the top
Value
partnerships The data frame of the partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenVsBowlersOppnAllMatchesPlot
teamBatsmenPartnershipOppnAllMatchesChart
'''
df1 = matches[matches.team == theTeam]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
if report == 'summary':
return(df5)
elif report == 'detailed':
return(df6)
else:
print("Invalid option")
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBatsmenPartnershipAllOppnAllMatchesChart
# This function computes and plots the partnerships of an IPK team against all other IPL teams
#
###########################################################################################
def teamBatsmenPartnershipAllOppnAllMatchesChart(matches,main,plot=True,top=5,partnershipRuns=20, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plots team batting partnership all matches all oppositions
Description
    This function plots the batting partnership of a team against all oppositions in all matches. This function also returns a dataframe with the batting partnerships
Usage
    teamBatsmenPartnershipAllOppnAllMatchesChart(matches,main,plot=True,top=5,partnershipRuns=20)
    Arguments
    matches
    All the matches of the team against all oppositions
    main
    The main team for which the batting partnerships are sought
plot
    Whether the partnerships have to be rendered as a plot. If plot=FALSE the data frame is returned
top
The number of players from the top to be included in chart
partnershipRuns
The minimum number of partnership runs to include for the chart
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','non_striker','partnershipRuns']]
    # Keep only partnerships above the 'partnershipRuns' cutoff, otherwise there are too many bars
df8 = df7[df7['partnershipRuns'] > partnershipRuns]
df9=df8.groupby(['batsman','non_striker'])['partnershipRuns'].sum().unstack(fill_value=0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='non_striker',index='batsman').fillna(0)
if plot == True:
df9.plot(kind='bar',stacked=True,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Batting partnerships of ' + main + ' against all teams')
plt.xlabel('Batsman')
plt.ylabel('Partnership runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBatsmenVsBowlersAllOppnAllMatches
# This function computes and plots the performance of batsmen
# of an IPL team against all other teams
#
###########################################################################################
def teamBatsmenVsBowlersAllOppnAllMatches(matches,main,plot=True,top=5,runsScored=20, savePic=False, dir1=".",picFile="pic1.png"):
'''
Report of team batsmen vs bowlers in all matches all oppositions
Description
This function computes the performance of batsmen against all bowlers of all oppositions in all matches
Usage
teamBatsmenVsBowlersAllOppnAllMatches(matches,main,plot=True,top=5,runsScored=20)
Arguments
matches
All the matches of the team against all oppositions
main
    The team for which the batting partnerships are sought
plot
Whether a plot is required or not
top
The number of top batsmen to be included
runsScored
    The cutoff for runs scored against a bowler
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
The data frame of the batsman and the runs against bowlers
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','bowler','runs']]
# Runs scored by bowler
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
df3.columns = ['batsman','bowler','runsScored']
print(df3.shape)
# Need to pick the 'top' number of bowlers
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('runsScored',ascending=False)
print(df4.shape)
df4.columns = ['batsman','totalRunsScored']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','bowler','runsScored']]
    # Keep only batsman-bowler pairs above the 'runsScored' cutoff, otherwise there are too many bars
df8 = df7[df7['runsScored'] >runsScored]
df9=df8.groupby(['batsman','bowler'])['runsScored'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
#ax.legend(fontsize=25)
plt.title('Runs by ' + main + ' against all T20 bowlers')
plt.xlabel('Batsman')
plt.ylabel('Runs scored')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBattingScorecardAllOppnAllMatches
# This function computes and batting scorecard of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBattingScorecardAllOppnAllMatches(matches,main):
'''
Team batting scorecard against all oppositions in all matches
Description
    This function computes and returns the batting scorecard of a team in all matches against all oppositions. The data frame has the balls played, 4s, 6s and runs scored by each batsman
Usage
teamBattingScorecardAllOppnAllMatches(matches,theTeam)
Arguments
matches
All matches of the team in all matches with all oppositions
main
    The team for which the batting scorecard is sought
Value
details The data frame of the scorecard of the team in all matches against all oppositions
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
team=matches.loc[matches.team== main]
a1= getRuns(team)
b1= getFours(team)
c1= getSixes(team)
# Merge columns
d1=pd.merge(a1, b1, how='outer', on='batsman')
e=pd.merge(d1,c1,how='outer', on='batsman')
e=e.fillna(0)
e['4s']=e['4s'].astype(int)
e['6s']=e['6s'].astype(int)
e['SR']=(e['runs']/e['balls']) *100
scorecard = e[['batsman','runs','balls','4s','6s','SR']].sort_values('runs',ascending=False)
return(scorecard)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBowlingScorecardAllOppnAllMatches
# This function computes and bowling scorecard of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBowlingScorecardAllOppnAllMatches(matches,main):
'''
Team bowling scorecard all opposition all matches
Description
    This function computes and returns the bowling dataframe of bowlers: deliveries,
maidens, overs, wickets against all oppositions in all matches
Usage
teamBowlingScorecardAllOppnAllMatches(matches,theTeam)
Arguments
matches
The matches of the team against all oppositions and all matches
theTeam
Team for which bowling performance is required
Value
    l A data frame with the bowling performance in all matches against all oppositions
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
team=matches.loc[matches.team== main]
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
g2 = g1.sort_values('wicket',ascending=False)
return(g2)
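##########################################################################################
# Usage sketch (illustrative): 'matches' is assumed to be the all-opposition data frame for
# the team, e.g. the one returned by getAllMatchesAllOpposition() above.
###########################################################################################
def _demoAllOppositionScorecards(matches, main="Kolkata Knight Riders"):
    batting = teamBattingScorecardAllOppnAllMatches(matches, main)
    bowling = teamBowlingScorecardAllOppnAllMatches(matches, main)
    print(batting.head(10))
    print(bowling.head(10))
    return batting, bowling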
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBowlingWicketKindAllOppnAllMatches
# This function computes and plots the wicket kind of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBowlingWicketKindAllOppnAllMatches(matches,main,plot=True,top=5,wickets=2,savePic=False, dir1=".",picFile="pic1.png"):
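    '''
    Team bowling wicket kind against all oppositions in all matches
    Description
    This function computes and plots the wicket kinds taken by the bowlers of a team
    against all oppositions in all matches
    Usage
    teamBowlingWicketKindAllOppnAllMatches(matches,main,plot=True,top=5,wickets=2)
    Arguments
    matches
    All the matches of the team against all oppositions
    main
    The team for which the bowling performance is required
    plot
    If plot=True then a plot is displayed else a dataframe is returned
    top
    The top number of bowlers to be considered
    wickets
    The minimum number of wickets as cutoff
    savePic
    If savePic = True then the plot is saved
    dir1
    The directory where the plot is saved
    picFile
    The name of the savefile
    Value
    None or dataframe The return depends on the value of the plot
    '''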
df1=matches.loc[matches.team== main]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df2=df2[df2.player_out != '0']
# Number of wickets taken by bowler
df3=df2.groupby(['bowler','kind']).count().reset_index(inplace=False)
df3.columns = ['bowler','kind','wickets']
# Need to pick the 'top' number of bowlers by wickets
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('wickets',ascending=False)
df4.columns = ['bowler','totalWickets']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','kind','wickets']]
    # Keep only bowlers above the 'wickets' cutoff, otherwise there are too many bars
df8 = df7[df7['wickets'] >wickets]
df9=df8.groupby(['bowler','kind'])['wickets'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Wicket kind by bowlers of ' + main + ' against all T20 teams')
plt.xlabel('Bowler')
plt.ylabel('Total wickets')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBowlersVsBatsmenAllOppnAllMatches
# This function computes and plots the performance of bowlers of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBowlersVsBatsmenAllOppnAllMatches(matches,main,plot=True,top=5,runsConceded=10,savePic=False, dir1=".",picFile="pic1.png"):
'''
Compute team bowlers vs batsmen all opposition all matches
Description
This function computes performance of bowlers of a team against all opposition in all matches
Usage
    teamBowlersVsBatsmenAllOppnAllMatches(matches,main,plot=True,top=5,runsConceded=10)
Arguments
matches
    the data frame of all matches between a team and all oppositions obtained with the call getAllMatchesAllOpposition()
main
    The team for which the performance is required
plot
Whether a plot should be displayed or a dataframe to be returned
top
The top number of bowlers in result
    runsConceded
The number of runs conceded by bowlers
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
dataframe The dataframe with all performances
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
df1=matches.loc[matches.team== main]
df2= df1[['bowler','batsman','runs']]
    # Runs conceded by each bowler to each batsman
    df3=df2.groupby(['bowler','batsman']).sum().reset_index(inplace=False)
    df3.columns = ['bowler','batsman','runsConceded']
    # Need to pick the 'top' number of bowlers by runs conceded
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('runsConceded',ascending=False)
df4.columns = ['bowler','totalRunsConceded']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','batsman','runsConceded']]
    # Keep only bowler-batsman pairs above the 'runsConceded' cutoff, otherwise there are too many bars
df8 = df7[df7['runsConceded'] >runsConceded]
df9=df8.groupby(['bowler','batsman'])['runsConceded'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Performance of ' + main + ' bowlers vs batsmen')
plt.xlabel('Bowler')
plt.ylabel('Total runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: plotWinLossByTeamAllOpposition
# This function computes and plots the wins and losses of an IPL team against all other
# IPL teams
#
###########################################################################################
def plotWinLossByTeamAllOpposition(matches, team1, plot='summary',savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot wins for each team
Description
    This function computes and plots the number of wins for each team in all their encounters.
    The plot includes the number of wins by each team and the matches with no result
Usage
    plotWinLossByTeamAllOpposition(matches, team1, plot='summary')
    Arguments
    matches
    The dataframe with all matches of the team against all oppositions
    team1
    The team for which the wins and losses are sought
plot
Summary or detailed
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
a=matches[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
# Plot the overall performance as wins and losses
if plot=="summary":
m= a.loc[a.winner==team1]['winner'].count()
n= a.loc[a.winner!=team1]['winner'].count()
df=pd.DataFrame({'outcome':['win','loss'],'number':[m,n]})
sns.barplot(x='outcome',y='number',data=df)
plt.xlabel('Outcome')
plt.ylabel('Number')
plt.title("Wins vs losses(summary) of " + team1 + ' against all Opposition' )
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
elif plot=="detailed" :
#Plot breakup by team
b=a.groupby('winner').count().reset_index(inplace=False)
        # If 'winner' is '0' then the match is a tie. Set as 'Tie'
b.loc[b.winner=='0','winner']='Tie'
b.columns = ['winner','number']
ax=sns.barplot(x='winner',y='number',data=b)
plt.xlabel('Winner')
plt.ylabel('Number')
plt.title("Wins vs losses(detailed) of " + team1 + ' against all Opposition' )
ax.set_xticklabels(ax.get_xticklabels(),rotation=60,fontsize=6)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
print("Unknown option")
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: plotWinsByRunOrWicketsAllOpposition
# This function computes and plots the wins by runs or wickets of an IPL team against all other
# IPL teams
#
###########################################################################################
def plotWinsByRunOrWicketsAllOpposition(matches,team1,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot whether the wins for the team was by runs or wickets
Description
    This function computes and plots the number of wins by runs vs the number of wins
    by wickets against all oppositions
Usage
plotWinsByRunOrWicketsAllOpposition(matches,team1)
Arguments
matches
The dataframe with all matches between an IPL team and all IPL teams
team1
The team for which the plot has to be done
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
# Get the number of matches won
df= matches.loc[matches.winner == team1]
a=df[['date','winType']].groupby(['date','winType']).count().reset_index(inplace=False)
b=a.groupby('winType').count().reset_index(inplace=False)
b.columns = ['winType','number']
sns.barplot(x='winType',y='number',data=b)
plt.xlabel('Win Type - Runs or wickets')
plt.ylabel('Number')
plt.title("Win type for team -" + team1 + ' against all opposition' )
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: plotWinsbyTossDecisionAllOpposition
# This function computes and plots the wins and losses by toss decision of an IPL team
# against all IPL teams
#
###########################################################################################
def plotWinsbyTossDecisionAllOpposition(matches,team1,tossDecision='bat',plot="summary", savePic=False, dir1=".",picFile="pic1.png"):
'''
    Plot the wins and losses for the team by its toss decision against all oppositions
    Description
    This function computes and plots the number of wins and losses for the team when it
    won the toss and made the given toss decision (bat or field), against all oppositions
Usage
plotWinsbyTossDecisionAllOpposition(matches,team1,tossDecision='bat',plot="summary")
Arguments
matches
The dataframe with all matches between 2 IPL teams
team1
The team for which the plot has to be done
plot
'summary' or 'detailed'
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
teamBowlingWicketKindOppositionAllMatches
'''
df=matches.loc[(matches.tossDecision==tossDecision) & (matches.tossWinner==team1)]
a=df[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
if plot=="summary":
m= a.loc[a.winner==team1]['winner'].count()
n= a.loc[a.winner!=team1]['winner'].count()
df=pd.DataFrame({'outcome':['win','loss'],'number':[m,n]})
sns.barplot(x='outcome',y='number',data=df)
plt.xlabel('Outcome')
plt.ylabel('Number')
plt.title("Wins vs losses(summary) against all opposition when toss decision was to " + tossDecision + ' for ' + team1 )
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
elif plot=="detailed" :
#Plot breakup by team
b=a.groupby('winner').count().reset_index(inplace=False)
        # If 'winner' is '0' then the match is a tie. Set as 'Tie'
b.loc[b.winner=='0','winner']='Tie'
b.columns = ['winner','number']
ax=sns.barplot(x='winner',y='number',data=b)
plt.xlabel(team1 + ' chose to ' + tossDecision)
plt.ylabel('Number')
plt.title('Wins vs losses(detailed) against all opposition for ' + team1 + ' when toss decision was to ' + tossDecision )
ax.set_xticklabels(ax.get_xticklabels(),rotation=60, fontsize=6)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
            plt.show()
plt.gcf().clear()
return
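##########################################################################################
# Usage sketch (illustrative): summary and detailed win/loss views against all oppositions
# for one team, using the plotting functions defined above.
###########################################################################################
def _demoAllOppositionWinLossPlots(matches, team1="Kolkata Knight Riders"):
    plotWinLossByTeamAllOpposition(matches, team1, plot='summary')
    plotWinLossByTeamAllOpposition(matches, team1, plot='detailed')
    plotWinsByRunOrWicketsAllOpposition(matches, team1, plot=True)
    plotWinsbyTossDecisionAllOpposition(matches, team1, tossDecision='bat', plot="summary")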
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: Details
# This function computes the batting details of a team
# IPL teams
#
###########################################################################################
def getTeamBattingDetails(team,dir=".",save=False,odir="."):
'''
Description
    This function gets the batting details of a team in all matches against all oppositions: balls faced,
    4s, 6s, strike rate, runs, venue etc. for every batsman. The resulting data frame is then used for the
    analyses of batsmen. For each match this function calls teamBattingScorecardMatch()
Usage
getTeamBattingDetails(team,dir=".",save=FALSE)
Arguments
team
The team for which batting details is required
dir
    The source directory of the CSV files obtained with convertYaml2PandasDataframeT20()
save
    Whether the data frame needs to be saved as a CSV file or not. It is recommended to set save=True as the data can be used for a lot of analyses of batsmen
Value
battingDetails The dataframe with the batting details
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
Examples
m=getTeamBattingDetails(team1,dir1,save=True)
'''
# Get all matches played by team
t1 = '*' + team +'*.csv'
path= os.path.join(dir,t1)
files = glob.glob(path)
# Create an empty dataframe
details = pd.DataFrame()
# Loop through all matches played by team
for file in files:
match=pd.read_csv(file)
scorecard,extras=teamBattingScorecardMatch(match,team)
if scorecard.empty:
continue
# Filter out only the rows played by team
match1 = match.loc[match.team==team]
        # Check if there were wickets; the 'kind' column will have 'bowled', 'caught' etc
        if len(match1) != 0:
if isinstance(match1.kind.iloc[0],str):
b=match1.loc[match1.kind != '0']
# Get the details of the wicket
wkts= b[['batsman','bowler','fielders','kind','player_out']]
#date','team2','winner','result','venue']]
df=pd.merge(scorecard,wkts,how='outer',on='batsman')
# Fill NA as not outs
df =df.fillna('notOut')
# Set other info
if len(b) != 0:
df['date']= b['date'].iloc[0]
df['team2']= b['team2'].iloc[0]
df['winner']= b['winner'].iloc[0]
df['result']= b['result'].iloc[0]
df['venue']= b['venue'].iloc[0]
details= pd.concat([details,df])
details = details.sort_values(['batsman','date'])
if save==True:
fileName = "./" + team + "-BattingDetails.csv"
output=os.path.join(odir,fileName)
details.to_csv(output)
return(details)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: getBatsmanDetails
# This function gets the batsman details
# IPL teams
#
###########################################################################################
def getBatsmanDetails(team, name,dir="."):
'''
Get batting details of batsman from match
Description
This function gets the batting details of a batsman given the match data as a RData file
Usage
getBatsmanDetails(team,name,dir=".")
Arguments
team
The team of the batsman e.g. India
name
Name of batsman
dir
The directory where the source file exists
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
batsmanRunsPredict
batsmanMovingAverage
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
## Not run:
name="<NAME>"
team='Chennai Super Kings'
#df=getBatsmanDetails(team, name,dir=".")
'''
    path = os.path.join(dir, team + "-BattingDetails.csv")
battingDetails= pd.read_csv(path)
batsmanDetails = battingDetails.loc[battingDetails['batsman'].str.contains(name)]
return(batsmanDetails)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: getBatsmanDetails
# This function plots runs vs deliveries for the batsman
#
###########################################################################################
def batsmanRunsVsDeliveries(df,name= "A Late Cut",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Runs versus deliveries faced
Description
This function plots the runs scored and the deliveries required. A regression smoothing function is used to fit the points
Usage
batsmanRunsVsDeliveries(df, name= "A Late Cut")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanRunsVsDeliveries(df, name)
'''
rcParams['figure.figsize'] = 8, 5
plt.scatter(df.balls,df.runs)
sns.lmplot(x='balls',y='runs', data=df)
plt.xlabel("Balls faced",fontsize=8)
plt.ylabel('Runs',fontsize=8)
atitle=name + "- Runs vs balls faced"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanFoursSixes
# This function gets the batsman fours and sixes for batsman
#
#
###########################################################################################
def batsmanFoursSixes(df,name= "A Leg Glance", plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the total runs, fours and sixes of the batsman
Usage
batsmanFoursSixes(df,name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
    plot
    If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanDismissals batsmanRunsVsDeliveries batsmanRunsVsStrikeRate batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanFoursSixes(df,"SK Raina")
'''
# Compute runs from fours and sixes
rcParams['figure.figsize'] = 8, 5
df['RunsFromFours']=df['4s']*4
df['RunsFromSixes']=df['6s']*6
df1 = df[['balls','runs','RunsFromFours','RunsFromSixes']]
# Total runs
sns.scatterplot('balls','runs',data=df1)
# Fit a linear regression line
    balls=df1.balls.values.reshape(-1,1)
linreg = LinearRegression().fit(balls, df1.runs)
x=np.linspace(0,120,10)
#Plot regression line balls vs runs
plt.plot(x, linreg.coef_ * x + linreg.intercept_, color='blue',label="Total runs")
# Runs from fours
sns.scatterplot('balls','RunsFromFours',data=df1)
#Plot regression line balls vs Runs from fours
linreg = LinearRegression().fit(balls, df1.RunsFromFours)
plt.plot(x, linreg.coef_ * x + linreg.intercept_, color='red',label="Runs from fours")
# Runs from sixes
sns.scatterplot('balls','RunsFromSixes',data=df1)
#Plot regression line balls vs Runs from sixes
linreg = LinearRegression().fit(balls, df1.RunsFromSixes)
plt.plot(x, linreg.coef_ * x + linreg.intercept_, color='green',label="Runs from sixes")
plt.xlabel("Balls faced",fontsize=8)
plt.ylabel('Runs',fontsize=8)
atitle=name + "- Total runs, fours and sixes"
plt.title(atitle,fontsize=8)
plt.legend(loc="upper left")
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanDismissals
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
    This function computes and plots the type of dismissals of the batsman
Usage
batsmanDismissals(df,name="A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanDismissals(df,"SK Raina")
'''
# Count dismissals
rcParams['figure.figsize'] = 8, 5
df1 = df[['batsman','kind']]
df2 = df1.groupby('kind').count().reset_index(inplace=False)
df2.columns = ['dismissals','count']
plt.pie(df2['count'], labels=df2['dismissals'],autopct='%.1f%%')
atitle= name + "-Dismissals"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanRunsVsStrikeRate
# This function plots the runs vs strike rate
#
#
###########################################################################################
def batsmanRunsVsStrikeRate (df,name= "A Late Cut", plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
    This function plots the strike rate of the batsman against the runs scored. A loess line is fitted over the points
Usage
batsmanRunsVsStrikeRate(df, name= "A Late Cut")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanDismissals
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanRunsVsStrikeRate(df,"SK Raina")
'''
rcParams['figure.figsize'] = 8, 5
plt.scatter(df.runs,df.SR)
sns.lmplot(x='runs',y='SR', data=df,order=2)
plt.xlabel("Runs",fontsize=8)
plt.ylabel('Strike Rate',fontsize=8)
atitle=name + "- Runs vs Strike rate"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: movingaverage
# This computes the moving average
#
#
###########################################################################################
def movingaverage(interval, window_size):
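    '''Simple moving average: smooths `interval` with a flat window of `window_size` points, e.g. movingaverage(df.runs, 10).'''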
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanMovingAverage
# This function plots the moving average of runs
#
#
###########################################################################################
def batsmanMovingAverage(df, name, plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
    This function plots the runs scored by the batsman over the career as a time series. A loess regression line is plotted on the moving average of the batsman's runs
Usage
batsmanMovingAverage(df, name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanDismissals
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanMovingAverage(df,"SK Raina")
'''
rcParams['figure.figsize'] = 8, 5
y_av = movingaverage(df.runs, 10)
date= pd.to_datetime(df['date'])
plt.plot(date, y_av,"b")
plt.xlabel('Date',fontsize=8)
plt.ylabel('Runs',fontsize=8)
plt.xticks(rotation=90)
atitle = name + "-Moving average of runs"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanCumulativeAverageRuns
# This function plots the cumulative average runs
#
#
###########################################################################################
def batsmanCumulativeAverageRuns(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(df,name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanCumulativeAverageRuns(df,"SK Raina")
'''
rcParams['figure.figsize'] = 8, 5
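    # Cumulative average = running sum of runs divided by the number of innings so far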
cumAvgRuns = df['runs'].cumsum()/pd.Series(np.arange(1, len( df['runs'])+1), df['runs'].index)
plt.plot(cumAvgRuns)
plt.xlabel('No of matches',fontsize=8)
plt.ylabel('Cumulative Average Runs',fontsize=8)
plt.xticks(rotation=90)
atitle = name + "-Cumulative Average Runs vs matches"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanCumulativeStrikeRate
# This function plots the cumulative average Strike rate
#
#
###########################################################################################
def batsmanCumulativeStrikeRate(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(df,name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
    #batsmanCumulativeStrikeRate(df,name)
'''
rcParams['figure.figsize'] = 8, 5
cumAvgRuns = df['SR'].cumsum()/pd.Series(np.arange(1, len( df['SR'])+1), df['SR'].index)
plt.plot(cumAvgRuns)
plt.xlabel('No of matches',fontsize=8)
plt.ylabel('Cumulative Average Strike Rate',fontsize=8)
plt.xticks(rotation=70)
atitle = name + "-Cumulative Average Strike Rate vs matches"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanRunsAgainstOpposition
# This function plots the batsman's runs against opposition
#
#
###########################################################################################
def batsmanRunsAgainstOpposition(df,name= "A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the mean runs scored by the batsman against different oppositions
Usage
batsmanRunsAgainstOpposition(df, name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanRunsAgainstOpposition(df,name)
'''
rcParams['figure.figsize'] = 8, 5
df1 = df[['batsman', 'runs','team2']]
df2=df1.groupby('team2').agg(['sum','mean','count'])
df2.columns= ['_'.join(col).strip() for col in df2.columns.values]
# Reset index
df3=df2.reset_index(inplace=False)
ax=sns.barplot(x='team2', y="runs_mean", data=df3)
plt.xticks(rotation="vertical",fontsize=8)
plt.xlabel('Opposition',fontsize=8)
plt.ylabel('Mean Runs',fontsize=8)
atitle=name + "-Mean Runs against opposition"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanRunsVenue
# This function plots the batsman's runs at venues
#
#
###########################################################################################
def batsmanRunsVenue(df,name= "A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the mean runs scored by the batsman at different venues of the world
Usage
batsmanRunsVenue(df, name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
    picFile
    The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
batsmanRunsAgainstOpposition
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
#batsmanRunsVenue(df,name)
'''
rcParams['figure.figsize'] = 8, 5
df1 = df[['batsman', 'runs','venue']]
df2=df1.groupby('venue').agg(['sum','mean','count'])
df2.columns= ['_'.join(col).strip() for col in df2.columns.values]
# Reset index
df3=df2.reset_index(inplace=False)
ax=sns.barplot(x='venue', y="runs_mean", data=df3)
plt.xticks(rotation="vertical",fontsize=8)
plt.xlabel('Venue',fontsize=8)
plt.ylabel('Mean Runs',fontsize=8)
atitle=name + "-Mean Runs at venues"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: teamBowlingPerDetails
# This function gets the bowling performances
#
#
###########################################################################################
def teamBowlingPerDetails(team):
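    '''
    Compute the bowling performance for every bowler in the opposition deliveries
    dataframe: overs bowled, runs conceded, maidens, wickets and economy rate
    '''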
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
return(g1)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: getTeamBowlingDetails
# This function gets the team bowling details
#
#
###########################################################################################
def getTeamBowlingDetails (team,dir=".",save=False,odir="."):
'''
Description
    This function gets the bowling details of a team in all matches against all oppositions. This gets all the details of the bowlers, e.g. deliveries, maidens, runs, wickets, venue, date, winner etc.
Usage
    getTeamBowlingDetails(team,dir=".",save=False,odir=".")
Arguments
team
The team for which detailed bowling info is required
dir
    The source directory of the CSV files obtained with convertYaml2PandasDataframeT20()
    save
    Whether the data frame needs to be saved as CSV or not. It is recommended to set save=True as the data can be used for a lot of analyses of bowlers
Value
bowlingDetails The dataframe with the bowling details
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
getBatsmanDetails
getBowlerWicketDetails
batsmanDismissals
getTeamBattingDetails
Examples
dir1= "C:\\software\\cricket-package\\yorkpyIPLData\\data"
    team1='Delhi Daredevils'
m=getTeamBowlingDetails(team1,dir1,save=True)
'''
# Get all matches played by team
t1 = '*' + team +'*.csv'
path= os.path.join(dir,t1)
files = glob.glob(path)
# Create an empty dataframe
details = pd.DataFrame()
# Loop through all matches played by team
for file in files:
match=pd.read_csv(file)
if(match.size != 0):
team1=match.loc[match.team != team]
else:
continue
if len(team1) !=0:
scorecard=teamBowlingPerDetails(team1)
scorecard['date']= match['date'].iloc[0]
scorecard['team2']= match['team2'].iloc[0]
scorecard['winner']= match['winner'].iloc[0]
scorecard['result']= match['result'].iloc[0]
scorecard['venue']= match['venue'].iloc[0]
details= pd.concat([details,scorecard])
details = details.sort_values(['bowler','date'])
else:
pass # The team did not bowl
if save==True:
fileName = "./" + team + "-BowlingDetails.csv"
output=os.path.join(odir,fileName)
details.to_csv(output,index=False)
return(details)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: getBowlerWicketDetails
# This function gets the bowler wicket
#
#
###########################################################################################
def getBowlerWicketDetails (team, name,dir="."):
'''
Description
This function gets the bowling of a bowler (overs,maidens,runs,wickets,venue, opposition)
Usage
getBowlerWicketDetails(team,name,dir=".")
Arguments
team
The team to which the bowler belongs
name
The name of the bowler
dir
The source directory of the data
Value
dataframe The dataframe of bowling performance
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
bowlerMovingAverage
getTeamBowlingDetails
bowlerMeanRunsConceded
teamBowlersWicketRunsOppnAllMatches
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
'''
path = dir + '/' + team + "-BowlingDetails.csv"
bowlingDetails= pd.read_csv(path,index_col=False)
bowlerDetails = bowlingDetails.loc[bowlingDetails['bowler'].str.contains(name)]
return(bowlerDetails)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerMeanEconomyRate
# This function gets the bowler mean economy rate
#
#
###########################################################################################
def bowlerMeanEconomyRate(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots mean economy rate and the number of overs bowled by the bowler
Usage
bowlerMeanEconomyRate(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage
bowlerWicketPlot
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
bowlerMeanEconomyRate(df, name)
'''
    # Compute mean economy rate for each number of overs bowled
rcParams['figure.figsize'] = 8, 5
df2=df[['bowler','overs','econrate']].groupby('overs').mean().reset_index(inplace=False)
plt.xlabel('No of overs',fontsize=8)
plt.ylabel('Mean economy rate',fontsize=8)
sns.barplot(x='overs',y='econrate',data=df2)
atitle = name + "-Mean economy rate vs overs"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerMeanRunsConceded
# This function gets the mean runs conceded by bowler
#
#
###########################################################################################
def bowlerMeanRunsConceded (df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots mean runs conceded by the bowler for the number of overs bowled by the bowler
Usage
bowlerMeanRunsConceded(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage
bowlerWicketPlot
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
bowlerMeanRunsConceded(df, name)
'''
    # Compute mean runs conceded for each number of overs bowled
rcParams['figure.figsize'] = 8, 5
df2=df[['bowler','overs','runs']].groupby('overs').mean().reset_index(inplace=False)
plt.xlabel('No of overs',fontsize=8)
plt.ylabel('Mean runs conceded',fontsize=8)
sns.barplot(x='overs',y='runs',data=df2)
atitle = name + "-Mean runs conceded vs overs"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerMovingAverage
# This function gets the bowler moving average
#
#
###########################################################################################
def bowlerMovingAverage (df, name,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the wickets taken by the bowler over career. A loess regression fit plots the moving average of wickets taken by bowler
Usage
bowlerMovingAverage(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
bowlerMeanEconomyRate
bowlerWicketPlot
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
    bowlerMovingAverage(df, name)
'''
rcParams['figure.figsize'] = 8, 5
y_av = movingaverage(df.wicket, 30)
date= pd.to_datetime(df['date'])
plt.plot(date, y_av,"b")
plt.xlabel('Date',fontsize=8)
plt.ylabel('Wickets',fontsize=8)
plt.xticks(rotation=70)
atitle = name + "-Moving average of wickets"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerCumulativeAvgWickets
# This function gets the bowler cumulative average wickets
#
#
###########################################################################################
def bowlerCumulativeAvgWickets(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the cumulative average wickets of a bowler
Usage
bowlerCumulativeAvgWickets(df,name)
Arguments
df
Data frame
name
    Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate batsmanCumulativeStrikeRate batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
bowlerCumulativeAvgWickets(df, name)
'''
rcParams['figure.figsize'] = 8, 5
cumAvgRuns = df['wicket'].cumsum()/pd.Series(np.arange(1, len( df['wicket'])+1), df['wicket'].index)
plt.plot(cumAvgRuns)
plt.xlabel('No of matches',fontsize=8)
plt.ylabel('Cumulative Average wickets',fontsize=8)
plt.xticks(rotation=90)
atitle = name + "-Cumulative Average wickets vs matches"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerCumulativeAvgEconRate
# This function gets the bowler cumulative average economy rate
#
#
###########################################################################################
def bowlerCumulativeAvgEconRate(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the cumulative average economy rate of a bowler
Usage
bowlerCumulativeAvgEconRate(df,name)
Arguments
df
Data frame
name
    Name of bowler
    plot
    If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgWickets batsmanCumulativeStrikeRate batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="R Ashwin"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
    bowlerCumulativeAvgEconRate(df, name)
'''
rcParams['figure.figsize'] = 8, 5
cumAvgRuns = df['econrate'].cumsum()/pd.Series(np.arange(1, len( df['econrate'])+1), df['econrate'].index)
plt.plot(cumAvgRuns)
plt.xlabel('No of matches',fontsize=7)
plt.ylabel('Cumulative Average economy rate',fontsize=8)
plt.xticks(rotation=70)
atitle = name + "-Cumulative Average economy rate vs matches"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerWicketPlot
# This function gets the bowler wicket plot
#
#
###########################################################################################
def bowlerWicketPlot (df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the average wickets taken by the bowler versus the number of overs bowled
Usage
bowlerWicketPlot(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
bowlerMeanEconomyRate
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
    bowlerWicketPlot(df, name)
'''
rcParams['figure.figsize'] = 8, 5
    # Compute mean wickets for each number of overs bowled
df2=df[['bowler','overs','wicket']].groupby('overs').mean().reset_index(inplace=False)
plt.xlabel('No of overs',fontsize=8)
plt.ylabel('Mean wickets',fontsize=8)
sns.barplot(x='overs',y='wicket',data=df2)
atitle = name + "-Mean wickets vs overs"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerWicketsAgainstOpposition
# This function gets the bowler's performance against opposition
#
#
###########################################################################################
def bowlerWicketsAgainstOpposition (df,name= "A Leg Glance", plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots mean number of wickets taken by the bowler against different opposition
Usage
bowlerWicketsAgainstOpposition(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage
bowlerWicketPlot
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
bowlerWicketsAgainstOpposition(df, name)
'''
rcParams['figure.figsize'] = 8, 5
df1 = df[['bowler', 'wicket','team2']]
df2=df1.groupby('team2').agg(['sum','mean','count'])
df2.columns= ['_'.join(col).strip() for col in df2.columns.values]
# Reset index
df3=df2.reset_index(inplace=False)
ax=sns.barplot(x='team2', y="wicket_mean", data=df3)
plt.xticks(rotation=90,fontsize=7)
plt.xlabel('Opposition',fontsize=7)
plt.ylabel('Mean wickets',fontsize=8)
atitle=name + "-Mean wickets against opposition"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: bowlerWicketsVenue
# This function gets the bowler wickets at venues
#
#
###########################################################################################
def bowlerWicketsVenue (df,name= "A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Bowler performance at different venues
Description
This function computes and plots mean number of wickets taken by the bowler in different venues
Usage
bowlerWicketsVenue(df, name)
Arguments
df
Data frame
name
Name of bowler
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage
bowlerWicketPlot
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBowlerWicketDetails(team, name,dir=".")
bowlerWicketsVenue(df, name)
'''
rcParams['figure.figsize'] = 8, 5
df1 = df[['bowler', 'wicket','venue']]
df2=df1.groupby('venue').agg(['sum','mean','count'])
df2.columns= ['_'.join(col).strip() for col in df2.columns.values]
# Reset index
df3=df2.reset_index(inplace=False)
ax=sns.barplot(x='venue', y="wicket_mean", data=df3)
plt.xticks(rotation=90,fontsize=7)
plt.xlabel('Venue',fontsize=7)
plt.ylabel('Mean wickets',fontsize=8)
atitle=name + "-Mean wickets at different venues"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 March 2019
# Function: saveAllMatchesBetween2IntlT20s
# This function saves all the matches between 2 Intl T20 teams
#
###########################################################################################
def saveAllMatchesBetween2IntlT20s(dir1,odir="."):
'''
    Saves all matches between 2 Intl. T20 teams as dataframe
Description
This function saves all matches between 2 Intl. T20 countries as a single dataframe in the
current directory
Usage
saveAllMatchesBetween2IntlT20s(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenVsBowlersOppnAllMatches
'''
teams = ["Afghanistan","Australia","Bangladesh","Bermuda","Canada","England",
"Hong Kong","India","Ireland", "Kenya","Nepal","Netherlands",
"New Zealand", "Oman","Pakistan","Scotland","South Africa",
"Sri Lanka", "United Arab Emirates","West Indies", "Zimbabwe"]
for team1 in teams:
for team2 in teams:
if team1 != team2:
print("Team1=",team1,"team2=", team2)
getAllMatchesBetweenTeams(team1,team2,dir=dir1,save=True,odir=odir)
time.sleep(2) #Sleep before next save
return
###########################################################################################
# Designed and developed by <NAME>
# Date : 2 Mar 2019
# Function: saveAllMatchesAllOppositionIntlT20
# This function saves all the matches between all Intl T20 teams
#
###########################################################################################
def saveAllMatchesAllOppositionIntlT20(dir1,odir="."):
'''
    Saves matches against all Intl T20 teams as dataframe and CSV for an Intl T20 team
    Description
    This function saves all Intl T20 matches against all opposition as a single
    dataframe in the current directory
Usage
saveAllMatchesAllOppositionIntlT20(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
convertYaml2PandasDataframeT20
teamBattingScorecardMatch
'''
teams = ["Afghanistan","Australia","Bangladesh","Bermuda","Canada","England",
"Hong Kong","India","Ireland", "Kenya","Nepal","Netherlands",
"New Zealand", "Oman","Pakistan","Scotland","South Africa",
"Sri Lanka", "United Arab Emirates","West Indies", "Zimbabwe"]
for team in teams:
print("Team=",team)
getAllMatchesAllOpposition(team,dir=dir1,save=True,odir=odir)
time.sleep(2) #Sleep before next save
##########################################################################################
# Designed and developed by <NAME>
# Date : 2 March 2019
# Function: saveAllMatchesBetween2BBLTeams
# This function saves all the matches between 2 BBL Teams
#
###########################################################################################
def saveAllMatchesBetween2BBLTeams(dir1):
'''
    Saves all matches between 2 BBL teams as dataframe
    Description
    This function saves all matches between 2 BBL T20 teams as a single dataframe in the
    current directory
Usage
saveAllMatchesBetween2BBLTeams(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenVsBowlersOppnAllMatches
'''
teams = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>", "Sydney Sixers",
"Sydney Thunder"]
for team1 in teams:
for team2 in teams:
if team1 != team2:
print("Team1=",team1,"team2=", team2)
getAllMatchesBetweenTeams(team1,team2,dir=dir1,save=True)
time.sleep(2) #Sleep before next save
return
###########################################################################################
# Designed and developed by <NAME>
# Date : 2 Mar 2019
# Function: saveAllMatchesAllOppositionBBLT20
# This function saves all the matches between all BBL T20 teams
#
###########################################################################################
def saveAllMatchesAllOppositionBBLT20(dir1):
'''
    Saves matches against all BBL T20 teams as dataframe and CSV for a BBL team
    Description
    This function saves all BBL T20 matches against all opposition as a single
    dataframe in the current directory
Usage
saveAllMatchesAllOppositionBBLT20(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
convertYaml2PandasDataframeT20
teamBattingScorecardMatch
'''
teams = ["<NAME>", "<NAME>", "Hobart Hurricanes",
"Melbourne Renegades", "Perth Scorchers", "Sydney Sixers",
"Sydney Thunder"]
for team in teams:
print("Team=",team)
getAllMatchesAllOpposition(team,dir=dir1,save=True)
time.sleep(2) #Sleep before next save
##########################################################################################
# Designed and developed by <NAME>
# Date : 2 March 2019
# Function: saveAllMatchesBetween2NWBTeams
# This function saves all the matches between 2 NWB Teams
#
###########################################################################################
def saveAllMatchesBetween2NWBTeams(dir1):
'''
Saves all matches between 2 NWB teams as dataframe
Description
    This function saves all matches between 2 NWB teams as a single dataframe in the
current directory
Usage
saveAllMatchesBetween2NWBTeams(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenVsBowlersOppnAllMatches
'''
teams = ["Derbyshire", "Durham", "Essex", "Glamorgan",
"Gloucestershire", "Hampshire", "Kent","Lancashire",
"Leicestershire", "Middlesex","Northamptonshire",
"Nottinghamshire","Somerset","Surrey","Sussex","Warwickshire",
"Worcestershire","Yorkshire"]
for team1 in teams:
for team2 in teams:
if team1 != team2:
print("Team1=",team1,"team2=", team2)
getAllMatchesBetweenTeams(team1,team2,dir=dir1,save=True)
time.sleep(2) #Sleep before next save
return
###########################################################################################
# Designed and developed by <NAME>
# Date : 2 Mar 2019
# Function: saveAllMatchesAllOppositionNWBT20
# This function saves all the matches between all NWB T20 teams
#
###########################################################################################
def saveAllMatchesAllOppositionNWBT20(dir1):
'''
    Saves matches against all NWB T20 teams as dataframe and CSV for an NWB team
    Description
    This function saves all NWB T20 matches against all opposition as a single
    dataframe in the current directory
Usage
saveAllMatchesAllOppositionNWBT20(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
convertYaml2PandasDataframeT20
teamBattingScorecardMatch
'''
teams = ["Derbyshire", "Durham", "Essex", "Glamorgan",
"Gloucestershire", "Hampshire", "Kent","Lancashire",
"Leicestershire", "Middlesex","Northamptonshire",
"Nottinghamshire","Somerset","Surrey","Sussex","Warwickshire",
"Worcestershire","Yorkshire"]
for team in teams:
print("Team=",team)
getAllMatchesAllOpposition(team,dir=dir1,save=True)
time.sleep(2) #Sleep before next save
##########################################################################################
# Designed and developed by <NAME>
# Date : 28 Feb 2020
# Function: rankIntlT20Batting
# This function ranks Intl T20 batsman
#
###########################################################################################
def rankIntlT20Batting(dir1):
    countries ={"India":"india", "United States of America":"usa", "Canada":"canada", "United Arab Emirates":"uae",
        "Afghanistan":"afghanistan", "West Indies":"westindies","Oman":"oman","Germany":"germany",
        "Namibia":"namibia","Sri Lanka":"sl","Singapore":"singapore",
        "Malaysia":"malaysia","South Africa": "sa","Netherlands":"netherlands",
        "Zimbabwe":"zimbabwe","Pakistan":"pakistan","Scotland":"scotland","Kuwait":"kuwait",
        "New Zealand":"nz","Vanuatu":"vanuatu","Papua New Guinea": "png","Australia":"aus",
        "Ireland":"ireland","England":"england","South Korea":"sk","Japan":"japan","Bangladesh":"bangladesh",
        "Nepal":"nepal","Cayman Island":"cayman","Rwanda":"rwanda","Qatar":"qatar","Botswana":"botswana",
        "Uganda":"uganda","Maldives":"maldives","Fiji":"fiji","Mozambique":"mozam",
        "Hong Kong":"hk","Denmark":"denmark","Norway":"norway"
        }
df=pd.DataFrame()
for key in countries:
val = countries[key] + "_details"
val= getTeamBattingDetails(key,dir=dir1, save=False,odir=".")
df = pd.concat([df,val])
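    # Aggregate innings count and mean runs/strike rate per batsman, keep batsmen with
    # more than 40 innings and rank by mean runs, then mean strike rate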
df1=df.groupby('batsman').agg(['count','mean'])
df1.columns = ['_'.join(col).strip() for col in df1.columns.values]
df2 =df1[['runs_count','runs_mean','SR_mean']]
df3=df2[df2['runs_count']>40]
df4=df3.sort_values(['runs_mean','SR_mean'],ascending=False)
df4.columns=['matches','runs_mean','SR_mean']
return(df4)
#########################################################################################
# Designed and developed by <NAME>esh
# Date : 28 Feb 2020
# Function: rankIntlT20Bowling
# This function ranks Intl T20 bowlers
#
###########################################################################################
def rankIntlT20Bowling(dir1):
    countries ={"India":"india", "United States of America":"usa", "Canada":"canada", "United Arab Emirates":"uae",
        "Afghanistan":"afghanistan", "West Indies":"westindies","Oman":"oman","Germany":"germany",
        "Namibia":"namibia","Sri Lanka":"sl","Singapore":"singapore",
        "Malaysia":"malaysia","South Africa": "sa","Netherlands":"netherlands",
        "Zimbabwe":"zimbabwe","Pakistan":"pakistan","Scotland":"scotland","Kuwait":"kuwait",
        "New Zealand":"nz","Vanuatu":"vanuatu","Papua New Guinea": "png","Australia":"aus",
        "Ireland":"ireland","England":"england","South Korea":"sk","Japan":"japan","Bangladesh":"bangladesh",
        "Nepal":"nepal","Cayman Island":"cayman","Rwanda":"rwanda","Qatar":"qatar","Botswana":"botswana",
        "Uganda":"uganda","Maldives":"maldives","Fiji":"fiji","Mozambique":"mozam",
        "Hong Kong":"hk","Denmark":"denmark","Norway":"norway"
        }
df=pd.DataFrame()
for key in countries:
val = countries[key] + "_details"
val= getTeamBowlingDetails(key,dir=dir1, save=False,odir=".")
df = pd.concat([df,val])
df1=df.groupby('bowler').agg(['count','mean'])
df1.columns = ['_'.join(col).strip() for col in df1.columns.values]
df2 =df1[['wicket_count','wicket_mean','econrate_mean']]
df3=df2[df2['wicket_count']>40]
df4=df3.sort_values(['wicket_mean','econrate_mean'],ascending=False)
df4.columns=['matches','wicket_mean','econrate_mean']
return(df4)
#########################################################################################
# Designed and developed by <NAME>
# Date : 28 Feb 2020
# Function: rankIPLT20Batting
# This function ranks IPL T20 batsmen
#
###########################################################################################
def rankIPLT20Batting(dir1):
iplTeams ={"Chennai Super Kings":"csk","Deccan Chargers":"dc","Delhi Daredevils":"dd",
"Kings XI Punjab":"kxip", 'Kochi Tuskers Kerala':"kct","Kolkata Knight Riders":"kkr",
"Mumbai Indians":"mi", "Pune Warriors":"pw","Rajasthan Royals":"rr",
"Royal Challengers Bangalore":"rps","Sunrisers Hyderabad":"sh","Gujarat Lions":"gl",
"Rising Pune Supergiants":"rps"}
df=pd.DataFrame()
for key in iplTeams:
val = iplTeams[key] + "_details"
val= getTeamBattingDetails(key,dir=dir1, save=False,odir=".")
df = pd.concat([df,val])
df1=df.groupby('batsman').agg(['count','mean'])
df1.columns = ['_'.join(col).strip() for col in df1.columns.values]
df2 =df1[['runs_count','runs_mean','SR_mean']]
df3=df2[df2['runs_count']>40]
df4=df3.sort_values(['runs_mean','SR_mean'],ascending=False)
df4.columns=['matches','runs_mean','SR_mean']
return(df4)
#########################################################################################
# Designed and developed by <NAME>
# Date : 28 Feb 2020
# Function: rankIPLT20Bowling
# This function ranks IPL T20 bowlers
#
###########################################################################################
def rankIPLT20Bowling(dir1):
iplTeams ={"Chennai Super Kings":"csk","Deccan Chargers":"dc","Delhi Daredevils":"dd",
"Kings XI Punjab":"kxip", 'Kochi Tuskers Kerala':"kct","Kolkata Knight Riders":"kkr",
"Mumbai Indians":"mi", "Pune Warriors":"pw","Rajasthan Royals":"rr",
"Royal Challengers Bangalore":"rps","Sunrisers Hyderabad":"sh","Gujarat Lions":"gl",
"Rising Pune Supergiants":"rps"}
    df=pd.DataFrame()
import sys
from sqlalchemy import create_engine
import pandas as pd
def load_data(messages_filepath, categories_filepath):
"""
    Load messages and categories from CSV files to Pandas df
:param messages_filepath: str, filepath of messages
:param categories_filepath: str, filepath of categories
    :return: df: dataset combining messages and categories
"""
messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
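    # Minimal completion sketch (the original snippet ends above); assumes both CSV
    # files share an 'id' column on which they can be merged.
    df = messages.merge(categories, on='id')
    return df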
#!/usr/bin/env python
'''
Author: <NAME>
This program will read subnet planning and port matrix from two different spreadsheets and by use of Jinja2
will create a configuration file for a device or devices.
At the same time the program will create a YAML file with the device(s) configuration and also will create a
YAML Ansible playbook which can be run in case changes to the device config are required. All you have to do
is make changes to the device YAML config file and then run:
- ansible_playbook <device playbook>.yaml.
Make sure you have the following 3 files in the same directory:
- <device name>.yaml
- <device playbook>.yaml
- <jinja2 template>.j2 - this is the jinja2 template that was used by this PY program to generate
the initial config(s).
'''
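# Example invocation (hypothetical file names):
#   python generate_configs.py --subplan SubnetPlan.xlsx --portmatrix PortMatrix.xlsx \
#     --configtype AL -t access_switch.j2 -w N -ts 10 -qs 20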
from pandas.io import excel
from pandas import DataFrame
from netmiko import ConnectHandler
from datetime import datetime
from getpass import getpass
import threading
from threading import Thread
from threading import Lock
from time import time
from queue import Queue
import argparse
import socket
import struct
from jinja2 import Environment, FileSystemLoader
import yaml
# default show commands
SHOWCOMMANDS = ['show run','show interface status','show vlan']
arguments = ''
templatefile = ''
TS_LIMIT = 20
QS_LIMIT = 50
TS_DEFAULT = 10
QS_DEFAULT = 20
WRITE_CONFIG_DEFAULT = 'N'
default_user = ''
default_pass = ''
default_secret = ''
device_queue = Queue()
# establishes connection to device and returns an object back
def connectToDevice(devcreds):
ctd = ConnectHandler(**devcreds)
return(ctd)
# create the header to be saved into log file for every command read from playbook
def get_logheader(commandSent):
tmp = commandSent + " - " + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
logHeader = "-" * len(tmp) + "\n" + tmp + "\n" + "-" * len(tmp) + "\n"
return(logHeader)
# returns username. function will not exit unless something is entered.
def getusername():
username = ''
while username == '':
username = input('Enter default username: ').strip()
return(username)
# returns password. function will not exit unless something is entered. this function will not allow empty passwords but will allow for passwords with just spaces in them.
def getpassword(usern):
password = ''
while password == '':
password = getpass('Enter ' + usern + ' password: ')
return(password)
#parse arguments from command line
def getargs():
parser = argparse.ArgumentParser(description='Playbook Runner by <NAME>')
parser.add_argument('--subplan',required=True, help='Subnet Planning name is required.')
parser.add_argument('--portmatrix',required=True, help='Port Matrix name is required.')
parser.add_argument('--configtype',required=True, help='Config type name is required. (AL/WL/SE)')
parser.add_argument('-t', '--j2template', required=True, help='Jinja2 Template file to use.')
    parser.add_argument('-w', help='specify if configuration should be saved into Startup Config.\
\'Y\' to write config \'N\' to preserve Startup Config. If this flag is not specified or any other \
value is entered the default will be no to write the config changes.\nDefault: \'N\'')
parser.add_argument('-ts', help='Number of Threads to be created.\nMust be a number from 1 thru 20\nIf a number \
greater than 20 is entered, the maximum Thread number will be used.\nDefault: \'10\'')
parser.add_argument('-qs', help='Queue size.\nMust be a number from 1 thru 50.\nIf a number greater than 50 is \
entered, the maximum Queue number will used.\nDefault: \'20\'')
parser.add_argument('-v','--version', action='version', version='%(prog)s 1.6')
args = parser.parse_args()
if args.w is None or (args.w.upper() != 'Y' and args.w.upper() != 'N'):
args.w = WRITE_CONFIG_DEFAULT
if args.qs is None:
args.qs = QS_DEFAULT
elif int(args.qs) > QS_LIMIT:
args.qs = QS_LIMIT
if args.ts is None:
args.ts = TS_DEFAULT
elif int(args.ts) > TS_LIMIT:
args.ts = TS_LIMIT
return(args)
# Initializes the threads. Expects an interger as a parameter.
def CreateThreads(n):
print('Creating ' + str(n) + ' Threads')
for x in range(int(n)):
t = Thread(target=ThreadHandler)
t.daemon = True
t.start()
def ThreadHandler():
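    '''Worker loop: pull device data off the queue and generate its config files until the program exits.'''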
while True:
dev_data = device_queue.get()
#print(threading.current_thread().name + '-' + dev_data['hostname'] + ' Submitted')
GenerateConfig(dev_data)
device_queue.task_done()
#print(threading.current_thread().name + '-' + dev_data['hostname'] + ' Completed!!')
# generate VLAN name
def GenVlanName(vlantype, swname):
newVlanName = swname.replace('-','_')
newVlanName = newVlanName.replace('IDF','')
newVlanName = newVlanName.replace('SE','')
newVlanName = newVlanName.replace('WL','')
newVlanName = newVlanName.replace('AL','')
newVlanName = newVlanName.replace('1FL','01')
newVlanName = newVlanName.replace('2FL','02')
newVlanName = newVlanName.replace('3FL','03')
newVlanName = newVlanName.replace('4FL','04')
newVlanName = newVlanName.replace('5FL','05')
newVlanName = newVlanName.replace('6FL','06')
return(vlantype + newVlanName)
# open file to right log
def OpenOutputConfigFile(filename):
fileH = open(filename,'w')
return(fileH)
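# write the device data dictionary to <hostname>.yaml so it can be reused by the Ansible playbook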
def WriteYamlFile(rw):
fileH = open(rw.get('hostname') + ".yaml",'w')
fileH.write(yaml.dump(rw, explicit_start=True, indent=5, default_flow_style=False))
fileH.close()
# render the Jinja2 template with the device data and write the result to the output file
def WriteConfig(dicttowr, templatename, fileh):
#Load Jinja2 template
env = Environment(loader = FileSystemLoader('./'), trim_blocks=True, lstrip_blocks=True)
template = env.get_template(templatename)
#Render template using data and print the output
GenarateDevConfig = template.render(dicttowr)
fileh.write(GenarateDevConfig)
# Generates the device configuration, Ansible playbook and YAML data files for a device
def GenerateConfig(rw):
fh = OpenOutputConfigFile(rw['hostname'] + '.config')
WriteConfig(rw, templatefile, fh)
fh.close()
fh = OpenOutputConfigFile(rw['hostname'] + '-Ansible-playbook.yaml')
WriteConfig(rw, 'ansible-playbook.j2', fh)
fh.close()
WriteYamlFile(rw)
def cidr_to_netmask(cidr):
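    '''Convert a CIDR prefix length to a dotted-quad netmask, e.g. cidr_to_netmask(24) -> '255.255.255.0'.'''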
host_bits = 32 - int(cidr)
netmask = socket.inet_ntoa(struct.pack('!I', (1 << 32) - (1 << host_bits)))
return netmask
# open Excel Workbook, read rows and queue them for processing
def ReadWorkBookIntoQueue(inputSubPlan, portMatrix):
next_service = False
worksheets = {}
ManagementIP = ''
ManagementMask = ''
ManagementVLAN = ''
dataSubnet = ''
Subnetmask = 0
current_floor = 0
current_IDF_ID = ''
current_service = ''
mgmtIPoctect = 0
mgmtIPTracker = 0
portmatrixwb = excel.ExcelFile(portMatrix)
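    # Map the --configtype flag (AL/WL/SE) to the service name used in the subnet planning sheets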
if arguments.configtype.upper() == 'AL':
configt = 'Data'
elif arguments.configtype.upper() == 'WL':
configt = 'Wireless'
else:
configt = 'Security Cameras'
    with excel.ExcelFile(inputSubPlan)
"""
This script will compare variables to FRI to recalculate the results
"""
#==============================================================================
__title__ = "FRI vs variables"
__author__ = "<NAME>"
__version__ = "v1.0(21.08.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from netCDF4 import Dataset, num2date, date2num
from scipy import stats
# import rasterio
import xarray as xr
from dask.diagnostics import ProgressBar
from numba import jit
import bottleneck as bn
import scipy as sp
from scipy import stats
import glob
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# import regionmask as rm
# import itertools
# Import debugging packages
import ipdb
# from rasterio.warp import transform
from shapely.geometry import Polygon
# import geopandas as gpd
# from rasterio import features
# from affine import Affine
# +++++ Import my packages +++++
import myfunctions.corefunctions as cf
# import MyModules.PlotFunctions as pf
# import MyModules.NetCDFFunctions as ncf
#==============================================================================
def main():
# ========== Setup the paths ==========
dpath, clpath, chunksize = syspath()
data = datasets(dpath, 100)
# ========== Load the data ==========
for dsn in data:
Content_Compare(dpath, clpath, dsn, data)
ipdb.set_trace()
#==============================================================================
def Content_Compare(dpath, clpath, dsn, data):
# ========== Open the datasets =========
pre = xr.open_dataset(
clpath+"/TerraClimate_SIBERIA_ppt_1958to2018.nc", chunks={"latitude": 100, "longitude": 1000})
tas = xr.open_dataset(
clpath+"/TerraClimate_SIBERIA_tmean_1958to2018.nc", chunks={"latitude": 100, "longitude": 1000})
fri = xr.open_dataset(dpath+"/BurntArea/%s/FRI/%s_annual_burns_MW_1degreeBox_REMAPBIL.nc" %(dsn, dsn))
# pull out the areas whe the FRI is way to high
# fri = fri.where(fri.FRI < 500)
BFm = xr.open_dataset(dpath+"/masks/broad/Hansen_GFC-2018-v1.6_SIBERIA_ProcessedToTerraClimate.nc")
BFm = BFm.where(BFm.datamask == 1)
# ========== Group the data =========
seasons = ["JJA", "Annual", "DJF", "MAM", "SON"]
for per in seasons:
if per == "Annual":
# ========== time subset the data the datasets =========
pre_sub = pre.sel(dict(time=slice(pd.to_datetime("%d-01-01" % data[dsn]["start"]), None)))
tas_sub = tas.sel(dict(time=slice(pd.to_datetime("%d-01-01" % data[dsn]["start"]), None)))
# ========== Build the precipitation =========
# Resample to annual then get the annual mean
pre_mean = pre_sub.resample(time="1Y").sum().mean(dim='time')
pre_mean = pre_mean.where(pre_mean > 0)
# ========== Build the Temperature =========
tas_mean = tas_sub.mean(dim='time')
else:
# ========== time subset the data the datasets =========
pre_sub = pre.sel(dict(time=slice(pd.to_datetime("%d-12-01" % (data[dsn]["start"]-1)), None)))
tas_sub = tas.sel(dict(time=slice(pd.to_datetime("%d-12-01" % (data[dsn]["start"]-1)), None)))
pre_mean = pre_sub.resample(time="QS-DEC").sum()
pre_mean = pre_mean.groupby('time.season').mean()
pre_mean = pre_mean.sel(season=per).drop("season")
tas_mean = tas_sub.resample(time="QS-DEC").mean()
tas_mean = tas_mean.groupby('time.season').mean()
tas_mean = tas_mean.sel(season=per).drop("season")
# man_annual = ds[var].where(ds[var]['time.season'] == season).groupby('time.year')
print("Starting ppt mean calculation at" , | pd.Timestamp.now() | pandas.Timestamp.now |
"""
Module contains tools for processing files into DataFrames or other objects
"""
from collections import abc, defaultdict
import csv
import datetime
from io import StringIO
import itertools
import re
import sys
from textwrap import fill
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Type,
cast,
)
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
import pandas._libs.parsers as parsers
from pandas._libs.parsers import STR_NA_VALUES
from pandas._libs.tslibs import parsing
from pandas._typing import FilePathOrBuffer, StorageOptions, Union
from pandas.errors import (
AbstractMethodError,
EmptyDataError,
ParserError,
ParserWarning,
)
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_file_like,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, generic
from pandas.core.arrays import Categorical
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import (
Index,
MultiIndex,
RangeIndex,
ensure_index_from_sequences,
)
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
from pandas.io.common import IOHandles, get_handle, validate_header_arg
from pandas.io.date_converters import generic_parser
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = "\ufeff"
_doc_read_csv_and_table = (
r"""
{summary}
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used and automatically detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header=None``. Explicitly pass ``header=0`` to be able to
replace existing names. The header can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header row,
then you should explicitly pass ``header=0`` to override the column names.
Duplicates in this list are not allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``DataFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force pandas to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a DataFrame from ``data`` with element order preserved use
``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
prefix : str, optional
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` together with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``pd.to_datetime`` after
``pd.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partially-applied
:func:`pandas.to_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : bool, default False
If True and `parse_dates` is enabled, pandas will attempt to infer the
format of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM format dates, international and European format.
cache_dates : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
lineterminator : str (length 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being
treated as the header.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
error_bad_lines : bool, default True
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will dropped from the DataFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_map : bool, default False
If a filepath is provided for `filepath_or_buffer`, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or 'high' for the ordinary converter,
'legacy' for the original lower precision pandas converter, and
'round_trip' for the round-trip converter.
.. versionchanged:: 1.2
{storage_options}
.. versionadded:: 1.2
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
>>> pd.{func_name}('data.csv') # doctest: +SKIP
"""
)
def validate_integer(name, val, min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
"""
msg = f"'{name:s}' must be an integer >={min_val:d}"
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
return val
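# Hedged examples for validate_integer above (not in the pandas source):
#   >>> validate_integer("nrows", 5.0)
#   5
#   >>> validate_integer("chunksize", 0, min_val=1)
#   Traceback (most recent call last):
#       ...
#   ValueError: 'chunksize' must be an integer >=1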
def _validate_names(names):
"""
Raise ValueError if the `names` parameter contains duplicates or has an
invalid data type.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output DataFrame.
Raises
------
ValueError
If names are not unique or are not ordered (e.g. set).
"""
if names is not None:
if len(names) != len(set(names)):
raise ValueError("Duplicate names are not allowed.")
if not (
is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
):
raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
if kwds.get("date_parser", None) is not None:
if isinstance(kwds["parse_dates"], bool):
kwds["parse_dates"] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get("iterator", False)
chunksize = validate_integer("chunksize", kwds.get("chunksize", None), 1)
nrows = kwds.get("nrows", None)
# Check for duplicates in names.
_validate_names(kwds.get("names", None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
with parser:
return parser.read(nrows)
_parser_defaults = {
"delimiter": None,
"escapechar": None,
"quotechar": '"',
"quoting": csv.QUOTE_MINIMAL,
"doublequote": True,
"skipinitialspace": False,
"lineterminator": None,
"header": "infer",
"index_col": None,
"names": None,
"prefix": None,
"skiprows": None,
"skipfooter": 0,
"nrows": None,
"na_values": None,
"keep_default_na": True,
"true_values": None,
"false_values": None,
"converters": None,
"dtype": None,
"cache_dates": True,
"thousands": None,
"comment": None,
"decimal": ".",
# 'engine': 'c',
"parse_dates": False,
"keep_date_col": False,
"dayfirst": False,
"date_parser": None,
"usecols": None,
# 'iterator': False,
"chunksize": None,
"verbose": False,
"encoding": None,
"squeeze": False,
"compression": None,
"mangle_dupe_cols": True,
"infer_datetime_format": False,
"skip_blank_lines": True,
}
_c_parser_defaults = {
"delim_whitespace": False,
"na_filter": True,
"low_memory": True,
"memory_map": False,
"error_bad_lines": True,
"warn_bad_lines": True,
"float_precision": None,
}
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_deprecated_defaults: Dict[str, Any] = {}
_deprecated_args: Set[str] = set()
@Appender(
_doc_read_csv_and_table.format(
func_name="read_csv",
summary="Read a comma-separated values (csv) file into DataFrame.",
_default_sep="','",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_csv(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_map=False,
float_precision=None,
storage_options: StorageOptions = None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","}
)
kwds.update(kwds_defaults)
return _read(filepath_or_buffer, kwds)
@Appender(
_doc_read_csv_and_table.format(
func_name="read_table",
summary="Read general delimited file into DataFrame.",
_default_sep=r"'\\t' (tab-stop)",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_table(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_map=False,
float_precision=None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"}
)
kwds.update(kwds_defaults)
return _read(filepath_or_buffer, kwds)
def read_fwf(
filepath_or_buffer: FilePathOrBuffer,
colspecs="infer",
widths=None,
infer_nrows=100,
**kwds,
):
r"""
Read a table of fixed-width formatted lines into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser determine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, "infer") and widths is not None:
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds["colspecs"] = colspecs
kwds["infer_nrows"] = infer_nrows
kwds["engine"] = "python-fwf"
return _read(filepath_or_buffer, kwds)
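# Hedged sketch for read_fwf above (hypothetical file name): when widths is given
# instead of colspecs, the loop derives contiguous half-open column spans before
# delegating to _read, e.g. widths=[3, 5, 4] becomes colspecs=[(0, 3), (3, 8), (8, 12)].
#   >>> read_fwf("fixed_width.txt", widths=[3, 5, 4])  # doctest: +SKIP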
class TextFileReader(abc.Iterator):
"""
Passed dialect overrides any of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self.engine = engine
self._engine_specified = kwds.get("engine_specified", engine_specified)
_validate_skipfooter(kwds)
dialect = _extract_dialect(kwds)
if dialect is not None:
kwds = _merge_with_dialect_properties(dialect, kwds)
if kwds.get("header", "infer") == "infer":
kwds["header"] = 0 if kwds.get("names") is None else None
self.orig_options = kwds
# miscellanea
self._currow = 0
options = self._get_options_with_defaults(engine)
options["storage_options"] = kwds.get("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._engine = self._make_engine(self.engine)
def close(self):
self._engine.close()
def _get_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in _parser_defaults.items():
value = kwds.get(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.get(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the "
f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.get(argname, default)
options[argname] = value
if engine == "python-fwf":
# pandas\io\parsers.py:907: error: Incompatible types in assignment
# (expression has type "object", variable has type "Union[int, str,
# None]") [assignment]
for argname, default in _fwf_defaults.items(): # type: ignore[assignment]
options[argname] = kwds.get(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly calls
# "__next__(...)" when iterating through such an object, meaning it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
)
def _clean_options(self, options, engine):
result = options.copy()
fallback_reason = None
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
fallback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
if sep is None and not delim_whitespace:
if engine == "c":
fallback_reason = (
"the 'c' engine does not support "
"sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and len(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
fallback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
encoding = sys.getfilesystemencoding() or "utf-8"
try:
if len(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
fallback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and isinstance(quotechar, (str, bytes)):
if (
len(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
fallback_reason = (
"ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support such quotechars"
)
engine = "python"
if fallback_reason and self._engine_specified:
raise ValueError(fallback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if fallback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Falling back to the 'python' engine because "
f"{fallback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if fallback_reason:
warnings.warn(
(
"Falling back to the 'python' engine because "
f"{fallback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_arg(options["header"])
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
if result.get(arg, depr_default) != depr_default:
msg = (
f"The {arg} argument has been deprecated and will be "
"removed in a future version.\n\n"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
result[arg] = parser_default
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not isinstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result["index_col"] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not isinstance(converters, dict):
raise TypeError(
"Type converters must be a dict or subclass, "
f"input was a {type(converters).__name__}"
)
else:
converters = {}
# Converting values to NA
keep_default_na = options["keep_default_na"]
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is internally handled by the
# c-engine, so only need for python parsers
if engine != "c":
if is_integer(skiprows):
skiprows = list(range(skiprows))
if skiprows is None:
skiprows = set()
elif not callable(skiprows):
skiprows = set(skiprows)
# put stuff back
result["names"] = names
result["converters"] = converters
result["na_values"] = na_values
result["na_fvalues"] = na_fvalues
result["skiprows"] = skiprows
return result, engine
def __next__(self):
try:
return self.get_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine="c"):
mapping: Dict[str, Type[ParserBase]] = {
"c": CParserWrapper,
"python": PythonParser,
"python-fwf": FixedWidthFieldParser,
}
if engine not in mapping:
raise ValueError(
f"Unknown engine: {engine} (valid options are {mapping.keys()})"
)
# error: Too many arguments for "ParserBase"
return mapping[engine](self.f, **self.options) # type: ignore[call-arg]
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
nrows = validate_integer("nrows", nrows)
index, columns, col_dict = self._engine.read(nrows)
if index is None:
if col_dict:
# Any column is actually fine:
new_rows = len(next(iter(col_dict.values())))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = len(index)
df = DataFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and len(df.columns) == 1:
return df[df.columns[0]].copy()
return df
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = min(size, self.nrows - self._currow)
return self.read(nrows=size)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
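# Hedged usage sketch (hypothetical path and helper): with chunksize set, read_csv
# returns a TextFileReader, which (since 1.2) can be used as a context manager for
# chunked iteration.
#   >>> with read_csv("big.csv", chunksize=10_000) as reader:  # doctest: +SKIP
#   ...     for chunk in reader:
#   ...         process(chunk)  # process() is a user-supplied placeholder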
def _is_index_col(col):
return col is not None and col is not False
def _is_potential_multi_index(
columns, index_col: Optional[Union[bool, Sequence[int]]] = None
):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
index_col : None, bool or list, optional
Column or columns to use as the (possibly hierarchical) index
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
if index_col is None or isinstance(index_col, bool):
index_col = []
return (
len(columns)
and not isinstance(columns, MultiIndex)
and all(isinstance(c, tuple) for c in columns if c not in list(index_col))
)
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
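# Hedged example for _evaluate_usecols above (not in the pandas source): with a
# callable, the indices of matching names are returned as a set.
#   >>> _evaluate_usecols(lambda c: c.startswith("a"), ["aa", "b", "ab"])
#   {0, 2}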
def _validate_usecols_names(usecols, names):
"""
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
f"Usecols do not match columns, columns expected but not found: {missing}"
)
return usecols
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
"""
msg = (
"'usecols' must either be list-like of all strings, all unicode, "
"all integers or a callable."
)
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
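# Hedged examples for _validate_usecols_arg above (not in the pandas source):
#   >>> _validate_usecols_arg(["foo", "bar"])   # -> ({'foo', 'bar'}, 'string')
#   >>> _validate_usecols_arg(None)             # -> (None, None)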
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
"""
msg = (
"Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
if parse_dates is not None:
if is_scalar(parse_dates):  # api: pandas.core.dtypes.common.is_scalar
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from pandas import (DataFrame, Series, Timestamp, date_range, compat,
option_context, Categorical)
from pandas.core.arrays import IntervalArray, integer_array
from pandas.compat import StringIO
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals():
def test_cast_internals(self, float_frame):
casted = DataFrame(float_frame._data, dtype=int)
expected = DataFrame(float_frame._series, dtype=int)
assert_frame_equal(casted, expected)
casted = DataFrame(float_frame._data, dtype=np.int32)
expected = DataFrame(float_frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self, float_frame):
float_frame['E'] = 7.
consolidated = float_frame._consolidate()
assert len(consolidated._data.blocks) == 1
# Ensure copy, do I want this?
recons = consolidated._consolidate()
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
float_frame['F'] = 8.
assert len(float_frame._data.blocks) == 3
float_frame._consolidate(inplace=True)
assert len(float_frame._data.blocks) == 1
def test_consolidate_inplace(self, float_frame):
frame = float_frame.copy() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self, float_frame):
float_frame['E'] = 7.
assert not float_frame._data.is_consolidated()
_ = float_frame.values # noqa
assert float_frame._data.is_consolidated()
def test_modify_values(self, float_frame):
float_frame.values[5] = 5
assert (float_frame.values[5] == 5).all()
# unconsolidated
float_frame['E'] = 7.
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).all()
def test_boolean_set_uncons(self, float_frame):
float_frame['E'] = 7.
expected = float_frame.values.copy()
expected[expected > 1] = 2
float_frame[float_frame > 1] = 2
assert_almost_equal(expected, float_frame.values)
def test_values_numeric_cols(self, float_frame):
float_frame['foo'] = 'bar'
values = float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
values = mixed_float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_float_frame[['A', 'B', 'C']].values
assert values.dtype == np.float32
values = mixed_float_frame[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_int_frame[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = mixed_int_frame[['B', 'C']].values
assert values.dtype == np.uint64
values = mixed_int_frame[['A', 'C']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C', 'D']].values
assert values.dtype == np.int64
values = mixed_int_frame[['A']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
df = DataFrame({'A': [2 ** 63 - 1]})
result = df['A']
expected = Series(np.asarray([2 ** 63 - 1], np.int64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2 ** 63]})
result = df['A']
expected = Series(np.asarray([2 ** 63], np.uint64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [datetime(2005, 1, 1), True]})
result = df['A']
expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_),
name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [None, 1]})
result = df['A']
expected = Series(np.asarray([np.nan, 1], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, 2]})
result = df['A']
expected = Series(np.asarray([1.0, 2], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3.0]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, True]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, None]})
result = df['A']
expected = Series(np.asarray([1.0, np.nan], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, None]})
result = df['A']
expected = Series(np.asarray(
[1.0 + 2.0j, np.nan], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, True, None]})
result = df['A']
expected = Series(np.asarray(
[2.0, 1, True, None], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, datetime(2006, 1, 1), None]})
result = df['A']
expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),
None], np.object_), name='A')
assert_series_equal(result, expected)
def test_construction_with_mixed(self, float_string_frame):
# test construction edge cases with mixed types
# f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check dtypes
result = df.get_dtype_counts().sort_values()
expected = Series({'datetime64[ns]': 3})
# mixed-type frames
float_string_frame['datetime'] = datetime.now()
float_string_frame['timedelta'] = timedelta(days=1, seconds=1)
assert float_string_frame['datetime'].dtype == 'M8[ns]'
assert float_string_frame['timedelta'].dtype == 'm8[ns]'
result = float_string_frame.get_dtype_counts().sort_values()
expected = Series({'float64': 4,
'object': 1,
'datetime64[ns]': 1,
'timedelta64[ns]': 1}).sort_values()
assert_series_equal(result, expected)
def test_construction_with_conversions(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype='timedelta64[s]')
df = DataFrame(index=range(3))
df['A'] = arr
expected = DataFrame({'A': pd.timedelta_range('00:00:01', periods=3,
freq='s')},
index=range(3))
assert_frame_equal(df, expected)
expected = DataFrame({
'dt1': Timestamp('20130101'),
'dt2': date_range('20130101', periods=3),
# 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
}, index=range(3))
df = DataFrame(index=range(3))
df['dt1'] = np.datetime64('2013-01-01')
df['dt2'] = np.array(['2013-01-01', '2013-01-02', '2013-01-03'],
dtype='datetime64[D]')
# df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
# 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
assert_frame_equal(df, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
data = list(itertools.repeat((datetime(2001, 1, 1),
"aa", 20), 9))
return DataFrame(data=data,
columns=["A", "B", "C"],
dtype=dtype)
pytest.raises(NotImplementedError, f,
[("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
# 10822
# invalid error message on dt inference
if not compat.is_platform_windows():
f('M8[ns]')
def test_equals_different_blocks(self):
# GH 9330
df0 = pd.DataFrame({"A": ["x", "y"], "B": [1, 2],
"C": ["w", "z"]})
df1 = df0.reset_index()[["A", "B", "C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
assert (df0._data.blocks[0].dtype != df1._data.blocks[0].dtype)
# do the real tests
assert_frame_equal(df0, df1)
assert df0.equals(df1)
assert df1.equals(df0)
def test_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the default copy=True, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks()
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did not change the original DataFrame
assert not _df[column].equals(df[column])
def test_no_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the copy=False, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks(copy=False)
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did change the original DataFrame
assert _df[column].equals(df[column])
def test_copy(self, float_frame, float_string_frame):
cop = float_frame.copy()
cop['E'] = cop['A']
assert 'E' not in float_frame
# copy objects
copy = float_string_frame.copy()
assert copy._data is not float_string_frame._data
def test_pickle(self, float_string_frame, empty_frame, timezone_frame):
unpickled = tm.round_trip_pickle(float_string_frame)
assert_frame_equal(float_string_frame, unpickled)
# buglet
float_string_frame._data.ndim
# empty
unpickled = tm.round_trip_pickle(empty_frame)
repr(unpickled)
# tz frame
unpickled = tm.round_trip_pickle(timezone_frame)
assert_frame_equal(timezone_frame, unpickled)
def test_consolidate_datetime64(self):
# numpy vstack bug
data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
df = pd.read_csv(StringIO(data), parse_dates=[0, 1])
ser_starting = df.starting
ser_starting.index = ser_starting.values
ser_starting = ser_starting.tz_localize('US/Eastern')
ser_starting = ser_starting.tz_convert('UTC')
ser_starting.index.name = 'starting'
ser_ending = df.ending
ser_ending.index = ser_ending.values
ser_ending = ser_ending.tz_localize('US/Eastern')
ser_ending = ser_ending.tz_convert('UTC')
ser_ending.index.name = 'ending'
df.starting = ser_starting.index
df.ending = ser_ending.index
tm.assert_index_equal(pd.DatetimeIndex(
df.starting), ser_starting.index)
tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
def test_is_mixed_type(self, float_frame, float_string_frame):
assert not float_frame._is_mixed_type
assert float_string_frame._is_mixed_type
def test_get_numeric_data(self):
# TODO(wesm): unused?
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'f': Timestamp('20010102')},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, 'float64': 1,
datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
assert_series_equal(result, expected)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'd': np.array([1.] * 10, dtype='float32'),
'e': np.array([1] * 10, dtype='int32'),
'f': np.array([1] * 10, dtype='int16'),
'g': Timestamp('20010102')  # api: pandas.Timestamp
import pandas as pd
def read_local_data(data_dir):
static_vars = pd.read_csv(data_dir + 'static_vars.csv')
dynamic_vars = pd.read_csv(data_dir + 'dynamic_vars.csv')
outcome_vars = pd.read_csv(data_dir + 'outcome_vars.csv')  # api: pandas.read_csv
import datetime
from collections import OrderedDict
import warnings
import numpy as np
from numpy import array, nan
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from conftest import assert_frame_equal, assert_series_equal
from pvlib import irradiance
from conftest import requires_ephem, requires_numba
# fixtures create realistic test input data
# test input data generated at Location(32.2, -111, 'US/Arizona', 700)
# test input data is hard coded to avoid dependencies on other parts of pvlib
@pytest.fixture
def times():
# must include night values
return pd.date_range(start='20140624', freq='6H', periods=4,
tz='US/Arizona')
@pytest.fixture
def irrad_data(times):
return pd.DataFrame(np.array(
[[ 0. , 0. , 0. ],
[ 79.73860422, 316.1949056 , 40.46149818],
[1042.48031487, 939.95469881, 118.45831879],
[ 257.20751138, 646.22886049, 62.03376265]]),
columns=['ghi', 'dni', 'dhi'], index=times)
@pytest.fixture
def ephem_data(times):
return pd.DataFrame(np.array(
[[124.0390863 , 124.0390863 , -34.0390863 , -34.0390863 ,
352.69550699, -2.36677158],
[ 82.85457044, 82.97705621, 7.14542956, 7.02294379,
66.71410338, -2.42072165],
[ 10.56413562, 10.56725766, 79.43586438, 79.43274234,
144.76567754, -2.47457321],
[ 72.41687122, 72.46903556, 17.58312878, 17.53096444,
287.04104128, -2.52831909]]),
columns=['apparent_zenith', 'zenith', 'apparent_elevation',
'elevation', 'azimuth', 'equation_of_time'],
index=times)
@pytest.fixture
def dni_et(times):
return np.array(
[1321.1655834833093, 1321.1655834833093, 1321.1655834833093,
1321.1655834833093])
@pytest.fixture
def relative_airmass(times):
return pd.Series([np.nan, 7.58831596, 1.01688136, 3.27930443], times)
# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
@pytest.mark.parametrize('testval, expected', [
(doy, value),
(np.float64(doy), value),
(dt_date, value),
(dt_datetime, value),
(dt_np64, value),
(np.array([doy]), np.array([value])),
(pd.Series([doy]), np.array([value])),
(dt_index, pd.Series([value], index=dt_index)),
(timestamp, value)
])
@pytest.mark.parametrize('method', [
'asce', 'spencer', 'nrel', pytest.param('pyephem', marks=requires_ephem)])
def test_get_extra_radiation(testval, expected, method):
out = irradiance.get_extra_radiation(testval, method=method)
assert_allclose(out, expected, atol=10)
def test_get_extra_radiation_epoch_year():
out = irradiance.get_extra_radiation(doy, method='nrel', epoch_year=2012)
assert_allclose(out, 1382.4926804890767, atol=0.1)
@requires_numba
def test_get_extra_radiation_nrel_numba(times):
with warnings.catch_warnings():
# don't warn on method reload or num threads
warnings.simplefilter("ignore")
result = irradiance.get_extra_radiation(
times, method='nrel', how='numba', numthreads=4)
# and reset to no-numba state
irradiance.get_extra_radiation(times, method='nrel')
assert_allclose(result,
[1322.332316, 1322.296282, 1322.261205, 1322.227091])
def test_get_extra_radiation_invalid():
with pytest.raises(ValueError):
irradiance.get_extra_radiation(300, method='invalid')
def test_grounddiffuse_simple_float():
result = irradiance.get_ground_diffuse(40, 900)
assert_allclose(result, 26.32000014911496)
def test_grounddiffuse_simple_series(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(40, irrad_data['ghi'])
assert ground_irrad.name == 'diffuse_ground'
def test_grounddiffuse_albedo_0(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(
40, irrad_data['ghi'], albedo=0)
assert 0 == ground_irrad.all()
def test_grounddiffuse_albedo_invalid_surface(irrad_data):
with pytest.raises(KeyError):
irradiance.get_ground_diffuse(
40, irrad_data['ghi'], surface_type='invalid')
def test_grounddiffuse_albedo_surface(irrad_data):
result = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
assert_allclose(result, [0, 3.731058, 48.778813, 12.035025], atol=1e-4)
def test_isotropic_float():
result = irradiance.isotropic(40, 100)
assert_allclose(result, 88.30222215594891)
def test_isotropic_series(irrad_data):
result = irradiance.isotropic(40, irrad_data['dhi'])
assert_allclose(result, [0, 35.728402, 104.601328, 54.777191], atol=1e-4)
def test_klucher_series_float():
# klucher inputs
surface_tilt, surface_azimuth = 40.0, 180.0
dhi, ghi = 100.0, 900.0
solar_zenith, solar_azimuth = 20.0, 180.0
# expect same result for floats and pd.Series
expected = irradiance.klucher(
surface_tilt, surface_azimuth,
pd.Series(dhi), pd.Series(ghi),
pd.Series(solar_zenith), pd.Series(solar_azimuth)
) # 94.99429931664851
result = irradiance.klucher(
surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth
)
assert_allclose(result, expected[0])
def test_klucher_series(irrad_data, ephem_data):
result = irradiance.klucher(40, 180, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
# pvlib matlab 1.4 does not contain the max(cos_tt, 0) correction
# so, these values are different
assert_allclose(result, [0., 36.789794, 109.209347, 56.965916], atol=1e-4)
# expect same result for np.array and pd.Series
expected = irradiance.klucher(
40, 180, irrad_data['dhi'].values, irrad_data['ghi'].values,
ephem_data['apparent_zenith'].values, ephem_data['azimuth'].values
)
assert_allclose(result, expected, atol=1e-4)
def test_haydavies(irrad_data, ephem_data, dni_et):
result = irradiance.haydavies(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [0, 27.1775, 102.9949, 33.1909], atol=1e-4)
def test_reindl(irrad_data, ephem_data, dni_et):
result = irradiance.reindl(
40, 180, irrad_data['dhi'], irrad_data['dni'], irrad_data['ghi'],
dni_et, ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [np.nan, 27.9412, 104.1317, 34.1663], atol=1e-4)
def test_king(irrad_data, ephem_data):
result = irradiance.king(40, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'])
assert_allclose(result, [0, 44.629352, 115.182626, 79.719855], atol=1e-4)
def test_perez(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=irrad_data.index)
assert_series_equal(out, expected, check_less_precise=2)
def test_perez_components(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass,
return_components=True)
expected = pd.DataFrame(np.array(
[[ 0. , 31.46046871, np.nan, 45.45539877],
[ 0. , 26.84138589, np.nan, 31.72696071],
[ 0. , 0. , np.nan, 4.47966439],
[ 0. , 4.62212181, np.nan, 9.25316454]]).T,
columns=['sky_diffuse', 'isotropic', 'circumsolar', 'horizon'],
index=irrad_data.index
)
expected_for_sum = expected['sky_diffuse'].copy()
expected_for_sum.iloc[2] = 0
sum_components = out.iloc[:, 1:].sum(axis=1)
sum_components.name = 'sky_diffuse'
assert_frame_equal(out, expected, check_less_precise=2)
assert_series_equal(sum_components, expected_for_sum, check_less_precise=2)
def test_perez_arrays(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'].values, dni.values,
dni_et, ephem_data['apparent_zenith'].values,
ephem_data['azimuth'].values,
relative_airmass.values)
expected = np.array(
[ 0. , 31.46046871, np.nan, 45.45539877])
assert_allclose(out, expected, atol=1e-2)
assert isinstance(out, np.ndarray)
def test_perez_scalar():
# copied values from fixtures
out = irradiance.perez(40, 180, 118.45831879, 939.95469881,
1321.1655834833093, 10.56413562, 144.76567754,
1.01688136)
# this will fail. out is an ndarray with ndim == 0; fix in a future version.
# assert np.isscalar(out)
assert_allclose(out, 109.084332)
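# Hedged aside (editorial, not part of the original test suite): the 0-d
# ndarray quirk noted above can be reproduced with plain numpy --
# np.isscalar() is False for a 0-dim array, so a robust scalar check uses
# ndim or .item() instead.
def _demo_zero_dim_output():
    out = np.array(109.084332)
    assert not np.isscalar(out)           # a 0-d ndarray is not a Python scalar
    assert out.ndim == 0                  # but it has zero dimensions
    assert isinstance(out.item(), float)  # .item() unwraps it to a plain float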
@pytest.mark.parametrize('model', ['isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'])
def test_sky_diffuse_zenith_close_to_90(model):
# GH 432
sky_diffuse = irradiance.get_sky_diffuse(
30, 180, 89.999, 230,
dni=10, ghi=51, dhi=50, dni_extra=1360, airmass=12, model=model)
assert sky_diffuse < 100
def test_get_sky_diffuse_invalid():
with pytest.raises(ValueError):
irradiance.get_sky_diffuse(
30, 180, 0, 180, 1000, 1100, 100, dni_extra=1360, airmass=1,
model='invalid')
def test_liujordan():
expected = pd.DataFrame(np.array(
[[863.859736967, 653.123094076, 220.65905025]]),
columns=['ghi', 'dni', 'dhi'],
index=[0])
out = irradiance.liujordan(
pd.Series([10]), pd.Series([0.5]), pd.Series([1.1]), dni_extra=1400)
assert_frame_equal(out, expected)
def test_get_total_irradiance(irrad_data, ephem_data, dni_et, relative_airmass):
models = ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez']
for model in models:
total = irradiance.get_total_irradiance(
32, 180,
ephem_data['apparent_zenith'], ephem_data['azimuth'],
dni=irrad_data['dni'], ghi=irrad_data['ghi'],
dhi=irrad_data['dhi'],
dni_extra=dni_et, airmass=relative_airmass,
model=model,
surface_type='urban')
assert total.columns.tolist() == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
@pytest.mark.parametrize('model', ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez'])
def test_get_total_irradiance_scalars(model):
total = irradiance.get_total_irradiance(
32, 180,
10, 180,
dni=1000, ghi=1100,
dhi=100,
dni_extra=1400, airmass=1,
model=model,
surface_type='urban')
assert list(total.keys()) == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
# test that none of the values are nan
assert np.isnan(np.array(list(total.values()))).sum() == 0
def test_poa_components(irrad_data, ephem_data, dni_et, relative_airmass):
aoi = irradiance.aoi(40, 180, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
gr_sand = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
diff_perez = irradiance.perez(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'], relative_airmass)
out = irradiance.poa_components(
aoi, irrad_data['dni'], diff_perez, gr_sand)
expected = pd.DataFrame(np.array(
[[ 0. , -0. , 0. , 0. ,
0. ],
[ 35.19456561, 0. , 35.19456561, 31.4635077 ,
3.73105791],
[956.18253696, 798.31939281, 157.86314414, 109.08433162,
48.77881252],
[ 90.99624896, 33.50143401, 57.49481495, 45.45978964,
12.03502531]]),
columns=['poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=irrad_data.index)
assert_frame_equal(out, expected)
@pytest.mark.parametrize('pressure,expected', [
(93193, [[830.46567, 0.79742, 0.93505],
[676.09497, 0.63776, 3.02102]]),
(None, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]]),
(101325, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]])
])
def test_disc_value(pressure, expected):
# see GH 449 for pressure=None vs. 101325.
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2014-06-24T1200', '2014-06-24T1800'],
tz='America/Phoenix')
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
out = irradiance.disc(ghi, zenith, times, pressure=pressure)
expected_values = np.array(expected)
expected = pd.DataFrame(expected_values, columns=columns, index=times)
# check the pandas dataframe; check_less_precise only compares a few decimal places
assert_frame_equal(out, expected, check_less_precise=True)
# use np.assert_allclose to check values more clearly
assert_allclose(out.values, expected_values, atol=1e-5)
def test_disc_overirradiance():
columns = ['dni', 'kt', 'airmass']
ghi = np.array([3000])
solar_zenith = np.full_like(ghi, 0)
times = pd.date_range(start='2016-07-19 12:00:00', freq='1s',
periods=len(ghi), tz='America/Phoenix')
out = irradiance.disc(ghi=ghi, solar_zenith=solar_zenith,
datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[8.72544336e+02, 1.00000000e+00, 9.99493933e-01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_disc_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2016-07-19 06:11:00'], tz='America/Phoenix')
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith and/or max_airmass keep these results reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100)
expected = pd.DataFrame(np.array(
[[0., 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100)
expected = pd.DataFrame(np.array(
[[277.50185968, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith keeps this result reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_airmass=100)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 36.39544757]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow zenith to be close to 90 and airmass to be infinite
# and we get crazy values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[6.68577449e+03, 1.16046346e-02, 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow min cos zenith to be 0, zenith to be close to 90,
# and airmass to be very big and we get even higher DNI values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[7.21238390e+03, 1., 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_dirint_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure)
assert_almost_equal(dirint_data.values,
np.array([868.8, 699.7]), 1)
def test_dirint_nans():
times = pd.date_range(start='2014-06-24T12-0700', periods=5, freq='6H')
ghi = pd.Series([np.nan, 1038.62, 1038.62, 1038.62, 1038.62], index=times)
zenith = pd.Series([10.567, np.nan, 10.567, 10.567, 10.567], index=times)
pressure = pd.Series([93193., 93193., np.nan, 93193., 93193.], index=times)
temp_dew = pd.Series([10, 10, 10, np.nan, 10], index=times)
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=temp_dew)
assert_almost_equal(dirint_data.values,
np.array([np.nan, np.nan, np.nan, np.nan, 893.1]), 1)
def test_dirint_tdew():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=10)
assert_almost_equal(dirint_data.values,
np.array([882.1, 672.6]), 1)
def test_dirint_no_delta_kt():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
use_delta_kt_prime=False)
assert_almost_equal(dirint_data.values,
np.array([861.9, 670.4]), 1)
def test_dirint_coeffs():
coeffs = irradiance._get_dirint_coeffs()
assert coeffs[0, 0, 0, 0] == 0.385230
assert coeffs[0, 1, 2, 1] == 0.229970
assert coeffs[3, 2, 6, 3] == 1.032260
def test_dirint_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
# times don't have any physical relevance
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([0, 1], index=times)
solar_zenith = pd.Series([90, 89.99], index=times)
out = irradiance.dirint(ghi, solar_zenith, times)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected)
out = irradiance.dirint(ghi, solar_zenith, times, max_zenith=90)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0,
max_zenith=90)
expected = pd.Series([0.0, 144.264507], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0,
max_zenith=100)
expected = pd.Series([0.0, 144.264507], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
def test_gti_dirint():
times = pd.DatetimeIndex(
['2014-06-24T06-0700', '2014-06-24T09-0700', '2014-06-24T12-0700'])
poa_global = np.array([20, 300, 1000])
aoi = np.array([100, 70, 10])
zenith = np.array([80, 45, 20])
azimuth = np.array([90, 135, 180])
surface_tilt = 30
surface_azimuth = 180
# test defaults
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth)
expected_col_order = ['ghi', 'dni', 'dhi']
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 288.22574368, 60.59964218, 245.37532576],
[ 931.04078010, 695.94965324, 277.06172442]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test ignore calculate_gt_90
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
calculate_gt_90=False)
expected_no_90 = expected.copy()
expected_no_90.iloc[0, :] = np.nan
assert_frame_equal(output, expected_no_90)
# test pressure input
pressure = 93193.
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
pressure=pressure)
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 289.81109139, 60.52460392, 247.01373353],
[ 932.46756378, 648.05001357, 323.49974813]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test albedo input
albedo = 0.05
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
albedo=albedo)
expected = pd.DataFrame(array(
[[ 21.3592591, 0. , 21.3592591 ],
[ 292.5162373, 64.42628826, 246.95997198],
[ 941.6753031, 727.16311901, 258.36548605]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test temp_dew input
temp_dew = np.array([70, 80, 20])
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
temp_dew=temp_dew)
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 292.40468994, 36.79559287, 266.3862767 ],
[ 931.79627208, 689.81549269, 283.5817439]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
def test_erbs():
index = pd.DatetimeIndex(['20190101']*3 + ['20190620'])
ghi = pd.Series([0, 50, 1000, 1000], index=index)
zenith = pd.Series([120, 85, 10, 10], index=index)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[9.67192672e+01, 4.15703604e+01, 4.05723511e-01],
[7.94205651e+02, 2.17860117e+02, 7.18132729e-01],
[8.42001578e+02, 1.70790318e+02, 7.68214312e-01]]),
columns=['dni', 'dhi', 'kt'], index=index)
out = irradiance.erbs(ghi, zenith, index)
assert_frame_equal(np.round(out, 0), np.round(expected, 0))
def test_erbs_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
columns = ['dni', 'dhi', 'kt']
times = | pd.DatetimeIndex(['2016-07-19 06:11:00'], tz='America/Phoenix') | pandas.DatetimeIndex |
# Copyright (c) 2018, Faststream Technologies
# Author: <NAME>
import numpy as np
import pandas as pd
import os
# Import to show plots in separate windows
# from IPython import get_ipython
# get_ipython().run_line_magic('matplotlib', 'qt5')
# CURR and PARENT directory constants
CURR_DIR = os.path.dirname(os.path.abspath('__file__'))
PARENT_DIR = os.path.abspath(os.path.join(CURR_DIR, os.pardir))
# Import dataset ignoring headers
df = pd.read_csv(PARENT_DIR + '\\assets\\datasets\\eit.csv', index_col=[0], header = [0], skiprows= [1] ,skipinitialspace=True)
df_ranges = pd.read_csv(PARENT_DIR + '\\assets\\datasets\\eit.csv', index_col=[0], header = [0], skiprows= [0], skipinitialspace=True, nrows=0)
df_columns_ranges = list(df_ranges.columns)
df_columns_colors = list(df.columns)
df_means = df.mean()
target_series = []
# Create target_series list of booleans
for i, color in enumerate(df_columns_colors):
target_series.append(df[color] > df_means[color])  # label-based lookup instead of positional indexing on a labelled Series
target = np.array(target_series)
target = np.transpose(target[-4:])
target_bools = []
# Build target_bools, which becomes the final Series for the target column
for i in range(len(target)):
if np.sum(target[i]) >= 1:
target_bools.append(1)
else:
target_bools.append(0)
target_bools = | pd.Series(target_bools) | pandas.Series |
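# Hedged aside (illustrative only, not part of the original script): the two
# loops above can be collapsed into vectorized pandas operations; the name
# target_bools_vec and the intent (any of the last four colour channels above
# its column mean -> 1) are read off the code above.
target_bools_vec = (
    df.gt(df.mean())   # per-column comparison against each column's mean
    .iloc[:, -4:]      # keep only the last four colour columns
    .any(axis=1)       # at least one channel above its mean
    .astype(int)
)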
# -----------------------------------------------------------------------------
# WSDM Cup 2017 Classification and Evaluation
#
# Copyright (c) 2017 <NAME>, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
from collections import OrderedDict
import logging
import numpy as np
import pandas as pd
import config
from src import constants
from src.dataset import DataSet
_logger = logging.getLogger()
# Use thousand separator and no decimal points
_FLOAT_FORMAT = '{:,.0f}'.format
# columns
REVISION_ID = 'revisionId'
SESSION_ID = 'revisionSessionId'
CONTENT_TYPE = 'contentType'
ROLLBACK_REVERTED = 'rollbackReverted'
ITEM_ID = 'itemId'
USER_NAME = 'userName'
REVISION_ACTION = 'revisionAction'
TIMESTAMP = 'timestamp'
REVISIONT_TAGS = 'revisionTags'
LANGUAGE_WORD_RATIO = 'languageWordRatio'
REVISION_LANGUAGE = 'revisionLanguage'
USER_COUNTRY = 'userCountry'
def compute_statistics(data):
_logger.info("Computing statistics...")
_logger.debug(
data['revisionAction']
.str
.cat(data['revisionSubaction'], sep='_', na_rep='na')
.value_counts())
_compute_feature_statistics(data)
_compute_corpus_statistics(data)
_compute_corpus_statistics_over_time(data)
_compute_dataset_statistics(data)
_compute_session_statistics(data)
_compute_backpressure_statistics(data)
# computes some statistics about selected features
# _compute_special_feature_statistics(data)
_logger.info("Computing statistics... done.")
def _compute_feature_statistics(data):
_logger.debug("Computing descriptive statistics...")
data.describe(include='all').to_csv(
config.OUTPUT_PREFIX + "_feature_statistics.csv")
_logger.debug("Computing descriptive statistics... done.")
def _compute_corpus_statistics(data):
"""Compute statistics for the whole corpus.
Evaluate corpus in terms of total unique users, items, sessions,
and revisions with a breakdown by content type and by vandalism
status (vandalism/non-vandalism).
"""
def compute_data_frame(data):
head_mask = data[CONTENT_TYPE] == 'TEXT'
stmt_mask = (data[CONTENT_TYPE] == 'STATEMENT')
sitelink_mask = (data[CONTENT_TYPE] == 'SITELINK')
body_mask = (stmt_mask | sitelink_mask)
result = OrderedDict()
result['Entire corpus'] = compute_column_group(data)
result['Item head'] = compute_column_group(data[head_mask])
result['Item body'] = compute_column_group(data[body_mask])
# result['STATEMENT'] = compute_column_group(data[stmt_mask])
# result['SITELINK'] = compute_column_group(data[sitelink_mask])
result = pd.concat(result, axis=1, keys=result.keys())
return result
def compute_column_group(data):
vandalism_mask = data[ROLLBACK_REVERTED].astype(np.bool)
regular_mask = ~vandalism_mask
result = OrderedDict()
result['Total'] = compute_column(data)
result['Vandalism'] = compute_column(data[vandalism_mask])
result['Regular'] = compute_column(data[regular_mask])
result = pd.concat(result, axis=1, keys=result.keys())
return result
def compute_column(data):
result = pd.Series()
result['Revisions'] = data[REVISION_ID].nunique()
result['Sessions'] = data[SESSION_ID].nunique()
result['Items'] = data[ITEM_ID].nunique()
result['Users'] = data[USER_NAME].nunique()
return result
def compute_actions(data):
vandalism_counts = \
data[data[ROLLBACK_REVERTED]][REVISION_ACTION].value_counts()
vandalism_counts.name = 'vandalism_count'
total_counts = data[REVISION_ACTION].value_counts()
total_counts.name = 'total_count'
counts = pd.concat([vandalism_counts, total_counts], axis=1)
counts.sort_values('vandalism_count', inplace=True, ascending=False)
_logger.debug(
'Counts: \n' +
str(counts)
)
statistics = compute_data_frame(data)
statistics.to_csv(config.OUTPUT_PREFIX + "_corpus_statistics.csv")
statistics = _round_to_thousands(statistics)
statistics.to_latex(
config.OUTPUT_PREFIX + "_corpus_statistics.tex",
float_format=_FLOAT_FORMAT)
_logger.info(statistics)
def _compute_corpus_statistics_over_time(data):
df = data.copy()
df = data.loc[:, [TIMESTAMP, CONTENT_TYPE, ROLLBACK_REVERTED]]
df = pd.get_dummies(df, columns=[CONTENT_TYPE])
df = df.rename(columns={'contentType_TEXT': 'TEXT',
'contentType_STATEMENT': 'STATEMENT',
'contentType_SITELINK': 'SITELINK',
'contentType_MISC': 'MISC',
'rollbackReverted': 'VANDALISM'})
df['TEXT_VANDALISM'] = (df['TEXT'] & df['VANDALISM'])
df['STATEMENT_VANDALISM'] = (df['STATEMENT'] & df['VANDALISM'])
df['SITELINK_VANDALISM'] = (df['SITELINK'] & df['VANDALISM'])
df['MISC_VANDALISM'] = (df['MISC'] & df['VANDALISM'])
df['REVISIONS'] = 1
df['VANDALISM'] = df['VANDALISM'].astype(np.bool)
df.set_index(TIMESTAMP, inplace=True)
df = df[['REVISIONS', 'VANDALISM',
'TEXT', 'TEXT_VANDALISM',
'STATEMENT', 'STATEMENT_VANDALISM',
'SITELINK', 'SITELINK_VANDALISM',
'MISC', 'MISC_VANDALISM']]
grouped = df.groupby(pd.TimeGrouper(freq='M'))
result = grouped.sum()
result.to_csv(config.OUTPUT_PREFIX + "_corpus_statistics_over_time.csv")
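# Editorial note (hedged): pd.TimeGrouper used above was removed in pandas 1.0;
# on newer pandas the equivalent monthly grouping would be, as a sketch:
#     result = df.groupby(pd.Grouper(freq='M')).sum()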
def _compute_dataset_statistics(data):
"""
Compute dataset statistics for training, validation and test set.
Evaluate datasets in terms of time period covered, revisions, sessions,
items, and users.
"""
def compute_data_frame(data):
_logger.debug("Splitting statistics...")
training_set_start_index = 0 # compute statistics from start of dataset
validation_set_start_index = \
DataSet.get_index_for_revision_id_from_df(data, constants.VALIDATION_SET_START)
test_set_start_index = \
DataSet.get_index_for_revision_id_from_df(data, constants.TEST_SET_START)
tail_set_start_index = \
DataSet.get_index_for_revision_id_from_df(data, constants.TAIL_SET_START)
training_set = data[training_set_start_index:validation_set_start_index]
validation_set = data[validation_set_start_index:test_set_start_index]
test_set = data[test_set_start_index:tail_set_start_index]
result = []
result.append(compute_splitting_statistics_row(training_set, 'Training'))
result.append(compute_splitting_statistics_row(validation_set, 'Validation'))
result.append(compute_splitting_statistics_row(test_set, 'Test'))
result = pd.concat(result, axis=0)
return result
def compute_splitting_statistics_row(data, label):
result = | pd.Series() | pandas.Series |
import numpy as np
import pandas as pd
import scipy.sparse as sp
import sklearn.preprocessing as pp
from math import exp
from heapq import heappush, heappop
# conventional i2i
class CosineSimilarity():
# expects DataFrame, loaded from ratings.csv
def __init__(self, df, limit=20):
self.limit = limit
# no need for timestamp here
df = df.drop(labels = 'timestamp', axis = 1)
# let's see what's the mean rating for each movie,
df_mean = df.groupby(['movieId'], sort = False).mean().rename(columns = {'rating': 'mean'})
# join mean values to original DataFrame
df = df.join(df_mean, on = 'movieId', sort = False)
# and subtract mean values from each rating,
# so that rating of 0 becomes neutral
df['rating'] = df['rating'] - df['mean']
# ok? now pivot original DataFrame so that it becomes a feature/document matrix
# and fill all NaNs (where a user hasn't rated a movie) with zeros, which is legal now
df = df.pivot_table(index = 'userId', columns = 'movieId', values = 'rating').fillna(0)
# if there were movies, having only equal ratings (ie, all 4.0)
# then we can't really recommend anything to them, hence removing
df = df.loc[:, (df != 0).any(axis = 0)]
# here we go, let's turn DataFrame into sparse matrix, normalize ratings,
cnm = pp.normalize(sp.csr_matrix(df.values), axis = 0)
# calculate recommendations and turn sparse matrix back into DataFrame,
# having movieId index, movieId columns and values, representing relevance of A to B
self.recs = pd.DataFrame((cnm.T * cnm).todense(), columns=df.columns, index=df.columns)
# retrieves "limit" of recommendations for given movie_id out of precalculated DataFrame
def recommend(self, movie_id):
if not movie_id in self.recs.index.values:
return pd.DataFrame([], columns=['movieId']).set_index('movieId')
r = self.recs[movie_id].sort_values(ascending=False)
r = r.drop(labels=movie_id)
r = r.to_frame().rename(columns = {movie_id: 'score'})
return r.head(n=self.limit)
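# Hedged usage sketch (not part of the original module): a toy ratings frame,
# assuming the (userId, movieId) MultiIndex layout implied by how ShortestPath
# below reads df.index; the ids and ratings are invented.
def _demo_cosine_similarity():
    idx = pd.MultiIndex.from_tuples(
        [(1, 10), (1, 20), (2, 10), (2, 20), (2, 30), (3, 20), (3, 30)],
        names=['userId', 'movieId'])
    toy = pd.DataFrame({'rating': [5.0, 3.0, 4.0, 2.0, 5.0, 5.0, 2.0],
                        'timestamp': [0, 1, 2, 3, 4, 5, 6]}, index=idx)
    return CosineSimilarity(toy, limit=5).recommend(10)  # movies most similar to movieId 10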
# now, the new way of getting i2i recommendations
class ShortestPath():
# expects DataFrame, loaded from ratings.csv
def __init__(self, df, limit=20):
self.limit = limit
# here the order of happenings is very crucial,
df = df.sort_values(by = ['userId', 'timestamp'])
# once sorted, we can remove timestamp (will save us a couple of bytes)
df = df.drop(labels = 'timestamp', axis = 1)
# mids stands for movie IDs (I'm yet another lazy coder)
self.mids = df.index.get_level_values('movieId').unique()
df = df.reset_index(level = 'movieId')
# al is adjacency list, by the way
al = {}
for uid in df.index.get_level_values('userId').unique():
# let's examine each user path
path = df.loc[uid]
if isinstance(path, pd.DataFrame):
# a plain Series here means the user made only one rating, which is not helpful for building pairs
for m1, m2 in zip(path[:-1].itertuples(), path[1:].itertuples()):
# for each pair of rated movie and next rated movie
al.setdefault(m1.movieId, {}).setdefault(m2.movieId, 0)
# we see what a user thinks of how similar they are,
# the more both ratings are close to each other - the higher
# is similarity
al[m1.movieId][m2.movieId] += 1/exp(abs(m1.rating - m2.rating)) - 0.5
for mid in al:
# let's make a list for each movie in adjacency list, so that
# adjacency list becomes a list indeed, along the way, we have to
# inverse summed up similarity, so that the higher is the similarity -
# the shorter is the length of an edge in movies graph
al[mid] = list(map(lambda kv: (kv[0], -kv[1]), al[mid].items()))
# yes, list is required to be sorted from closest to farthest
al[mid].sort(key = lambda r: r[1])
res = {}
for mid in al:
# you still here? sweet
# we have BFS with priority queue here,
# I always thought its name was Dijkstra's algorithm,
# although I lately realized that Dijkstra's original version used to be
# a little more naive. What should I do?
# r stands for Results
r = {}
# e stands for elements in the queue
e = {}
# q stands for queue (sincerely, C.O.)
q = []
# d stands for Depth of search
# (well, actually, there's no depth in breadth first search,
# it's just a number of nodes we're willing to visit)
d = limit + 1
# starting from originator itself
e[mid] = [0, mid]
heappush(q, e[mid])
while q:
# while there are vertices in the queue
v = heappop(q)
# and they are not dummy (-1 is explained below) or not known
if v[1] == -1 or not v[1] in al:
continue
d -= 1
# and required depth isn't reached:
if d < 0:
break
# we consider current vertice a next relevant recommendation
r[v[1]] = v[0]
# and we have to fill the queue with
# other adjacent vertices
for av in al[v[1]]:
if av[0] in r:
# this one we've already seen
continue
# we're getting further from originator
alt = v[0] + av[1]
if av[0] in e:
# but what if next adjacent vertice is already queued
if alt < e[av[0]][0]:
# well, if we found a shorter path, let's prioritize
# this vertice higher in the queue
ii = e[av[0]][1]
# here we go, -1 is a dummy distance value for a vertice
# that has been moved in the queue, one doesn't simply
# remove a node from heapified list, if you know what I mean
e[av[0]][1] = -1
# so we enqueue a new one
e[av[0]] = [alt, ii]
heappush(q, e[av[0]])
else:
# otherwise, just put it in the queue
e[av[0]] = [alt, av[0]]
heappush(q, e[av[0]])
# of course, recommendation of a movie to itself is way too obvious
del r[mid]
# listify and sort other recommendaions
res[mid] = list(r.items())
res[mid].sort(key = lambda r: -r[1])
# save results
self.recs = res
# returns all recommendations for a given movie_id
# the trick here is that "limit" has already been applied upon calculation
# and the higher is the "limit" - the longer calculation takes, linearly,
# so here's no magical overtake of fancy scipy sparse matrix by pure python algorithm
def recommend(self, movie_id):
if not movie_id in self.recs:
return pd.DataFrame([], columns=['movieId']).set_index('movieId')
r = | pd.DataFrame(self.recs[movie_id], columns=['movieId', 'score']) | pandas.DataFrame |
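# Hedged worked example (editorial): the edge-weight increment used above is
# 1/exp(|r1 - r2|) - 0.5, so
#   identical ratings  -> 1/e**0.0 - 0.5 = +0.500
#   ratings 0.5 apart  -> 1/e**0.5 - 0.5 ~= +0.107
#   ratings 1.0 apart  -> 1/e**1.0 - 0.5 ~= -0.132
# i.e. consecutive views with similar ratings strengthen the link, while rating
# gaps larger than ln(2) ~= 0.69 weaken it before the sum is negated into an
# edge length for the shortest-path search.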
import pandas as pd
import numpy as np
from sklearn import metrics
import pickle
from sklearn.preprocessing import label_binarize
import os
import argparse
def get_thres_fold(gold_labels_train_folds, results_softmax, folds=5):
'''
find the threshold that equates the label cardinality of the dev set
to that of the train set, per fold.
'''
T = np.arange(0,1,step=0.001)
optimal_t = np.zeros(folds) # t[i] is the optimal t for fold i
for i in range(folds):
gold_labels = gold_labels_train_folds[i]
train_size = len(gold_labels_train_folds[i])
LCard_train = gold_labels_train_folds[i].to_numpy().sum()/train_size
print("LCard_train: ", LCard_train)
test_size = results_softmax[i].shape[0]
diff = np.zeros_like(T) # to store differences of label cardinalities
for (j,t) in enumerate(T):
binary_df_pred = results_softmax[i]>t
LCard_test = binary_df_pred.sum()/test_size
diff[j] = np.abs(LCard_train - LCard_test)
optimal_t[i] = T[np.argmin(diff)]
return optimal_t
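# Hedged worked example (numbers invented): label cardinality is the average
# number of positive labels per sample, LCard = (total 1s) / n_samples.
# If a train fold has 900 positive frame labels over 600 articles
# (LCard_train = 1.5), the sweep above keeps the threshold t whose binarised
# dev predictions also average roughly 1.5 frames per article.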
def get_thres_fold_frame(gold_labels_train_folds, results_softmax, folds=5):
'''
find the threshold that equates the label cardinality of the dev set
to that of the train set, per fold and per frame, thus |frames|*|folds| thresholds.
'''
T = np.arange(0,1,step=0.001)
optimal_t = np.zeros((folds,9)) # t[i, j] is the optimal t for fold i and frame j
for i in range(folds):
gold_labels = gold_labels_train_folds[i]
gold_labels.columns = [str(f+1) for f in np.arange(9)]
train_size = len(gold_labels_train_folds[i])
LCard_train_frames = gold_labels.sum(axis=0)/train_size
test_size = results_softmax[i].shape[0]
for frame in range(9):  # this inner loop runs over the 9 frames, not the CV folds
frame_preds = results_softmax[i][:,frame]
diff = np.zeros_like(T) # to store differences of label cardinalities
for (j,t) in enumerate(T):
binary_df_pred = frame_preds>t
LCard_test = binary_df_pred.sum()/test_size
diff[j] = np.abs(LCard_train_frames.iloc[frame] - LCard_test)
optimal_t[i,frame] = T[np.argmin(diff)]
return optimal_t
def no_label_predicted(binary_df_pred):
'''
F1 scores do not include frames that never occur in a given
fold or are never predicted by the model, since either the precision
or the recall is undefined for them. However, all EM scores include all
frames.
'''
return np.where(np.logical_not(binary_df_pred).all(1))
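# Hedged mini example (editorial): for
#   binary_df_pred = np.array([[0, 0, 0], [1, 0, 1]], dtype=bool)
# the call returns (array([0]),) -- only row 0 has no predicted frame; get_metrics()
# below then backfills such rows with the single highest-confidence frame.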
def get_metrics(optimal_t, results, gold_labels_test_folds):
'''
function to compute all metrics presented in the paper
'''
metrics_folds = {}
folds = len(results)
# In the multiclass case this part finds the accuracy for the samples that truly have
# a single frame; so even when computing multiclass metrics we have to retrieve the gold
# labels from the dataset, which includes second labels as well, if available.
if optimal_t is None:
multilabeled_gold = get_gold_labels(False, None, "dataset", folds, target=False)
for i in range(folds):
# gold labels for the fold in label indicator format
gold_labels = gold_labels_test_folds[i]
size = len(gold_labels)
multiple_frame = np.where(gold_labels.sum(1)>2)
if optimal_t is None:
# create binary results for f1 scores
binary_df_pred = results[i] > 0
else:
# create binary results for f1 scores
binary_df_pred = results[i] > optimal_t[i]
# if no frame is predicted, select the frame with the highest confidence
no_label_indices = no_label_predicted(binary_df_pred)
# print("no_label_indices: ", no_label_indices)
no_label_max_pred_indices = results[i][no_label_indices].argmax(1)
binary_df_pred[no_label_indices, no_label_max_pred_indices] = True
# eliminate frames which either never predicted or never occurred
never_predicted = np.where(binary_df_pred.sum(axis=0)==0)[0]
print("Frames that are never predicted (precision not defined): ", never_predicted+1)
# eliminate frames which never occured
never_occurred = np.where(gold_labels.sum(axis=0)==0)[0]
print("Frames that never occur in the gold set (recall not defined): ", never_occurred+1)
label_indices = np.setdiff1d(np.arange(9), np.union1d(never_occurred, never_predicted))
print("Frames that are included in F1-Scores (EMs include all): ", label_indices+1)
# these will used for f1-scores
gold_labels_selected = gold_labels.iloc[:,label_indices]
binary_predictions_selected = binary_df_pred[:,label_indices]
# well-defined f1 scores require a frame to be both predicted and occur
f1_macro = metrics.f1_score(gold_labels_selected, binary_predictions_selected, average='macro')
f1_micro = metrics.f1_score(gold_labels_selected, binary_predictions_selected, average='micro')
f1_weighted = metrics.f1_score(gold_labels_selected, binary_predictions_selected, average='weighted')
# for auc we use weighted averaging
auc = metrics.roc_auc_score(gold_labels_selected, results[i][:,label_indices], average = 'weighted')
# compute evaluations about multilabeled frame predictions
# N/A if multiclass classification
if optimal_t is None:
match_multiple = np.nan
number_multiple = np.nan
argmax_gold = gold_labels.to_numpy().argmax(1)
argmax_preds = binary_df_pred.argmax(1) # binary_df_pred already contains one-hot vectors, argmax is still fine
exact_match = np.sum(argmax_preds == argmax_gold)/size
# single-labeled accuracy
multiple_frame_articles_bool = np.sum(multilabeled_gold[i], axis=1) > 1.0
results_single = np.equal(argmax_preds,argmax_gold)[~multiple_frame_articles_bool]
match_single = np.mean(results_single)
else:
exact_match = np.sum(np.equal(gold_labels, binary_df_pred).all(1))/size
multiple_frame_articles_bool = np.sum(gold_labels, axis=1) > 1.0
results_multiple = np.equal(gold_labels, binary_df_pred).loc[multiple_frame_articles_bool]
number_multiple = len(results_multiple)
match_multiple = np.sum(results_multiple.all(1))/number_multiple
# single-labeled accuracy
results_single = np.equal(gold_labels, binary_df_pred).loc[~multiple_frame_articles_bool]
number_single = len(results_single)
match_single = np.sum(results_single.all(1))/number_single
metrics_folds[i] = {"f1_macro":f1_macro,
"f1_micro":f1_micro,
"f1_weighted":f1_weighted,
"exact_match":exact_match,
"auc":auc,
"exact_match_multiple":match_multiple,
"number_multiple":number_multiple,
"exact_match_single":match_single}
return metrics_folds
def collate_results(p, thres, test_path, train_path, target=False):
# read results
pickle_in = open(p,"rb")
results = pickle.load(pickle_in)
# read gold labels for test folds
gold_labels_test_folds = get_gold_labels(False, train_path, test_path,
folds=len(results), target=target)
if train_path is not None:
assert thres in ["fold", "fold_frame"]
gold_labels_train_folds = get_gold_labels(True, train_path, test_path,
folds=len(results), target=target)
# if multiclass results are from softmax
if thres == 'multiclass':
results_mc = {}
golds_mc ={}
print(results.keys())
for fold in results.keys():
zeros = np.zeros_like(results[fold])
zeros[np.arange(zeros.shape[0]), results[fold].argmax(axis=1)] = 1.0
# select the softmax predictions that correspond to gold labels
results_mc[fold] = zeros * results[fold]
golds_mc[fold] = gold_labels_test_folds[fold]
# store results back in binarized format
results = results_mc
gold_labels_test_folds = golds_mc
if thres == 'fold':
gold_labels_train_folds = get_gold_labels(True, train_path, test_path, len(results), target)
optimal_t = get_thres_fold(gold_labels_train_folds, results, folds=1)
elif thres == 'fold_frame':
gold_labels_train_folds = get_gold_labels(True, train_path, test_path, len(results), target)
optimal_t = get_thres_fold_frame(gold_labels_train_folds, results, folds=1)
elif thres == 'sigmoid':
optimal_t = np.full(9,0.5)
elif thres == 'multiclass':
optimal_t = None
else:
raise NameError("Thresholding strategy {} not known.".format(thres))
metrics = get_metrics(optimal_t, results, gold_labels_test_folds)
s = string_for_display( | pd.DataFrame(metrics) | pandas.DataFrame |
import imgaug as ia
ia.seed(1)
# imgaug uses matplotlib backend for displaying images
#%matplotlib inline
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
from imgaug import augmenters as iaa
# imageio library will be used for image input/output
import imageio
import pandas as pd
import numpy as np
import re
import os
import glob
# this library is needed to read XML files for converting it into CSV
import xml.etree.ElementTree as ET
import shutil
def xml_to_csv(path):
xml_list = []
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
try:
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list.append(value)
except Exception:
# skip annotation entries with missing or malformed fields
pass
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
# apply the function to convert all XML files in images/ folder into labels.csv
labels_df = xml_to_csv('Training_Sailboat/')
labels_df.to_csv(('new_labels100.csv'), index=None)
# function to convert BoundingBoxesOnImage object into DataFrame
def bbs_obj_to_df(bbs_object):
# convert BoundingBoxesOnImage object into array
bbs_array = bbs_object.to_xyxy_array()
# convert array into a DataFrame ['xmin', 'ymin', 'xmax', 'ymax'] columns
df_bbs = pd.DataFrame(bbs_array, columns=['xmin', 'ymin', 'xmax', 'ymax'])
return df_bbs
height_resize = iaa.Sequential([
iaa.Resize({"height": 600, "width": 'keep-aspect-ratio'})
])
width_resize = iaa.Sequential([
iaa.Resize({"height": 'keep-aspect-ratio', "width": 600})
])
aug = iaa.Fliplr(1)  # more augmentations can be used from https://github.com/aleju/imgaug
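# Hedged alternative (an assumption about what "more augmentations" could look
# like, not part of the original script): a richer pipeline can be built the
# same way and passed to image_aug() below in place of `aug`.
aug_strong = iaa.Sequential([
    iaa.Fliplr(0.5),                     # horizontal flip half of the images
    iaa.Affine(rotate=(-10, 10)),        # small random rotations
    iaa.Multiply((0.8, 1.2)),            # brightness jitter
    iaa.GaussianBlur(sigma=(0.0, 1.0)),  # mild blur
])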
def image_aug(df, images_path, aug_images_path, image_prefix, augmentor):
# create data frame which we're going to populate with augmented image info
aug_bbs_xy = pd.DataFrame(columns=
['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
)
grouped = df.groupby('filename')
for filename in df['filename'].unique():
# get separate data frame grouped by file name
group_df = grouped.get_group(filename)
group_df = group_df.reset_index()
group_df = group_df.drop(['index'], axis=1)
# read the image
image = imageio.imread(images_path + filename)
# get bounding boxes coordinates and write into array
bb_array = group_df.drop(['filename', 'width', 'height', 'class'], axis=1).values
# pass the array of bounding boxes coordinates to the imgaug library
bbs = BoundingBoxesOnImage.from_xyxy_array(bb_array, shape=image.shape)
# apply augmentation on image and on the bounding boxes
image_aug, bbs_aug = augmentor(image=image, bounding_boxes=bbs)
# disregard bounding boxes which have fallen out of image pane
bbs_aug = bbs_aug.remove_out_of_image()
# clip bounding boxes which are partially outside of image pane
bbs_aug = bbs_aug.clip_out_of_image()
# don't perform any actions with the image if there are no bounding boxes left in it
if re.findall('Image...', str(bbs_aug)) == ['Image([]']:
pass
# otherwise continue
else:
# write augmented image to a file
imageio.imwrite(aug_images_path + image_prefix + filename, image_aug)
# create a data frame with augmented values of image width and height
info_df = group_df.drop(['xmin', 'ymin', 'xmax', 'ymax'], axis=1)
for index, _ in info_df.iterrows():
info_df.at[index, 'width'] = image_aug.shape[1]
info_df.at[index, 'height'] = image_aug.shape[0]
# rename filenames by adding the predefined prefix
info_df['filename'] = info_df['filename'].apply(lambda x: image_prefix + x)
# create a data frame with augmented bounding boxes coordinates using the function we created earlier
bbs_df = bbs_obj_to_df(bbs_aug)
# concat all new augmented info into new data frame
aug_df = pd.concat([info_df, bbs_df], axis=1)
# append rows to aug_bbs_xy data frame
aug_bbs_xy = pd.concat([aug_bbs_xy, aug_df])
# return dataframe with updated images and bounding boxes annotations
aug_bbs_xy = aug_bbs_xy.reset_index()
aug_bbs_xy = aug_bbs_xy.drop(['index'], axis=1)
return aug_bbs_xy
def resize_imgaug(df, images_path, aug_images_path, image_prefix):
# create data frame which we're going to populate with augmented image info
aug_bbs_xy = pd.DataFrame(columns=
['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
)
grouped = df.groupby('filename')
for filename in df['filename'].unique():
# Get separate data frame grouped by file name
group_df = grouped.get_group(filename)
group_df = group_df.reset_index()
group_df = group_df.drop(['index'], axis=1)
# The only difference between if and elif statements below is the use of height_resize and width_resize augmentors
# defined previously.
# If image height is greater than or equal to image width
# AND greater than 600px perform resizing augmentation shrinking image height to 600px.
if group_df['height'].unique()[0] >= group_df['width'].unique()[0] and group_df['height'].unique()[0] > 600:
# read the image
image = imageio.imread(images_path + filename)
# get bounding boxes coordinates and write into array
bb_array = group_df.drop(['filename', 'width', 'height', 'class'], axis=1).values
# pass the array of bounding boxes coordinates to the imgaug library
bbs = BoundingBoxesOnImage.from_xyxy_array(bb_array, shape=image.shape)
# apply augmentation on image and on the bounding boxes
image_aug, bbs_aug = height_resize(image=image, bounding_boxes=bbs)
# write augmented image to a file
imageio.imwrite(aug_images_path + image_prefix + filename, image_aug)
# create a data frame with augmented values of image width and height
info_df = group_df.drop(['xmin', 'ymin', 'xmax', 'ymax'], axis=1)
for index, _ in info_df.iterrows():
info_df.at[index, 'width'] = image_aug.shape[1]
info_df.at[index, 'height'] = image_aug.shape[0]
# rename filenames by adding the predefined prefix
info_df['filename'] = info_df['filename'].apply(lambda x: image_prefix + x)
# create a data frame with augmented bounding boxes coordinates using the function we created earlier
bbs_df = bbs_obj_to_df(bbs_aug)
# concat all new augmented info into new data frame
aug_df = pd.concat([info_df, bbs_df], axis=1)
# append rows to aug_bbs_xy data frame
aug_bbs_xy = pd.concat([aug_bbs_xy, aug_df])
# if image width is greater than image height
# AND wider than the 300px threshold used below, resize the image width to 600px
elif group_df['width'].unique()[0] > group_df['height'].unique()[0] and group_df['width'].unique()[0] > 300:
# read the image
image = imageio.imread(images_path + filename)
# get bounding boxes coordinates and write into array
bb_array = group_df.drop(['filename', 'width', 'height', 'class'], axis=1).values
# pass the array of bounding boxes coordinates to the imgaug library
bbs = BoundingBoxesOnImage.from_xyxy_array(bb_array, shape=image.shape)
# apply augmentation on image and on the bounding boxes
image_aug, bbs_aug = width_resize(image=image, bounding_boxes=bbs)
# write augmented image to a file
imageio.imwrite(aug_images_path + image_prefix + filename, image_aug)
# create a data frame with augmented values of image width and height
info_df = group_df.drop(['xmin', 'ymin', 'xmax', 'ymax'], axis=1)
for index, _ in info_df.iterrows():
info_df.at[index, 'width'] = image_aug.shape[1]
info_df.at[index, 'height'] = image_aug.shape[0]
# rename filenames by adding the predefined prefix
info_df['filename'] = info_df['filename'].apply(lambda x: image_prefix + x)
# create a data frame with augmented bounding boxes coordinates using the function we created earlier
bbs_df = bbs_obj_to_df(bbs_aug)
# concat all new augmented info into new data frame
aug_df = pd.concat([info_df, bbs_df], axis=1)
# append rows to aug_bbs_xy data frame
aug_bbs_xy = | pd.concat([aug_bbs_xy, aug_df]) | pandas.concat |
from glob import glob
import pandas as pd
import numpy as np # linear algebra
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from models import get_model_classif_nasnet
from utils import data_gen, chunker, read_image
labeled_files = glob('/media/ml/data_ml/dogs-vs-cats/train/*.jpg')
test_files = glob('/media/ml/data_ml/dogs-vs-cats/test1/*.jpg')
train, val = train_test_split(labeled_files, test_size=0.1, random_state=101010)
model = get_model_classif_nasnet()
batch_size = 32
h5_path = "model.h5"
checkpoint = ModelCheckpoint(h5_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
_ = model.fit_generator(
data_gen(train, batch_size),
validation_data=data_gen(val, batch_size),
epochs=10, verbose=1,
callbacks=[checkpoint],
steps_per_epoch=len(train) // batch_size,
validation_steps=len(val) // batch_size)
model.load_weights(h5_path)
preds = []
ids = []
for batch in chunker(test_files, batch_size):
X = [preprocess_input(read_image(x)) for x in batch]
X = np.array(X)
preds_batch = model.predict(X).ravel().tolist()
preds += preds_batch
df = | pd.DataFrame({'id': test_files, 'label': preds}) | pandas.DataFrame |
import pandas as pd
import json
import numpy as np
from dataclasses import dataclass
import os
from os.path import join, splitext
import unidecode
import pickle as pkl
import sys
from sklearn.model_selection import KFold
import functools
import rampwf
from sklearn.base import is_classifier
from sklearn.metrics import f1_score
from rampwf.prediction_types.base import BasePrediction
from rampwf.score_types import BaseScoreType
from rampwf.workflows import SKLearnPipeline
import warnings
PARTIES_SIGLES = [
"SOC",
"FI",
"Dem",
"LT",
"GDR",
"LaREM",
"Agir ens",
"UDI-I",
"LR",
"NI",
]
RANDOM_STATE = 777
DATA_HOME = "data"
if not sys.warnoptions:
warnings.simplefilter("ignore")
@dataclass
class Vote:
"""Base class containing all relevant basis information of the dataset"""
id: str
code_type_vote: str
libelle_type_vote: str
demandeur: str
libelle: str
nb_votants: int
date: str # turning this into a proper datetime would be nice; to revisit
vote_counts: pd.DataFrame
@classmethod
def load_from_files(cls, id, data_home=DATA_HOME, train_or_test="train"):
f_name = join(data_home, train_or_test, id)
with open(f_name + ".json", "r") as f:
vote_metadata = json.load(f)
vote_counts = (
pd.read_csv(f_name + ".csv", sep=",")
# rename the first column (parties) and use it as the index
.rename(columns={"Unnamed: 0": "party"})
.set_index("party")
)
vote = cls(
id=id,
code_type_vote=vote_metadata["code_type_vote"],
libelle_type_vote=vote_metadata["libelle_type_vote"],
demandeur=vote_metadata["demandeur"],
libelle=vote_metadata["libelle"],
nb_votants=vote_metadata["nb_votants"],
date=vote_metadata["date"],
vote_counts=vote_counts,
)
return vote
def to_X_y(self):
"""Transform a Vote object into an observation X of features (a dictionary)
and a label y
"""
number_of_dpt_per_party = {
party: sum(self.vote_counts.loc[party])
for party in self.vote_counts.index
}
X = {
"code_type_vote": self.code_type_vote,
"libelle_type_vote": self.libelle_type_vote,
"demandeur": self.demandeur,
"libelle": self.libelle,
"nb_votants": self.nb_votants,
"date": self.date,
"presence_per_party": number_of_dpt_per_party,
}
vote_columns = self.vote_counts.columns
y = {}
for party in self.vote_counts.index:
major_position = vote_columns[
np.argmax(self.vote_counts.loc[party])
]
y[party] = 1.0 * (major_position == "pours")
return X, y
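# Hedged worked example (numbers invented; only the 'pours' column name is taken
# from the code above, other column names are assumptions): if a party's
# vote_counts row were pours=20, contres=5, abstentions=2, its majority position
# is 'pours', so y[party] = 1.0; X carries per-party attendance in
# 'presence_per_party' alongside the vote metadata.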
# ----------
# score type
# ----------
class CustomF1Score(BaseScoreType):
def __init__(
self,
weights_type="log",
precision=3,
):
"""Custom weighted F1 score. Weights depend on each group's number of deputies.
Args:
weights_type (str, optional): 'log' or 'linear'. Defaults to 'log'.
precision (int, optional): decimals considered. Defaults to 3.
"""
self.name = f"Weighted F1-score ({weights_type})"
self.set_weights(path=".", type=weights_type)
self.precision = precision
def __call__(self, y_true, y_pred) -> float:
score_list = []
for i, w in enumerate(self.weights_):
score_list.append(f1_score(y_true[:, i], y_pred[:, i]))
weighted_score = np.array(score_list) @ self.weights_
return weighted_score
def set_weights(self, path, type="linear"):
"""Return the weights associated with each party. The default weight for a party
(type='linear') is simply the proportion of deputies in the party among all
deputies. If type='log', the deputy counts are passed through the natural logarithm first.
"""
file_name = join(path, "data/dpt_data", "liste_deputes_excel.csv")
dpt_data = pd.read_csv(file_name, sep=";")
groups_column_name = dpt_data.columns[-1]
counts = (
dpt_data.groupby(groups_column_name)
.nunique()["identifiant"]
.to_dict()
)
if type == "linear":
list_count = np.array([counts[key] for key in PARTIES_SIGLES])
elif type == "log":
list_count = np.log(
np.array([counts[key] for key in PARTIES_SIGLES])
)
else:
raise ValueError("Unknown value for argument 'type' :", type)
self.weights_ = list_count / np.sum(list_count)
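# Hedged numeric illustration (deputy counts invented): with type='log', a party
# of 270 deputies and one of 30 get pre-normalisation weights ln(270) ~= 5.60 and
# ln(30) ~= 3.40, i.e. roughly a 1.6:1 ratio instead of the 9:1 ratio that
# type='linear' would give.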
# -----------------------
# A little bit of reading
# -----------------------
def _read_data(path, train_or_test="train", save=True):
"""Return the features dataset X and the labels dataset y for either the train or the test"""
directory = join(path, DATA_HOME, train_or_test)
votes_names = os.listdir(directory)
votes_names = [
splitext(vote)[0] for vote in votes_names if vote.endswith(".json")
]
votes_names.sort(key=lambda name: int(splitext(name)[0][10:]))
for i, f_name in enumerate(votes_names):
vote = Vote.load_from_files(f_name, train_or_test=train_or_test)
features, label = vote.to_X_y()
if i == 0:
X = pd.DataFrame(columns=[key for key in features.keys()])
y = pd.DataFrame(columns=[key for key in label.keys()])
X.loc[f_name] = features
y.loc[f_name] = label
# Add a column equal to the index
X["vote_uid"] = X.index
y = y.to_numpy()
if save:
file_name = join(
path, DATA_HOME, train_or_test, train_or_test + "_data.pkl"
)
with open(file_name, "wb") as f:
pkl.dump((X, y), f)
return X, y
def _read_info_actors():
filename = "data/dpt_data/nosdeputes.fr_synthese_2020-11-21.csv"
df = pd.read_csv(filename, sep=";")
old_cols = [
"id",
"nom",
"prenom",
"nom_de_famille",
"date_naissance",
"sexe",
"parti_ratt_financier",
]
new_cols = [
"custom_id",
"membre_fullname",
"membre_prenom",
"membre_nom",
"membre_birthDate",
"membre_sex",
"membre_parti",
]
df.rename(
dict(zip(old_cols, new_cols)),
axis=1,
inplace=True,
)
df = df[new_cols]
return df
def _read_actor(filename):
acteur = pd.read_csv(filename, sep=";")
id = acteur["uid[1]"]
civ = acteur["etatCivil[1]/ident[1]/civ[1]"]
prenom = acteur["etatCivil[1]/ident[1]/prenom[1]"]
nom = acteur["etatCivil[1]/ident[1]/nom[1]"]
output = pd.DataFrame(
{
"membre_acteurRef": id,
"membre_civ": civ,
"membre_prenom": prenom,
"membre_nom": nom,
}
)
return output
def _read_all_actors():
all_acteur_filenames = os.listdir("data/acteur")
output = pd.DataFrame()
for filename in all_acteur_filenames:
acteur = _read_actor("data/acteur/" + filename)
# Update
if not output.empty:
output = output.append(acteur)
else:
output = acteur
return output
def get_actor_party_data():
"""
Returns general information about deputies and parties.
To be used for creating features.
Returns:
actors: pd.DataFrame with info about actors.
"""
try:
actors = | pd.read_csv("data/acteurs.csv") | pandas.read_csv |
import os
from os.path import expanduser
import altair as alt
import numpy as np
import pandas as pd
from scipy.stats.stats import pearsonr
import sqlite3
from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot
from config import dummy_start_date, dummy_end_date, cutoff_date
# %matplotlib inline
plot_start_date = dummy_start_date
plot_end_date = dummy_end_date
if cutoff_date is not None:
plot_start_date = cutoff_date
day = np.timedelta64(1, 'D')
fiction_scale = alt.Scale(domain=[True, False])
def get_data(library_paths=[expanduser('~/books/non-fiction/')]):
db_path = library_paths[0] + 'metadata.db'
conn = sqlite3.connect(db_path)
custom_column_index = dict(pd.read_sql_query("""
SELECT label, id FROM custom_columns
""", conn).to_dict(orient='split')['data'])
def tbl(name):
return 'custom_column_' + str(custom_column_index[name])
df = pd.read_sql_query(f"""
SELECT
title,
author_sort AS author,
series.name AS series,
series_index,
pubdate,
timestamp,
last_modified,
languages.lang_code AS language,
{tbl('started')}.value AS start,
{tbl('finished')}.value AS end,
{tbl('words')}.value AS words,
{tbl('pages')}.value AS pages,
{tbl('fre')}.value AS fre,
{tbl('fkg')}.value AS fkg,
{tbl('gfi')}.value AS gfi,
({tbl('shelf')}.value = 'Fiction') AS is_fiction,
ifnull({tbl('read')}.value, 0) AS is_read
FROM books
LEFT OUTER JOIN books_series_link
ON books.id = books_series_link.book
LEFT OUTER JOIN series
ON books_series_link.series = series.id
JOIN books_languages_link
ON books.id = books_languages_link.book
JOIN languages
ON books_languages_link.lang_code = languages.id
LEFT OUTER JOIN {tbl('pages')}
ON {tbl('pages')}.book = books.id
LEFT OUTER JOIN {tbl('words')}
ON {tbl('words')}.book = books.id
LEFT OUTER JOIN {tbl('fre')}
ON {tbl('fre')}.book = books.id
LEFT OUTER JOIN {tbl('fkg')}
ON {tbl('fkg')}.book = books.id
LEFT OUTER JOIN {tbl('gfi')}
ON {tbl('gfi')}.book = books.id
JOIN books_{tbl('shelf')}_link
ON books_{tbl('shelf')}_link.book = books.id
JOIN {tbl('shelf')}
ON {tbl('shelf')}.id = books_{tbl('shelf')}_link.value
LEFT OUTER JOIN {tbl('started')}
ON {tbl('started')}.book = books.id
LEFT OUTER JOIN {tbl('finished')}
ON {tbl('finished')}.book = books.id
LEFT OUTER JOIN {tbl('read')} ON {tbl('read')}.book = books.id
WHERE
{tbl('shelf')}.value = 'Fiction'
OR {tbl('shelf')}.value = 'Nonfiction'
""", conn, parse_dates=['start', 'end', 'pubdate', 'timestamp',
'last_modified'])
# Books with no page count are either simply placeholders, not a
# proper part of the library, or have just been added. In both
# cases, it is OK to ignore them.
df = df.loc[df.pages.notna()]
# Fix data types
df.language = df.language.astype('category')
df.pages = df.pages.astype('int64')
# We cannot make df.words an int64 column, as some PDF files have
# no word count associated with them and int64 columns cannot
# contain NAs.
df.is_fiction = df.is_fiction.astype(bool)
df.is_read = df.is_read.astype(bool)
# Compute intermediate columns
df.pubdate = df.pubdate.map(to_local)
df = df.assign(words_per_page=df.words / df.pages,
words_per_day=df.words / ((df.end - df.start) / day))
def to_numeric(x):
return | pd.to_numeric(x, errors='coerce', downcast='integer') | pandas.to_numeric |
from tqdm.notebook import trange, tqdm
import pandas as pd
import matplotlib
import numpy as np
# import csv
from itertools import product
from functools import reduce
import pickle as pkl
from warnings import catch_warnings
from warnings import filterwarnings
import time
import datetime
from multiprocessing import cpu_count, Pool
# from joblib import Parallel
# from joblib import delayed
from dateutil.relativedelta import relativedelta
import pandas_market_calendars as mcal
import mplfinance as mpl
import pmdarima as pm
from pmdarima import pipeline
from pmdarima.metrics import smape
from sklearn.metrics import mean_squared_error as mse
import prophet
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import ticker
from matplotlib.dates import DateFormatter
from matplotlib.ticker import FuncFormatter
# plt.style.use('ggplot')
sns.set_theme(style="darkgrid")
# import matplotlib.dates as mdates
# import matplotlib.units as munits
# converter = mdates.ConciseDateConverter()
# munits.registry[np.datetime64] = converter
# munits.registry[datetime.date] = converter
# munits.registry[datetime.datetime] = converter
font = {'family' : 'sans-serif',
'sans-serif' : 'Tahoma', # Verdana
'weight' : 'normal',
'size' : '16'}
matplotlib.rc('font', **font)
pd.set_option('display.max_columns',None)
pd.set_option('display.max_rows',25)
try:
from code.functions import *
except Exception as e:
from functions import *
from pathlib import Path
TOP = Path(__file__ + '../../..').resolve()
NYSE = mcal.get_calendar('NYSE')
CBD = NYSE.holidays()
# print(f'Pmdarima_Model.py loaded from {TOP}/data..')
class Pmdarima_Model:
def __init__(self, df, data_name, n, periods, freq, train_size=80, trend='c', with_intercept='auto',
order=(0,1,0), s_order=(0,0,0), seas=0, fit_seas=False, f_seas=252, k=4,
estimate_diffs=False, impute=False, AA_d=None, AA_D=None,
#max_d=2, max_p=2, max_q=2, max_D=2, max_P=2, max_Q=2,
date=True, fourier=True, box=False, log=False, verbose=1):
try:
assert(type(df) in (pd.Series, pd.DataFrame)), "Data is not of type Pandas Series or DataFrame."
assert(type(df.index) == (pd.DatetimeIndex)), "Data index is not of type Pandas DatetimeIndex."
except AssertionError as e:
print(e)
print('Failed to load data.')
raise
# if d:
# try:
# assert(order[1] == d), "Variables d and d in order conflict."
# except AssertionError as e:
# print(e)
# print('Failed to initialize Class.')
# raise
if type(df) == pd.Series:
self.df = pd.DataFrame(df)
else:
self.df = df
if impute:
self.df = df.interpolate()
self.hist_dates_df = pd.DataFrame(self.df.index, columns=['date'])
self.train_size = train_size
self.df_train, self.df_test = pm.model_selection.train_test_split(self.df,
train_size = self.train_size/100)
self.dates = df.index
self.length = df.shape[0]
self.data_name = data_name
self.ts = data_name.replace(' ', '_')
self.timeframe = f'{n} {periods.title()}'
self.tf = f'{n}{periods[0].upper()}'
self.freq = freq
self.f = freq.split()[0] + freq.split()[1][0].upper()
self.m = seas
self.f_m = f_seas
self.k = k
self.estimate_diffs = estimate_diffs
# self.arima_order = order
self.p = order[0]
self.d = order[1]
self.q = order[2]
self.fit_seas = fit_seas
self.P = s_order[0]
self.D = s_order[1]
self.Q = s_order[2]
self.t = trend
self.n_diffs = AA_d
self.ns_diffs = AA_D
if self.estimate_diffs:
self.__estimate_diffs()
self.with_intercept = with_intercept
# self.no_intercept = no_intercept
self.mod_order = f'({self.p}, {self.d}, {self.q})[\'{self.t}\']'
self.date = date
self.fourier = fourier
self.box = box
self.log = log
self.__train_test_split_dates()
self.AA_best_params, self.AA_mod_pipe = self.__reset_mod_params()
self.GS_best_params, self.GS_best_mod_pipe = self.__reset_mod_params()
self.mod_params, self.mod_params_df, self.mod_pipe = self.__reset_mod_params('adhoc')
self.y_hat = None
self.conf_ints = None
self.AIC = None
self.RMSE = np.inf
self.RMSE_pc = np.inf
self.SMAPE = np.inf
self.GS_first_mod = True
self.mod_CV_filepath = f'{TOP}/model_CV_scores/{self.ts}_{self.tf}_{self.f}.csv'
print('Successfully created instance of Class Pmdarima_Model.') if verbose else None
def __estimate_diffs(self):
'''
Helper function for calculation of diffs to use if
estimate_diffs=True is passed at class initialization.
'''
kpss_diffs = pm.arima.ndiffs(self.df_train, alpha=0.05, test='kpss', max_d=6)
adf_diffs = pm.arima.ndiffs(self.df_train, alpha=0.05, test='adf', max_d=6)
self.n_diffs = max(adf_diffs, kpss_diffs)
if self.fit_seas:
ocsb_diffs = pm.arima.nsdiffs(self.df_train, m=self.m, test='ocsb', max_D=6)
ch_diffs = pm.arima.nsdiffs(self.df_train, m=self.m, test='ch', max_D=6)
self.ns_diffs = max(ocsb_diffs, ch_diffs)
def __reset_mod_params(self, init=None):
'''
        Helper function for initializing a human-readable model params string
        as passed at class initialization.
'''
if init: # for an adhoc run
mod_params, mod_params_df, mod_pipe = self.__setup_mod_params(self.p, self.d, self.q,
self.t, self.P, self.D, self.Q, self.m, self.with_intercept,
self.f_m, self.k, self.date, self.fourier, self.box,
self.log, func='adhoc', verbose=1)
return mod_params, mod_params_df, mod_pipe
else:
mod_pipe = None
mod_params = None
return mod_params, mod_pipe
@staticmethod
def __unpickle_model(ts, tf, f, func='GS'):
pkl_filepath = Pmdarima_Model.__get_pkl_filepath(ts, tf, f, func=func)
print(f'Loading best model from {pkl_filepath}.')
mod_file = open(pkl_filepath,'rb')
mod_data = pkl.load(mod_file)
mod_file.close()
return mod_data
@staticmethod
def __get_pkl_filepath(ts, tf, f, func='GS'):
# pkl_filepath = f'{TOP}/models/{self.ts}_{self.tf}_{self.f}_{func}_best_model.pkl'
pkl_filepath = f'{TOP}/models/{ts}_{tf}_{f}_{func}_best_model.pkl'
return pkl_filepath
def __pickle_model(self, func='AA', verbose=1):
'''
Helper function for pickling a model along with its params as a
human-readable string.
'''
def __pickle_it(params, pipe, params_df, scores, results, func_type='adhoc', verbose=1):
mod_file = open(pkl_filepath,'wb')
pkl.dump((params, pipe, params_df, scores, results), mod_file)
# if func_type == 'AutoARIMA':
# pkl.dump((self.AA_best_params, self.AA_mod_pipe, self.AA_best_mod_params_df, scores, results), mod_file)
# elif func_type == 'GridSearchCV':
# pkl.dump((self.GS_best_params, self.GS_best_mod_pipe, self.GS_best_mod_params_df, scores, results), mod_file)
# else: # func_type == 'adhoc'
# pkl.dump((self.mod_params, self.mod_pipe, self.mod_params_df, scores, results), mod_file)
mod_file.close()
scores = (self.AIC, self.RMSE, self.RMSE_pc, self.SMAPE)
results = (self.y_hat, self.conf_ints)
if func == 'AA':
func_type = 'AutoARIMA'
params = self.AA_best_params
pipe = self.AA_mod_pipe
params_df = self.AA_best_mod_params_df
elif func == 'GS':
func_type = 'GridSearchCV'
params = self.GS_best_params
pipe = self.GS_best_mod_pipe
params_df = self.GS_best_mod_params_df
else: # func == 'adhoc':
func_type = 'adhoc'
params = self.mod_params
pipe = self.mod_pipe
params_df = self.mod_params_df
# var = self.data_name.lower()
# pkl_filepath = __get_pkl_filepath(func='GS')
# f'{TOP}/models/{self.ts}_{self.tf}_{self.f}_{func}_best_model.pkl'
pkl_filepath = Pmdarima_Model.__get_pkl_filepath(self.ts, self.tf, self.f, func=func)
if os.path.exists(pkl_filepath):
# mod_file = open("../models/TSY_10Y_Note_3Y_1D_GS_best_model.pkl",'rb')
# mod_file = open(pkl_filepath,'r+b')
# mod_data = pkl.load(mod_file)
mod_data = Pmdarima_Model.__unpickle_model(self.ts, self.tf, self.f, func=func)
try:
if self.RMSE < mod_data[3][2]:
__pickle_it(params, pipe, params_df, scores, results, func_type, verbose)
print(f'Model outperforms existing best {func_type} model at {pkl_filepath}, overwriting.') if verbose else None
else:
# mod_file.close()
print(f'Model did not outperform existing {func_type} model at {pkl_filepath}, not pickling model.') if verbose else None
return
except IndexError:
__pickle_it(params, pipe, params_df, scores, results, func_type, verbose)
print('Model file contains missing data, overwriting.') if verbose else None
else:
            # __pickle_it opens the file itself, so no separate open() is needed here
            __pickle_it(params, pipe, params_df, scores, results, func_type, verbose)
print(f'Saved best {func_type} model as {pkl_filepath}.') if verbose else None
return
def __split_df_dates(self, train, test):
'''
Helper function of splitting train and test sets into date variables
as X and data variables as y.
'''
X_train = pd.DataFrame(train.index)
y_train = train.values
X_test = pd.DataFrame(test.index, index=range(X_train.size, self.length))
y_test = test.values
return X_train, y_train, X_test, y_test
def __train_test_split_dates(self):
'''
Helper function for initializing the date split train vs test sets.
'''
self.X_train, self.y_train, self.X_test, self.y_test = self.__split_df_dates(self.df_train, self.df_test)
# return self.X_train, self.y_train, self.X_test, self.y_test
def __fit_predict(self, model, days_fc, new_dates, index_fc, hist_df, hist_dates_df, en_ex, new_dates_df=None, exog_df=None, verbose=1):
# model.fit(self.df, hist_dates_df)
'''
Helper function for fitting a model on the full input DataFrame and
running an out of sample prediction.
For final predictions on endogenous variable, `hist_df` and `exog_df` must have 'date' as a column - function will convert if found as index instead.
'''
if en_ex == 'exog':
model.fit(y=self.df, X=hist_dates_df)
print('Successfully fit model on historical observations.') if verbose else None
y_hat, conf_ints = model.predict(X=new_dates_df, return_conf_int=True)
fc_df = pd.DataFrame(y_hat, index=index_fc, columns=self.df.columns)
fc_date_df = pd.DataFrame(zip(new_dates, y_hat), index=index_fc, columns=['date', self.df.columns[0]])
fc_date_df.set_index('date', inplace=True)
elif en_ex == 'endo':
if type(exog_df.index) == pd.DatetimeIndex:
exog_df.reset_index(inplace=True)
if type(hist_df.index) == pd.DatetimeIndex:
hist_df.reset_index(inplace=True)
model.fit(y=self.df, X=hist_df)
print('Successfully fit model on historical observations.') if verbose else None
y_hat, conf_ints = model.predict(X=exog_df, return_conf_int=True)
# y_hat, conf_ints = self.__run_stepwise_fc(self.exog_df, model, verbose)
# results = model.predict(n_periods=days_fc, X=exog_df, return_conf_int=True)
fc_date_df = pd.DataFrame(zip(new_dates, y_hat), index=index_fc, columns=['date', self.df.columns[0]])
fc_date_df.set_index('date', inplace=True)
fc_df = fc_date_df
self.df_with_fc = self.df.append(fc_date_df)
print(f'Successfully forecasted {days_fc} days forward.') if verbose else None
# fc_df = pd.DataFrame(zip(self.new_dates_df.date.values,y_hat), columns=['date','close'])
return fc_df, y_hat, conf_ints
# return fc_df, results
# return results
# @classmethod
# def get_next_dates(cls, today, df_size, days):
@staticmethod
def __get_next_dates(today, df_size, days_fc, freq=CBD):
'''
Static method for getting new dates for out of sample predictions.
Returns a list of Pandas Timestamps, a list of numerical indices extending
the original numerical indices of the input DataFrame, and a DataFrame consisting
of the two aforementioned lists.
'''
next_day = today + freq
new_dates = pd.date_range(start=next_day, periods=days_fc, freq=freq)
index_fc = range(df_size, df_size + days_fc)
new_dates_df = pd.DataFrame(new_dates, index=index_fc, columns=['date'])
return new_dates, index_fc, new_dates_df
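    # Illustration (assumed values): for a frame of 500 rows ending on Friday
    # 2021-06-04, __get_next_dates(pd.Timestamp('2021-06-04'), 500, 3) returns
    # the business days 2021-06-07..09, the integer indices range(500, 503), and
    # a DataFrame with a single 'date' column holding those timestamps.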
@classmethod
def join_exog_data(cls, *args):
'''
Takes any number of DataFrames with matching indexes and performs a join.
First DataFrame must be the dates_df. Number of observations in each must match.
'''
# try:
# assert(len(set(map(lambda df: df.shape, args))) == 1), "Input DataFrame shapes do not match."
# except AssertionError as e:
# print(e)
# print('Failed to perform join.')
# raise
# today = args[0].date.iloc[-1]
# df_size = args[0].size
# days =
# index_fc, new_dates_df = cls.get_next_dates(today, df_size, days)
# args = [new_dates_df, args]
        exog_cat_df = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True), args)
        return exog_cat_df
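    # Example (sketch): joining the forecast dates frame with hypothetical
    # exogenous frames that share the same integer index.
    # exog_cat_df = Pmdarima_Model.join_exog_data(new_dates_df, fourier_df, holiday_df)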
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_data_process.ipynb (unless otherwise specified).
__all__ = ['imgids_from_directory', 'imgids_testing', 'read_img', 'load_RGBY_image', 'save_image', 'CellSegmentator',
'load_segmentator', 'get_cellmask', 'encode_binary_mask', 'coco_rle_encode', 'rle_encode', 'rle_decode',
'mask2rles', 'rles2bboxes', 'segment_image', 'segment_images', 'resize_image', 'crop_image',
'remove_faint_greens', 'pad_to_square', 'load_seg_trn', 'split_cells', 'generate_crops', 'fill_targets',
'generate_meta', 'get_meta', 'create_split_file', 'create_random_split', 'load_match_info',
'generate_noleak_split', 'get_img_mean_std']
# Cell
import os
import ast
from pathlib import Path
from itertools import groupby
import functools
import mlcrate
from multiprocessing import Pool
from pycocotools import mask as mutils
from pycocotools import _mask as coco_mask
import numpy as np
import pandas as pd
import cv2, PIL
import zlib
import base64
import zipfile
import uuid
from sklearn.preprocessing import LabelEncoder
from .config.config import *
from .utils.common_util import *
# Cell
def imgids_from_directory(path):
if isinstance(path, str):
path = Path(path)
imgids = set(n.stem.split('_')[0] for n in path.iterdir())
return list(imgids)
# Cell
imgids_testing = [
'000a6c98-bb9b-11e8-b2b9-ac1f6b6435d0',
'001838f8-bbca-11e8-b2bc-ac1f6b6435d0',
'000c99ba-bba4-11e8-b2b9-ac1f6b6435d0',
'a34d8680-bb99-11e8-b2b9-ac1f6b6435d0',
'000a9596-bbc4-11e8-b2bc-ac1f6b6435d0']
# Cell
def read_img(dir_data, image_id, color, image_size=None, suffix='.png'):
filename = dir_data/f'{image_id}_{color}{suffix}'
assert filename.exists(), f'not found {filename}'
img = cv2.imread(str(filename), cv2.IMREAD_UNCHANGED)
if image_size is not None:
img = cv2.resize(img, (image_size, image_size))
if img.max() > 255:
img_max = img.max()
img = (img/255).astype('uint8')
return img
def load_RGBY_image(dir_data, image_id,
rgb_only=False, suffix='.png', image_size=None):
red, green, blue = [
read_img(dir_data, image_id, color, image_size, suffix)
for color in ('red', 'green', 'blue')]
channels = [red, green, blue]
if not rgb_only:
yellow = read_img(
dir_data, image_id, "yellow", image_size, suffix)
channels.append(yellow)
stacked_images = np.transpose(np.array(channels), (1, 2, 0))
return stacked_images
# Cell
def save_image(dst, imgid, img):
dst = Path(dst)
for ch, color in enumerate(['red', 'green', 'blue', 'yellow']):
cv2.imwrite(str(dst / f'{imgid}_{color}.png'), img[..., ch])
# Cell
import hpacellseg.cellsegmentator as cellsegmentator
from hpacellseg.utils import label_cell, label_nuclei
from tqdm import tqdm
class CellSegmentator(cellsegmentator.CellSegmentator):
def __init__(self, nuc_model, cell_model, *args, **kwargs):
nuc_model = str(nuc_model)
cell_model = str(cell_model)
super().__init__(nuc_model, cell_model, *args, **kwargs)
def __call__(self, red, yellow, blue):
'''
`red`: list
Red images' file paths.
`yellow`: list
Yellow images' file paths.
`blue`: list
Blue images' file paths.
'''
assert len(red) == len(yellow) == len(blue)
if isinstance(red[0], Path):
red, yellow, blue = (
[str(n) for n in fns]
for fns in [red, yellow, blue])
segs_nucl = self.pred_nuclei(blue)
segs_cell = self.pred_cells([red, yellow, blue])
masks = []
for seg_nucl, seg_cell in zip(segs_nucl, segs_cell):
mask_nucl, mask_cell = label_cell(seg_nucl, seg_cell)
masks.append((mask_nucl, mask_cell))
return masks
def load_segmentator(
dir_segmentator_models, scale_factor=0.25, device="cuda",
padding=True, multi_channel_model=True):
model_nucl = dir_segmentator_models / 'nuclei-model.pth'
model_cell = dir_segmentator_models / 'cell-model.pth'
segmentator = CellSegmentator(
model_nucl, model_cell,
scale_factor=scale_factor, device=device, padding=padding,
multi_channel_model=multi_channel_model)
return segmentator
def get_cellmask(img, segmentator):
img_r, img_y, img_b = img[...,0], img[...,3], img[...,2]
masks = segmentator(red=[img_r], yellow=[img_y], blue=[img_b])
_, mask = masks[0]
return mask
# Cell
def encode_binary_mask(mask):
"""Converts a binary mask into OID challenge encoding ascii text."""
# check input mask --
if mask.dtype != np.bool:
raise ValueError(
"encode_binary_mask expects a binary mask, received dtype == %s" %
mask.dtype)
mask = np.squeeze(mask)
if len(mask.shape) != 2:
raise ValueError(
"encode_binary_mask expects a 2d mask, received shape == %s" %
mask.shape)
# convert input mask to expected COCO API input --
mask_to_encode = mask.reshape(mask.shape[0], mask.shape[1], 1)
mask_to_encode = mask_to_encode.astype(np.uint8)
mask_to_encode = np.asfortranarray(mask_to_encode)
# RLE encode mask --
encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"]
# compress and base64 encoding --
binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION)
base64_str = base64.b64encode(binary_str)
return base64_str.decode()
def coco_rle_encode(bmask):
rle = {'counts': [], 'size': list(bmask.shape)}
counts = rle.get('counts')
for i, (value, elements) in enumerate(groupby(bmask.ravel(order='F'))):
if i == 0 and value == 1:
counts.append(0)
counts.append(len(list(elements)))
return rle
# Cell
def rle_encode(img, mask_val=1):
"""
Turns our masks into RLE encoding to easily store them
and feed them into models later on
https://en.wikipedia.org/wiki/Run-length_encoding
Args:
img (np.array): Segmentation array
mask_val (int): Which value to use to create the RLE
Returns:
RLE string
"""
dots = np.where(img.T.flatten() == mask_val)[0]
run_lengths = []
prev = -2
for b in dots:
if (b>prev+1): run_lengths.extend((b + 1, 0))
run_lengths[-1] += 1
prev = b
return ' '.join([str(x) for x in run_lengths])
def rle_decode(rle_string, height, width):
""" Convert RLE sttring into a binary mask
Args:
rle_string (rle_string): Run length encoding containing
segmentation mask information
height (int): Height of the original image the map comes from
width (int): Width of the original image the map comes from
Returns:
Numpy array of the binary segmentation mask for a given cell
"""
rows,cols = height,width
rle_numbers = [int(num_string) for num_string in rle_string.split(' ')]
rle_pairs = np.array(rle_numbers).reshape(-1,2)
img = np.zeros(rows*cols,dtype=np.uint8)
for index,length in rle_pairs:
index -= 1
img[index:index+length] = 255
img = img.reshape(cols,rows)
img = img.T
img = (img / 255).astype(np.uint8)
return img
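# Example (sketch): round-tripping a small binary mask through the RLE helpers.
# bmask = np.zeros((4, 4), dtype=np.uint8); bmask[1:3, 1:3] = 1
# rle = rle_encode(bmask)            # -> '6 2 10 2'
# restored = rle_decode(rle, 4, 4)
# assert (restored == bmask).all()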
# Cell
def mask2rles(mask):
'''
Args:
mask (np.array): 2-D array with discrete values each
representing a different class or object.
rles (list): COCO run-length encoding:
{'size': [height, width],
'counts': encoded RLE}
'''
ids_cell = np.unique(mask)
rles = []
for id in ids_cell:
if id == 0:
continue
bmask = np.where(mask == id, 1, 0)
bmask = np.asfortranarray(bmask).astype(np.uint8)
rle = mutils.encode(bmask)
rles.append(rle)
return rles
# Cell
def rles2bboxes(rles):
if len(rles) == 0:
return []
bboxes = mutils.toBbox(rles)
bboxes[:,2] += bboxes[:,0]
bboxes[:,3] += bboxes[:,1]
return bboxes
# Cell
def segment_image(dir_img=None, imgid=None, segmentator=None):
img = load_RGBY_image(dir_img, imgid)
mask = get_cellmask(img, segmentator)
rles = mask2rles(mask)
bboxes = rles2bboxes(rles)
ids = [f'{imgid}_{i}' for i in range(len(rles))]
df = pd.DataFrame(
{'Id': ids, 'rle': rles, 'bbox': list(bboxes)})
return df
def segment_images(dir_img, imgids, segmentator):
df = pd.DataFrame()
for imgid in tqdm(imgids, total=len(imgids)):
df_img = segment_image(dir_img, imgid, segmentator)
df = df.append(df_img, ignore_index=True)
return df
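# Example (sketch, hypothetical paths): build the segmentator and segment a few images.
# segmentator = load_segmentator(Path('models/hpa_cellseg'))
# df_seg = segment_images(Path('data/train'), imgids_testing, segmentator)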
# Cell
def resize_image(img, sz):
return cv2.resize(img, (sz, sz), interpolation=cv2.INTER_LINEAR)
# Cell
def crop_image(img, bbox, bmask=None):
'''
Args:
img (np.array): Image to be cropped by ``bbox``.
bbox (np.array): Bounding box in terms of [x0, y0, x1, y1].
bmask (np.array, np.uint8): Binary mask for the cell.
'''
bbox = bbox.astype(np.int16)
x0, y0, x1, y1 = bbox
crop = img[y0:y1, x0:x1]
if bmask is not None:
crop = bmask[y0:y1, x0:x1][...,None] * crop
return crop
# Cell
def remove_faint_greens(xs, crops, green_thres=64):
assert len(xs) == len(crops)
xs_out = []
for x, crop in zip(xs, crops):
if crop[...,1].max() > green_thres:
xs_out.append(x)
return xs_out
# Cell
def pad_to_square(img):
'''
Pad an image to a square size, centering it as much as possible.
'''
h, w, c = img.shape
if h == w:
return img
elif h < w:
img_padded = np.zeros((w, w, c), dtype=img.dtype)
offset0 = (w - h) // 2
offset1 = (w - h) - offset0
img_padded[offset0:-offset1, :] = img.copy()
else:
img_padded = np.zeros((h, h, c), dtype=img.dtype)
offset0 = (h - w) // 2
offset1 = (h - w) - offset0
img_padded[:, offset0:-offset1] = img.copy()
return img_padded
# Cell
def load_seg_trn(pth_csv):
'''
Loads @dscettler8845's segmentation results for train set.
'''
    df = pd.read_csv(pth_csv)
    # The original source is truncated here; presumably the frame is returned as-is.
    return df
import os
import pandas as pd
import numpy as np
from collections import Counter
from imblearn.datasets import make_imbalance
from imblearn.over_sampling import SMOTE, ADASYN
from sklearn.utils import shuffle
os.chdir('/content/gdrive/My Drive/training_testing_data/')
train = pd.read_csv('train_data_rp_3_IMBALANCED.csv')
X_train = train.iloc[:, :-1]
X_train = X_train.values
Y_train = train.iloc[:, -1:]
Y_train = Y_train.values
oversample = SMOTE()
X_train_SMOTE, Y_train_SMOTE = oversample.fit_resample(X_train, Y_train)
print('SMOTE:', sorted(Counter(Y_train_SMOTE).items()))
X_train_SMOTE, Y_train_SMOTE = shuffle(X_train_SMOTE, Y_train_SMOTE, random_state=42)
X_train_SMOTE = pd.DataFrame(X_train_SMOTE)
Y_train_SMOTE = pd.DataFrame(Y_train_SMOTE)
train_SMOTE = pd.concat([X_train_SMOTE, Y_train_SMOTE], axis=1, ignore_index=True)
train_SMOTE.to_csv('train_data_rp_3_SMOTE.csv', index=False)
oversample = ADASYN()
X_train_ADASYN, Y_train_ADASYN = oversample.fit_resample(X_train, Y_train)
print('ADASYN:', sorted(Counter(Y_train_ADASYN).items()))
X_train_ADASYN, Y_train_ADASYN = shuffle(X_train_ADASYN, Y_train_ADASYN, random_state=42)
X_train_ADASYN = pd.DataFrame(X_train_ADASYN)
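# The original script is truncated here; the remaining steps presumably mirror
# the SMOTE branch above (output filename is an assumption):
Y_train_ADASYN = pd.DataFrame(Y_train_ADASYN)
train_ADASYN = pd.concat([X_train_ADASYN, Y_train_ADASYN], axis=1, ignore_index=True)
train_ADASYN.to_csv('train_data_rp_3_ADASYN.csv', index=False)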
import pandas as pd
from pathlib import Path
import os
from xlrd import open_workbook, XLRDError
class Ballistics:
def __init__(self, csv='./ballistics.csv', min_range=-1, max_range=-1, step=-1, range_col='Range', cols=[]):
csv_file = Path(csv)
if csv_file.is_file():
#print("File Found")
filename = os.path.split(csv)
ext = filename[1].split('.')
ext = ext[len(ext) - 1]
#print(filename[1], ext)
if ext == 'csv':
self.orig_ballistics = self.ballistics = pd.read_csv(csv)
elif ext == 'xls' or ext == 'xlsx':
try:
open_workbook(csv)
except XLRDError:
self.orig_ballistics = self.ballistics = pd.DataFrame()
print("Not A Invalid Excel File!")
else:
self.orig_ballistics = self.ballistics = pd.read_excel(csv)
else:
self.orig_ballistics = self.ballistics = pd.DataFrame()
print("Invalid File: Load CSV or Excel")
else:
self.orig_ballistics = self.ballistics = pd.DataFrame()
self.range_col = range_col
self.setrange(min_range, max_range)
self.selectcolumns(cols)
def setorigballistics(self, b):
self.orig_ballistics = b
def reset(self):
self.ballistics = self.orig_ballistics
def setrange(self, min_range=-1, max_range=-1, step=-1):
min_ranges = pd.DataFrame()
        max_ranges = pd.DataFrame()
import pandas as pd
from pathlib import Path
import numpy as np
import pylab as pl
from scipy.signal import find_peaks
from my_general_helpers import butter_lowpass_filter
def angle_between_points_signcorrect(x1, y1, x2, y2, x3, y3):
ang1 = np.degrees(np.arctan2(y1 - y2, x1 - x2))
ang2 = np.degrees(np.arctan2(y3 - y2, x3 - x2))
if np.ndim(x1) == 0:
if ang1 < 0:
ang1 = 360 + ang1
if ang2 < 0:
ang2 = 360 + ang2
if ang2 > ang1:
ang2 -= 360
else:
ind = np.where(ang1 < 0)
ang1[ind] = 360 + ang1[ind]
ind = np.where(ang2 < 0)
ang2[ind] = 360 + ang2[ind]
ind = np.where(ang2 > ang1)
ang2[ind] -= 360
return (ang1 - ang2) - 180
def curvature(x1, y1, x2, y2, x3, y3):#, x4, y4, x5, y5):
dx1 = x1 - x2
dy1 = y1 - y2
dx2 = x2 - x3
dy2 = y2 - y3
# dx3 = x2 - x3
# dy3 = y2 - y3
# dx4 = x3 - x4
# dy4 = y3 - y4
#
# dx5 = x3 - x4
# dy5 = y3 - y4
# dx6 = x4 - x5
# dy6 = y4 - y5
dotProduct1 = dx1 * dx2 + dy1 * dy2
modOfVectors1 = np.sqrt(dx1**2 + dy1**2) * np.sqrt(dx2**2 + dy2**2)
#
# dotProduct2 = dx3 * dx4 + dy3 * dy4
# modOfVectors2 = np.sqrt(dx3**2 + dy3**2) * np.sqrt(dx4**2 + dy4**2)
#
# dotProduct3 = dx5 * dx6 + dy5 * dy6
# modOfVectors3 = np.sqrt(dx5**2 + dy5**2) * np.sqrt(dx6**2 + dy6**2)
return np.degrees(np.arccos(dotProduct1/modOfVectors1))# + \
#np.degrees(np.arccos(dotProduct2/modOfVectors2)) + \
#np.degrees(np.arccos(dotProduct3/modOfVectors3))
#
# def projector_transformation_function(pixel_brightness):
# return 2900 * (pixel_brightness/255) ** 2.2 + 41
def luminance_equation(x, y):
r = np.sqrt(x**2 + y**2)
if r > 5.5: # the wall looks dark, include that, because this is the true luminance profile
return 255*((5.5 - 3) ** 2 / 9 - (r-5.5))
return 255*((r - 3) ** 2 / 9) ## 1==410 as measured with IPhone
#print(luminance_equation(0,0))
print(luminance_equation(1,0))
print(luminance_equation(2,0)) ## Dark ring area
print(luminance_equation(3,0))
print(luminance_equation(4,0))
print(luminance_equation(5,0))
# print(projector_transformation_function(luminance_equation(3.9, 0)))
# print(projector_transformation_function(luminance_equation(4, 0)))
# print(projector_transformation_function(luminance_equation(4.1, 0)))
# k = [luminance_equation(x, 0) for x in np.arange(0, 6, 0.1)]
# pl.plot(k)
# pl.show()
# sf
# fgh
#sdf
# vals = [luminance_equation(x, 0) for x in np.arange(0, 6, 0.1)]
# pl.plot(vals)
# pl.show()
# dfg
root_path = Path("/Users/arminbahl/Desktop/preprocessed data/maxwell_paper")
df = pd.read_hdf(root_path / "all_data_deepposekit.h5", key="raw_data")
import re
import datetime
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# ---------------------------------------------------
# Person data methods
# ---------------------------------------------------
class TransformGenderGetFromName:
"""Gets clients' genders from theirs russian second names.
Parameters:
column_name (str): Column name in InsolverDataFrame containing clients' names, column type is string.
column_gender (str): Column name in InsolverDataFrame for clients' genders.
gender_male (str): Return value for male gender in InsolverDataFrame, 'male' by default.
gender_female (str): Return value for female gender in InsolverDataFrame, 'female' by default.
"""
def __init__(self, column_name, column_gender, gender_male='male', gender_female='female'):
self.priority = 0
self.column_name = column_name
self.column_gender = column_gender
self.gender_male = gender_male
self.gender_female = gender_female
@staticmethod
def _gender(client_name, gender_male, gender_female):
if pd.isnull(client_name):
gender = None
elif len(client_name) < 2:
gender = None
elif client_name.upper().endswith(('ИЧ', 'ОГЛЫ')):
gender = gender_male
elif client_name.upper().endswith(('НА', 'КЫЗЫ')):
gender = gender_female
else:
gender = None
return gender
def __call__(self, df):
df[self.column_gender] = df[self.column_name].apply(self._gender, args=(self.gender_male, self.gender_female,))
return df
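# Example (sketch, hypothetical column names):
# df = TransformGenderGetFromName(column_name='client_name', column_gender='gender')(df)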
class TransformAgeGetFromBirthday:
"""Gets clients' ages in years from theirs birth dates and policies' start dates.
Parameters:
column_date_birth (str): Column name in InsolverDataFrame containing clients' birth dates, column type is date.
column_date_start (str): Column name in InsolverDataFrame containing policies' start dates, column type is date.
column_age (str): Column name in InsolverDataFrame for clients' ages in years, column type is int.
"""
def __init__(self, column_date_birth, column_date_start, column_age):
self.priority = 0
self.column_date_birth = column_date_birth
self.column_date_start = column_date_start
self.column_age = column_age
@staticmethod
def _age_get(datebirth_datestart):
date_birth = datebirth_datestart[0]
date_start = datebirth_datestart[1]
if pd.isnull(date_birth):
age = None
elif pd.isnull(date_start):
age = None
elif date_birth > datetime.datetime.now():
age = None
elif date_birth.year < datetime.datetime.now().year - 120:
age = None
elif date_birth > date_start:
age = None
else:
age = int((date_start - date_birth).days // 365.25)
return age
def __call__(self, df):
df[self.column_age] = df[[self.column_date_birth, self.column_date_start]].apply(self._age_get, axis=1)
return df
class TransformAge:
"""Transforms values of drivers' minimum ages in years.
Values under 'age_min' are invalid. Values over 'age_max' will be grouped.
Parameters:
column_driver_minage (str): Column name in InsolverDataFrame containing drivers' minimum ages in years,
column type is integer.
age_min (int): Minimum value of drivers' age in years, lower values are invalid, 18 by default.
age_max (int): Maximum value of drivers' age in years, bigger values will be grouped, 70 by default.
"""
def __init__(self, column_driver_minage, age_min=18, age_max=70):
self.priority = 1
self.column_driver_minage = column_driver_minage
self.age_min = age_min
self.age_max = age_max
@staticmethod
def _age(age, age_min, age_max):
if pd.isnull(age):
age = None
elif age < age_min:
age = None
elif age > age_max:
age = age_max
return age
def __call__(self, df):
df[self.column_driver_minage] = df[self.column_driver_minage].apply(self._age,
args=(self.age_min, self.age_max))
return df
class TransformAgeGender:
"""Gets intersections of drivers' minimum ages and genders.
Parameters:
column_age (str): Column name in InsolverDataFrame containing clients' ages in years, column type is integer.
column_gender (str): Column name in InsolverDataFrame containing clients' genders.
column_age_m (str): Column name in InsolverDataFrame for males' ages, for females default value is applied,
column type is integer.
column_age_f (str): Column name in InsolverDataFrame for females' ages, for males default value is applied,
column type is integer.
age_default (int): Default value of the age in years,18 by default.
        gender_male: Value for male gender in InsolverDataFrame, 'male' by default.
        gender_female: Value for female gender in InsolverDataFrame, 'female' by default.
"""
def __init__(self, column_age, column_gender, column_age_m, column_age_f, age_default=18,
gender_male='male', gender_female='female'):
self.priority = 2
self.column_age = column_age
self.column_gender = column_gender
self.column_age_m = column_age_m
self.column_age_f = column_age_f
self.age_default = age_default
self.gender_male = gender_male
self.gender_female = gender_female
@staticmethod
def _age_gender(age_gender, age_default, gender_male, gender_female):
age = age_gender[0]
gender = age_gender[1]
if pd.isnull(age):
age_m = None
age_f = None
elif pd.isnull(gender):
age_m = None
age_f = None
elif gender == gender_male:
age_m = age
age_f = age_default
elif gender == gender_female:
age_m = age_default
age_f = age
else:
age_m = None
age_f = None
return [age_m, age_f]
def __call__(self, df):
df[self.column_age_m], df[self.column_age_f] = zip(*df[[self.column_age, self.column_gender]].apply(
self._age_gender, axis=1, args=(self.age_default, self.gender_male, self.gender_female)).to_frame()[0])
return df
class TransformExp:
"""Transforms values of drivers' minimum experiences in years with values over 'exp_max' grouped.
Parameters:
column_driver_minexp (str): Column name in InsolverDataFrame containing drivers' minimum experiences in years,
column type is integer.
exp_max (int): Maximum value of drivers' experience in years, bigger values will be grouped, 52 by default.
"""
def __init__(self, column_driver_minexp, exp_max=52):
self.priority = 1
self.column_driver_minexp = column_driver_minexp
self.exp_max = exp_max
@staticmethod
def _exp(exp, exp_max):
if pd.isnull(exp):
exp = None
elif exp < 0:
exp = None
elif exp > exp_max:
exp = exp_max
return exp
def __call__(self, df):
df[self.column_driver_minexp] = df[self.column_driver_minexp].apply(self._exp, args=(self.exp_max,))
return df
class TransformAgeExpDiff:
"""Transforms records with difference between drivers' minimum age and minimum experience less then 'diff_min'
years, sets drivers' minimum experience equal to drivers' minimum age minus 'diff_min' years.
Parameters:
column_driver_minage (str): Column name in InsolverDataFrame containing drivers' minimum ages in years,
column type is integer.
column_driver_minexp (str): Column name in InsolverDataFrame containing drivers' minimum experiences in years,
column type is integer.
diff_min (int): Minimum allowed difference between age and experience in years.
"""
def __init__(self, column_driver_minage, column_driver_minexp, diff_min=18):
self.priority = 2
self.column_driver_minage = column_driver_minage
self.column_driver_minexp = column_driver_minexp
self.diff_min = diff_min
def __call__(self, df):
self.num_errors = len(df.loc[(df[self.column_driver_minage] - df[self.column_driver_minexp]) < self.diff_min])
df[self.column_driver_minexp].loc[(df[self.column_driver_minage] - df[self.column_driver_minexp])
< self.diff_min] = df[self.column_driver_minage] - self.diff_min
return df
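# Example (sketch, hypothetical column names): transforms are applied in ascending
# 'priority' order.
# transforms = [
#     TransformAgeExpDiff('driver_minage', 'driver_minexp'),
#     TransformAge('driver_minage'),
#     TransformExp('driver_minexp'),
# ]
# for t in sorted(transforms, key=lambda t: t.priority):
#     df = t(df)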
class TransformNameCheck:
"""Checks if clients' first names are in special list.
Names may concatenate surnames, first names and last names.
Parameters:
column_name (str): Column name in InsolverDataFrame containing clients' names, column type is string.
        name_full (bool): Flag indicating whether the name is a concatenation of surname, first name and last name, False by default.
column_name_check (str): Column name in InsolverDataFrame for bool values if first names are in the list or not.
names_list (list): The list of clients' first names.
"""
def __init__(self, column_name, column_name_check, names_list, name_full=False):
self.priority = 1
self.column_name = column_name
self.name_full = name_full
self.column_name_check = column_name_check
self.names_list = [n.upper() for n in names_list]
@staticmethod
def _name_get(client_name):
tokenize_re = re.compile(r'[\w\-]+', re.I)
try:
name = tokenize_re.findall(str(client_name))[1].upper()
return name
except Exception:
return 'ERROR'
def __call__(self, df):
if not self.name_full:
df[self.column_name_check] = 1 * df[self.column_name].isin(self.names_list)
else:
df[self.column_name_check] = 1 * df[self.column_name].apply(self._name_get).isin(self.names_list)
return df
# ---------------------------------------------------
# Vehicle data methods
# ---------------------------------------------------
class TransformVehPower:
"""Transforms values of vehicles' powers.
Values under 'power_min' and over 'power_max' will be grouped.
Values between 'power_min' and 'power_max' will be grouped with step 'power_step'.
Parameters:
column_veh_power (str): Column name in InsolverDataFrame containing vehicles' powers,
column type is float.
power_min (float): Minimum value of vehicles' power, lower values will be grouped, 10 by default.
power_max (float): Maximum value of vehicles' power, bigger values will be grouped, 500 by default.
power_step (int): Values of vehicles' power will be divided by this parameter, rounded to integers,
10 by default.
"""
def __init__(self, column_veh_power, power_min=10, power_max=500, power_step=10):
self.priority = 1
self.column_veh_power = column_veh_power
self.power_min = power_min
self.power_max = power_max
self.power_step = power_step
@staticmethod
def _power(power, power_min, power_max, power_step):
if pd.isnull(power):
power = None
elif power < power_min:
power = power_min
elif power > power_max:
power = power_max
else:
power = int(round(power / power_step, 0))
return power
def __call__(self, df):
df[self.column_veh_power] = df[self.column_veh_power].apply(self._power, args=(self.power_min, self.power_max,
self.power_step,))
return df
class TransformVehAgeGetFromIssueYear:
"""Gets vehicles' ages in years from issue years and policies' start dates.
Parameters:
column_veh_issue_year (str): Column name in InsolverDataFrame containing vehicles' issue years,
column type is integer.
column_date_start (str): Column name in InsolverDataFrame containing policies' start dates, column type is date.
column_veh_age (str): Column name in InsolverDataFrame for vehicles' ages in years, column type is integer.
"""
def __init__(self, column_veh_issue_year, column_date_start, column_veh_age):
self.priority = 0
self.column_veh_issue_year = column_veh_issue_year
self.column_date_start = column_date_start
self.column_veh_age = column_veh_age
@staticmethod
def _veh_age_get(issueyear_datestart):
veh_issue_year = issueyear_datestart[0]
date_start = issueyear_datestart[1]
        if pd.isnull(veh_issue_year):
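            # NOTE: the original file is truncated at this point; the branches below
            # are a sketch mirroring TransformAgeGetFromBirthday._age_get above.
            veh_age = None
        elif pd.isnull(date_start):
            veh_age = None
        elif veh_issue_year > date_start.year:
            veh_age = None
        else:
            veh_age = int(date_start.year - veh_issue_year)
        return veh_age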
from __future__ import absolute_import, division, unicode_literals
import datetime
import pytest
try:
import pandas as pd
import numpy as np
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
except ImportError:
pytest.skip('numpy is not available', allow_module_level=True)
import jsonpickle
import jsonpickle.ext.pandas
@pytest.fixture(scope='module', autouse=True)
def pandas_extension():
"""Initialize the numpy extension for this test module"""
jsonpickle.ext.pandas.register_handlers()
yield # control to the test function.
jsonpickle.ext.pandas.unregister_handlers()
def roundtrip(obj):
return jsonpickle.decode(jsonpickle.encode(obj))
def test_series_roundtrip():
ser = pd.Series(
{
'an_int': np.int_(1),
'a_float': np.float_(2.5),
'a_nan': np.nan,
'a_minus_inf': -np.inf,
'an_inf': np.inf,
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.datetime64('2014-01-01'),
'complex': np.complex_(1 - 2j),
# TODO: the following dtypes are not currently supported.
# 'object': np.object_({'a': 'b'}),
}
)
decoded_ser = roundtrip(ser)
assert_series_equal(decoded_ser, ser)
def test_dataframe_roundtrip():
df = pd.DataFrame(
{
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.array([np.datetime64('2014-01-01')] * 3),
'complex': np.complex_([1 - 2j, 2 - 1.2j, 3 - 1.3j]),
# TODO: the following dtypes are not currently supported.
# 'object': np.object_([{'a': 'b'}]*3),
}
)
decoded_df = roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_multindex_dataframe_roundtrip():
df = pd.DataFrame(
{
'idx_lvl0': ['a', 'b', 'c'],
'idx_lvl1': np.int_([1, 1, 2]),
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
}
)
df = df.set_index(['idx_lvl0', 'idx_lvl1'])
decoded_df = roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_dataframe_with_interval_index_roundtrip():
df = pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]}, index=pd.IntervalIndex.from_breaks([1, 2, 4])
)
decoded_df = roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_index_roundtrip():
idx = pd.Index(range(5, 10))
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_datetime_index_roundtrip():
idx = pd.date_range(start='2019-01-01', end='2019-02-01', freq='D')
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_ragged_datetime_index_roundtrip():
    idx = pd.DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-05'])
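    # The file is truncated here; the assertions presumably mirror the other
    # index round-trip tests above.
    decoded_idx = roundtrip(idx)
    assert_index_equal(decoded_idx, idx)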
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pymongo
import datetime
import json
import re
import time
import pandas as pd
from QUANTAXIS.QAUtil.QADate import QA_util_today_str
import talib
from concurrent.futures import ThreadPoolExecutor
from QUANTAXIS.QAFetch.QATushare import (QA_fetch_get_stock_day,
QA_fetch_get_stock_info,
QA_fetch_get_stock_list,
QA_fetch_get_trade_date,
QA_fetch_get_lhb)
from QUANTAXIS.QAFetch.QATusharePro import (QA_fetch_get_assetAliability,
QA_fetch_get_cashflow,
QA_fetch_get_income,
QA_fetch_get_finindicator,
QA_fetch_get_dailyindicator)
from QUANTAXIS.QAUtil import (QA_util_date_stamp, QA_util_log_info,
QA_util_time_stamp, QA_util_to_json_from_pandas,
trade_date_sse)
from QUANTAXIS.QAUtil.QASetting import DATABASE
import tushare as ts
ts.set_token('0f7da64f6c87dfa58456e0ad4c7ccf31d6c6e89458dc5b575e028c64')
def QA_SU_save_stock_terminated(client=DATABASE):
    '''
    Fetch the list of delisted stocks. The data comes from the Shanghai Stock
    Exchange, so currently only stocks delisted from the SSE are covered.
    collection fields:
    code: stock code  name: stock name  oDate: listing date  tDate: delisting date
    :param client:
    :return: None
    '''
    # TODO: this endpoint is no longer available; fetch from Wind instead
    # This function is deprecated
    print("!!! This tushare endpoint is no longer available !!!")
    df = ts.get_terminated()
    #df = ts.get_suspended()
    print(" Get delisted stocks from tushare, stock count is %d" % len(df))
    coll = client.stock_terminated
    client.drop_collection(coll)
    json_data = json.loads(df.reset_index().to_json(orient='records'))
    coll.insert_many(json_data)
    print(" Saved delisted stock list to stock_terminated collection, OK")
def QA_SU_save_stock_daily_basic(start_day='20010101',client=DATABASE,force=False):
'''
每日行情
名称 类型 描述
ts_code str TS股票代码
trade_date str 交易日期
close float 当日收盘价
turnover_rate float 换手率(%)
turnover_rate_f float 换手率(自由流通股)
volume_ratio float 量比
pe float 市盈率(总市值/净利润)
pe_ttm float 市盈率(TTM)
pb float 市净率(总市值/净资产)
ps float 市销率
ps_ttm float 市销率(TTM)
total_share float 总股本 (万)
float_share float 流通股本 (万)
free_share float 自由流通股本 (万)
total_mv float 总市值 (万元)
circ_mv float 流通市值(万元)
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_daily_basic_tushare 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
today = QA_util_today_str()
#days = pd.date_range(start_day, today, freq='1d').strftime('%Y-%m-%d').values
stock_daily = client.stock_daily_basic_tushare
print("##################get daily indicators start####################")
for i_ in range(0,len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
start_date = start_day
ref = stock_daily.find({'ts_code': df.iloc[i_].ts_code}).sort([('trade_date',-1)]).limit(1)
print(ref.count())
if ref.count() > 0:
start_date = pd.date_range((ref[0]['trade_date']),periods=2, freq='1d').strftime('%Y%m%d').values[-1]
print("start_date"+start_date.replace("-","")+" today"+today.replace("-",""))
if start_date.replace("-","")> today.replace("-",""):
continue
print('UPDATE stock daily basic Trying updating %s from %s to %s' % (df.iloc[i_].ts_code, start_date.replace("-",""),today.replace("-","")))
try:
daily = pro.daily_basic(ts_code=df.iloc[i_].ts_code, start_date=start_date.replace("-",""),end_date=today.replace("-",""))
except Exception as e:
time.sleep(30)
daily = pro.daily_basic(ts_code=df.iloc[i_].ts_code, start_date=start_date.replace("-", ""), end_date=today.replace("-", ""))
print(" Get stock daily basic from tushare,days count is %d" % len(daily))
if not daily.empty:
#coll = client.stock_daily_basic_tushare
#client.drop_collection(coll)
json_data = QA_util_to_json_from_pandas(daily)
#json_data = json.loads(df.reset_index().to_json(orient='records'))
stock_daily.insert_many(json_data)
print(" Save data to stock_daily_basic_tushare collection, OK")
def QA_SU_save_stock_report_income(start_day='20010101',client=DATABASE,force=False):
'''
利润表数据
输出参数
名称 类型 描述
ts_code str TS股票代码
ann_date str 公告日期
f_ann_date str 实际公告日期,即发生过数据变更的最终日期
end_date str 报告期
report_type str 报告类型: 参考下表说明
comp_type str 公司类型:1一般工商业 2银行 3保险 4证券
basic_eps float 基本每股收益
diluted_eps float 稀释每股收益
total_revenue float 营业总收入 (元,下同)
revenue float 营业收入
int_income float 利息收入
prem_earned float 已赚保费
comm_income float 手续费及佣金收入
n_commis_income float 手续费及佣金净收入
n_oth_income float 其他经营净收益
n_oth_b_income float 加:其他业务净收益
prem_income float 保险业务收入
out_prem float 减:分出保费
une_prem_reser float 提取未到期责任准备金
reins_income float 其中:分保费收入
n_sec_tb_income float 代理买卖证券业务净收入
n_sec_uw_income float 证券承销业务净收入
n_asset_mg_income float 受托客户资产管理业务净收入
oth_b_income float 其他业务收入
fv_value_chg_gain float 加:公允价值变动净收益
invest_income float 加:投资净收益
ass_invest_income float 其中:对联营企业和合营企业的投资收益
forex_gain float 加:汇兑净收益
total_cogs float 营业总成本
oper_cost float 减:营业成本
int_exp float 减:利息支出
comm_exp float 减:手续费及佣金支出
biz_tax_surchg float 减:营业税金及附加
sell_exp float 减:销售费用
admin_exp float 减:管理费用
fin_exp float 减:财务费用
assets_impair_loss float 减:资产减值损失
prem_refund float 退保金
compens_payout float 赔付总支出
reser_insur_liab float 提取保险责任准备金
div_payt float 保户红利支出
reins_exp float 分保费用
oper_exp float 营业支出
compens_payout_refu float 减:摊回赔付支出
insur_reser_refu float 减:摊回保险责任准备金
reins_cost_refund float 减:摊回分保费用
other_bus_cost float 其他业务成本
operate_profit float 营业利润
non_oper_income float 加:营业外收入
non_oper_exp float 减:营业外支出
nca_disploss float 其中:减:非流动资产处置净损失
total_profit float 利润总额
income_tax float 所得税费用
n_income float 净利润(含少数股东损益)
n_income_attr_p float 净利润(不含少数股东损益)
minority_gain float 少数股东损益
oth_compr_income float 其他综合收益
t_compr_income float 综合收益总额
compr_inc_attr_p float 归属于母公司(或股东)的综合收益总额
compr_inc_attr_m_s float 归属于少数股东的综合收益总额
ebit float 息税前利润
ebitda float 息税折旧摊销前利润
insurance_exp float 保险业务支出
undist_profit float 年初未分配利润
distable_profit float 可分配利润
主要报表类型说明
代码 类型 说明
1 合并报表 上市公司最新报表(默认)
2 单季合并 单一季度的合并报表
3 调整单季合并表 调整后的单季合并报表(如果有)
4 调整合并报表 本年度公布上年同期的财务报表数据,报告期为上年度
5 调整前合并报表 数据发生变更,将原数据进行保留,即调整前的原数据
6 母公司报表 该公司母公司的财务报表数据
7 母公司单季表 母公司的单季度表
8 母公司调整单季表 母公司调整后的单季表
9 母公司调整表 该公司母公司的本年度公布上年同期的财务报表数据
10 母公司调整前报表 母公司调整之前的原始财务报表数据
11 调整前合并报表 调整之前合并报表原数据
12 母公司调整前报表 母公司报表发生变更前保留的原数据
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_income 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
report_income = client.stock_report_income_tushare
print("##################get income reports start####################")
for i_ in range(len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
ref = report_income.find({'ts_code': df.iloc[i_].ts_code})
if ref.count() > 0:
report_income.remove({'ts_code': df.iloc[i_].ts_code})
print('UPDATE stock income Trying updating %s' % (df.iloc[i_].ts_code))
time.sleep(1)
try:
income = pro.income(ts_code=df.iloc[i_].ts_code)
except Exception as e:
time.sleep(30)
income = pro.income(ts_code=df.iloc[i_].ts_code)
print(" Get stock income reports from tushare,reports count is %d" % len(income))
if not income.empty:
#coll = client.stock_report_income_tushare
#client.drop_collection(coll)
json_data = QA_util_to_json_from_pandas(income)
#json_data = json.loads(df.reset_index().to_json(orient='records'))
report_income.insert_many(json_data)
print(" Save data to stock_report_income_tushare collection, OK")
def QA_SU_save_stock_report_assetliability(start_day='20010101',client=DATABASE,force=False):
'''
资产负债表数据
输出参数
名称 类型 描述
ts_code str TS股票代码
ann_date str 公告日期
f_ann_date str 实际公告日期
end_date str 报告期
report_type str 报表类型:见下方详细说明
comp_type str 公司类型:1一般工商业 2银行 3保险 4证券
total_share float 期末总股本
cap_rese float 资本公积金 (元,下同)
undistr_porfit float 未分配利润
surplus_rese float 盈余公积金
special_rese float 专项储备
money_cap float 货币资金
trad_asset float 交易性金融资产
notes_receiv float 应收票据
accounts_receiv float 应收账款
oth_receiv float 其他应收款
prepayment float 预付款项
div_receiv float 应收股利
int_receiv float 应收利息
inventories float 存货
amor_exp float 长期待摊费用
nca_within_1y float 一年内到期的非流动资产
sett_rsrv float 结算备付金
loanto_oth_bank_fi float 拆出资金
premium_receiv float 应收保费
reinsur_receiv float 应收分保账款
reinsur_res_receiv float 应收分保合同准备金
pur_resale_fa float 买入返售金融资产
oth_cur_assets float 其他流动资产
total_cur_assets float 流动资产合计
fa_avail_for_sale float 可供出售金融资产
htm_invest float 持有至到期投资
lt_eqt_invest float 长期股权投资
invest_real_estate float 投资性房地产
time_deposits float 定期存款
oth_assets float 其他资产
lt_rec float 长期应收款
fix_assets float 固定资产
cip float 在建工程
const_materials float 工程物资
fixed_assets_disp float 固定资产清理
produc_bio_assets float 生产性生物资产
oil_and_gas_assets float 油气资产
intan_assets float 无形资产
r_and_d float 研发支出
goodwill float 商誉
lt_amor_exp float 长期待摊费用
defer_tax_assets float 递延所得税资产
decr_in_disbur float 发放贷款及垫款
oth_nca float 其他非流动资产
total_nca float 非流动资产合计
cash_reser_cb float 现金及存放中央银行款项
depos_in_oth_bfi float 存放同业和其它金融机构款项
prec_metals float 贵金属
deriv_assets float 衍生金融资产
rr_reins_une_prem float 应收分保未到期责任准备金
rr_reins_outstd_cla float 应收分保未决赔款准备金
rr_reins_lins_liab float 应收分保寿险责任准备金
rr_reins_lthins_liab float 应收分保长期健康险责任准备金
refund_depos float 存出保证金
ph_pledge_loans float 保户质押贷款
refund_cap_depos float 存出资本保证金
indep_acct_assets float 独立账户资产
client_depos float 其中:客户资金存款
client_prov float 其中:客户备付金
transac_seat_fee float 其中:交易席位费
invest_as_receiv float 应收款项类投资
total_assets float 资产总计
lt_borr float 长期借款
st_borr float 短期借款
cb_borr float 向中央银行借款
depos_ib_deposits float 吸收存款及同业存放
loan_oth_bank float 拆入资金
trading_fl float 交易性金融负债
notes_payable float 应付票据
acct_payable float 应付账款
adv_receipts float 预收款项
sold_for_repur_fa float 卖出回购金融资产款
comm_payable float 应付手续费及佣金
payroll_payable float 应付职工薪酬
taxes_payable float 应交税费
int_payable float 应付利息
div_payable float 应付股利
oth_payable float 其他应付款
acc_exp float 预提费用
deferred_inc float 递延收益
st_bonds_payable float 应付短期债券
payable_to_reinsurer float 应付分保账款
rsrv_insur_cont float 保险合同准备金
acting_trading_sec float 代理买卖证券款
acting_uw_sec float 代理承销证券款
non_cur_liab_due_1y float 一年内到期的非流动负债
oth_cur_liab float 其他流动负债
total_cur_liab float 流动负债合计
bond_payable float 应付债券
lt_payable float 长期应付款
specific_payables float 专项应付款
estimated_liab float 预计负债
defer_tax_liab float 递延所得税负债
defer_inc_non_cur_liab float 递延收益-非流动负债
oth_ncl float 其他非流动负债
total_ncl float 非流动负债合计
depos_oth_bfi float 同业和其它金融机构存放款项
deriv_liab float 衍生金融负债
depos float 吸收存款
agency_bus_liab float 代理业务负债
oth_liab float 其他负债
prem_receiv_adva float 预收保费
depos_received float 存入保证金
ph_invest float 保户储金及投资款
reser_une_prem float 未到期责任准备金
reser_outstd_claims float 未决赔款准备金
reser_lins_liab float 寿险责任准备金
reser_lthins_liab float 长期健康险责任准备金
indept_acc_liab float 独立账户负债
pledge_borr float 其中:质押借款
indem_payable float 应付赔付款
policy_div_payable float 应付保单红利
total_liab float 负债合计
treasury_share float 减:库存股
ordin_risk_reser float 一般风险准备
forex_differ float 外币报表折算差额
invest_loss_unconf float 未确认的投资损失
minority_int float 少数股东权益
total_hldr_eqy_exc_min_int float 股东权益合计(不含少数股东权益)
total_hldr_eqy_inc_min_int float 股东权益合计(含少数股东权益)
total_liab_hldr_eqy float 负债及股东权益总计
lt_payroll_payable float 长期应付职工薪酬
oth_comp_income float 其他综合收益
oth_eqt_tools float 其他权益工具
oth_eqt_tools_p_shr float 其他权益工具(优先股)
lending_funds float 融出资金
acc_receivable float 应收款项
st_fin_payable float 应付短期融资款
payables float 应付款项
hfs_assets float 持有待售的资产
hfs_sales float 持有待售的负债
主要报表类型说明
代码 类型 说明
1 合并报表 上市公司最新报表(默认)
2 单季合并 单一季度的合并报表
3 调整单季合并表 调整后的单季合并报表(如果有)
4 调整合并报表 本年度公布上年同期的财务报表数据,报告期为上年度
5 调整前合并报表 数据发生变更,将原数据进行保留,即调整前的原数据
6 母公司报表 该公司母公司的财务报表数据
7 母公司单季表 母公司的单季度表
8 母公司调整单季表 母公司调整后的单季表
9 母公司调整表 该公司母公司的本年度公布上年同期的财务报表数据
10 母公司调整前报表 母公司调整之前的原始财务报表数据
11 调整前合并报表 调整之前合并报表原数据
12 母公司调整前报表 母公司报表发生变更前保留的原数据
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_income 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
today = QA_util_today_str()
report_income = client.stock_report_assetliability_tushare
print("##################get asset liability reports start####################")
for i_ in range(len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
ref = report_income.find({'ts_code': df.iloc[i_].ts_code})
if ref.count() > 0:
report_income.remove({'ts_code': df.iloc[i_].ts_code})
print('UPDATE stock asset liability Trying updating %s' % (df.iloc[i_].ts_code))
time.sleep(1)
try:
income = pro.balancesheet(ts_code=df.iloc[i_].ts_code)
except Exception as e:
time.sleep(30)
income = pro.balancesheet(ts_code=df.iloc[i_].ts_code)
print(" Get stock asset liability reports from tushare,reports count is %d" % len(income))
if not income.empty:
#coll = client.stock_report_income_tushare
#client.drop_collection(coll)
json_data = QA_util_to_json_from_pandas(income)
#json_data = json.loads(df.reset_index().to_json(orient='records'))
report_income.insert_many(json_data)
print(" Save data to stock_report_assetliability_tushare collection, OK")
def QA_SU_save_stock_report_cashflow(start_day='20010101',client=DATABASE,force=False):
'''
现金流表数据
输出参数
名称 类型 描述
ts_code str TS股票代码
ann_date str 公告日期
f_ann_date str 实际公告日期
end_date str 报告期
comp_type str 公司类型:1一般工商业 2银行 3保险 4证券
report_type str 报表类型:见下方详细说明
net_profit float 净利润 (元,下同)
finan_exp float 财务费用
c_fr_sale_sg float 销售商品、提供劳务收到的现金
recp_tax_rends float 收到的税费返还
n_depos_incr_fi float 客户存款和同业存放款项净增加额
n_incr_loans_cb float 向中央银行借款净增加额
n_inc_borr_oth_fi float 向其他金融机构拆入资金净增加额
prem_fr_orig_contr float 收到原保险合同保费取得的现金
n_incr_insured_dep float 保户储金净增加额
n_reinsur_prem float 收到再保业务现金净额
n_incr_disp_tfa float 处置交易性金融资产净增加额
ifc_cash_incr float 收取利息和手续费净增加额
n_incr_disp_faas float 处置可供出售金融资产净增加额
n_incr_loans_oth_bank float 拆入资金净增加额
n_cap_incr_repur float 回购业务资金净增加额
c_fr_oth_operate_a float 收到其他与经营活动有关的现金
c_inf_fr_operate_a float 经营活动现金流入小计
c_paid_goods_s float 购买商品、接受劳务支付的现金
c_paid_to_for_empl float 支付给职工以及为职工支付的现金
c_paid_for_taxes float 支付的各项税费
n_incr_clt_loan_adv float 客户贷款及垫款净增加额
n_incr_dep_cbob float 存放央行和同业款项净增加额
c_pay_claims_orig_inco float 支付原保险合同赔付款项的现金
pay_handling_chrg float 支付手续费的现金
pay_comm_insur_plcy float 支付保单红利的现金
oth_cash_pay_oper_act float 支付其他与经营活动有关的现金
st_cash_out_act float 经营活动现金流出小计
n_cashflow_act float 经营活动产生的现金流量净额
oth_recp_ral_inv_act float 收到其他与投资活动有关的现金
c_disp_withdrwl_invest float 收回投资收到的现金
c_recp_return_invest float 取得投资收益收到的现金
n_recp_disp_fiolta float 处置固定资产、无形资产和其他长期资产收回的现金净额
n_recp_disp_sobu float 处置子公司及其他营业单位收到的现金净额
stot_inflows_inv_act float 投资活动现金流入小计
c_pay_acq_const_fiolta float 购建固定资产、无形资产和其他长期资产支付的现金
c_paid_invest float 投资支付的现金
n_disp_subs_oth_biz float 取得子公司及其他营业单位支付的现金净额
oth_pay_ral_inv_act float 支付其他与投资活动有关的现金
n_incr_pledge_loan float 质押贷款净增加额
stot_out_inv_act float 投资活动现金流出小计
n_cashflow_inv_act float 投资活动产生的现金流量净额
c_recp_borrow float 取得借款收到的现金
proc_issue_bonds float 发行债券收到的现金
oth_cash_recp_ral_fnc_act float 收到其他与筹资活动有关的现金
stot_cash_in_fnc_act float 筹资活动现金流入小计
free_cashflow float 企业自由现金流量
c_prepay_amt_borr float 偿还债务支付的现金
c_pay_dist_dpcp_int_exp float 分配股利、利润或偿付利息支付的现金
incl_dvd_profit_paid_sc_ms float 其中:子公司支付给少数股东的股利、利润
oth_cashpay_ral_fnc_act float 支付其他与筹资活动有关的现金
stot_cashout_fnc_act float 筹资活动现金流出小计
n_cash_flows_fnc_act float 筹资活动产生的现金流量净额
eff_fx_flu_cash float 汇率变动对现金的影响
n_incr_cash_cash_equ float 现金及现金等价物净增加额
c_cash_equ_beg_period float 期初现金及现金等价物余额
c_cash_equ_end_period float 期末现金及现金等价物余额
c_recp_cap_contrib float 吸收投资收到的现金
incl_cash_rec_saims float 其中:子公司吸收少数股东投资收到的现金
uncon_invest_loss float 未确认投资损失
prov_depr_assets float 加:资产减值准备
depr_fa_coga_dpba float 固定资产折旧、油气资产折耗、生产性生物资产折旧
amort_intang_assets float 无形资产摊销
lt_amort_deferred_exp float 长期待摊费用摊销
decr_deferred_exp float 待摊费用减少
incr_acc_exp float 预提费用增加
loss_disp_fiolta float 处置固定、无形资产和其他长期资产的损失
loss_scr_fa float 固定资产报废损失
loss_fv_chg float 公允价值变动损失
invest_loss float 投资损失
decr_def_inc_tax_assets float 递延所得税资产减少
incr_def_inc_tax_liab float 递延所得税负债增加
decr_inventories float 存货的减少
decr_oper_payable float 经营性应收项目的减少
incr_oper_payable float 经营性应付项目的增加
others float 其他
im_net_cashflow_oper_act float 经营活动产生的现金流量净额(间接法)
conv_debt_into_cap float 债务转为资本
conv_copbonds_due_within_1y float 一年内到期的可转换公司债券
fa_fnc_leases float 融资租入固定资产
end_bal_cash float 现金的期末余额
beg_bal_cash float 减:现金的期初余额
end_bal_cash_equ float 加:现金等价物的期末余额
beg_bal_cash_equ float 减:现金等价物的期初余额
im_n_incr_cash_equ float 现金及现金等价物净增加额(间接法)
主要报表类型说明
代码 类型 说明
1 合并报表 上市公司最新报表(默认)
2 单季合并 单一季度的合并报表
3 调整单季合并表 调整后的单季合并报表(如果有)
4 调整合并报表 本年度公布上年同期的财务报表数据,报告期为上年度
5 调整前合并报表 数据发生变更,将原数据进行保留,即调整前的原数据
6 母公司报表 该公司母公司的财务报表数据
7 母公司单季表 母公司的单季度表
8 母公司调整单季表 母公司调整后的单季表
9 母公司调整表 该公司母公司的本年度公布上年同期的财务报表数据
10 母公司调整前报表 母公司调整之前的原始财务报表数据
11 调整前合并报表 调整之前合并报表原数据
12 母公司调整前报表 母公司报表发生变更前保留的原数据
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_income 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
report_income = client.stock_report_cashflow_tushare
print("##################get asset cashflow reports start####################")
for i_ in range(len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
ref = report_income.find({'ts_code': df.iloc[i_].ts_code})
if ref.count() > 0:
report_income.remove({'ts_code': df.iloc[i_].ts_code})
print('UPDATE stock cashflow Trying updating %s' % (df.iloc[i_].ts_code))
time.sleep(1)
try:
income = pro.cashflow(ts_code=df.iloc[i_].ts_code)
except Exception as e:
time.sleep(30)
income = pro.cashflow(ts_code=df.iloc[i_].ts_code)
print(" Get stock cashflow reports from tushare,reports count is %d" % len(income))
if not income.empty:
#coll = client.stock_report_income_tushare
#client.drop_collection(coll)
json_data = QA_util_to_json_from_pandas(income)
#json_data = json.loads(df.reset_index().to_json(orient='records'))
report_income.insert_many(json_data)
print(" Save data to stock_report_cashflow_tushare collection, OK")
def QA_SU_save_stock_report_forecast(start_year='2001',client=DATABASE,force=False):
'''
业绩预告数据
输出参数
名称 类型 描述
ts_code str TS股票代码
ann_date str 公告日期
end_date str 报告期
type str 业绩预告类型(预增/预减/扭亏/首亏/续亏/续盈/略增/略减)
p_change_min float 预告净利润变动幅度下限(%)
p_change_max float 预告净利润变动幅度上限(%)
net_profit_min float 预告净利润下限(万元)
net_profit_max float 预告净利润上限(万元)
last_parent_net float 上年同期归属母公司净利润
first_ann_date str 首次公告日
summary str 业绩预告摘要
change_reason str 业绩变动原因
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_income 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
today = QA_util_today_str()
report_forcast = client.stock_report_forcast_tushare
print("##################get forcast reports start####################")
season = ['0331','0630','0930','1231']
    years = range(int(start_year[0:4]), int(today[0:4]) + 1)  # slice the year string and include the current year
for i_ in range(len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
start_date = start_year
time.sleep(1)
ref = report_forcast.find({'ts_code': df.iloc[i_].ts_code,'trade_date':{'$regex':'^2019'}})
if ref.count() > 0:
report_forcast.remove({'ts_code': df.iloc[i_].ts_code,'trade_date':{'$regex':'^2019'}})
print('UPDATE stock forcast report Trying updating %s from %s' % (df.iloc[i_].ts_code, start_date.replace("-","")))
forcasts = []
try:
for y in years:
for s in season:
time.sleep(1)
                    f = pro.forecast(ts_code=df.iloc[i_].ts_code, period=str(y) + s)  # tushare endpoint is 'forecast'
if not f.empty:
forcasts.append(f)
except Exception as e:
print(e)
time.sleep(30)
continue
print(" Get stock forcast reports from tushare,reports count is %d" % len(forcasts))
        if forcasts:  # only save when at least one period returned data
json_data = QA_util_to_json_from_pandas(pd.concat(forcasts))
report_forcast.insert_many(json_data)
print(" Save data to stock_report_forcast_tushare collection, OK")
def QA_SU_save_stock_report_express(start_day='20010101',client=DATABASE,force=False):
'''
业绩快报数据
输出参数
名称 类型 描述
ts_code str TS股票代码
ann_date str 公告日期
end_date str 报告期
revenue float 营业收入(元)
operate_profit float 营业利润(元)
total_profit float 利润总额(元)
n_income float 净利润(元)
total_assets float 总资产(元)
total_hldr_eqy_exc_min_int float 股东权益合计(不含少数股东权益)(元)
diluted_eps float 每股收益(摊薄)(元)
diluted_roe float 净资产收益率(摊薄)(%)
yoy_net_profit float 去年同期修正后净利润
bps float 每股净资产
yoy_sales float 同比增长率:营业收入
yoy_op float 同比增长率:营业利润
yoy_tp float 同比增长率:利润总额
yoy_dedu_np float 同比增长率:归属母公司股东的净利润
yoy_eps float 同比增长率:基本每股收益
yoy_roe float 同比增减:加权平均净资产收益率
growth_assets float 比年初增长率:总资产
yoy_equity float 比年初增长率:归属母公司的股东权益
growth_bps float 比年初增长率:归属于母公司股东的每股净资产
or_last_year float 去年同期营业收入
op_last_year float 去年同期营业利润
tp_last_year float 去年同期利润总额
np_last_year float 去年同期净利润
eps_last_year float 去年同期每股收益
open_net_assets float 期初净资产
open_bps float 期初每股净资产
perf_summary str 业绩简要说明
is_audit int 是否审计: 1是 0否
remark str 备注
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_income 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
report_income = client.stock_report_express_tushare
print("##################get express reports start####################")
for i_ in range(len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
ref = report_income.find({'ts_code': df.iloc[i_].ts_code})
if ref.count() > 0:
report_income.remove({'ts_code': df.iloc[i_].ts_code})
print('UPDATE stock express Trying updating %s' % (df.iloc[i_].ts_code))
time.sleep(1)
try:
income = pro.express(ts_code=df.iloc[i_].ts_code)
except Exception as e:
time.sleep(30)
income = pro.express(ts_code=df.iloc[i_].ts_code)
print(" Get stock express reports from tushare,reports count is %d" % len(income))
if not income.empty:
#coll = client.stock_report_income_tushare
#client.drop_collection(coll)
json_data = QA_util_to_json_from_pandas(income)
#json_data = json.loads(df.reset_index().to_json(orient='records'))
report_income.insert_many(json_data)
print(" Save data to stock_report_express_tushare collection, OK")
def QA_SU_save_stock_report_dividend(start_day='20010101',client=DATABASE,force=False):
'''
分红送股数据
输出参数
名称 类型 默认显示 描述
ts_code str Y TS代码
end_date str Y 分红年度
ann_date str Y 预案公告日
div_proc str Y 实施进度
stk_div float Y 每股送转
stk_bo_rate float Y 每股送股比例
stk_co_rate float Y 每股转增比例
cash_div float Y 每股分红(税后)
cash_div_tax float Y 每股分红(税前)
record_date str Y 股权登记日
ex_date str Y 除权除息日
pay_date str Y 派息日
div_listdate str Y 红股上市日
imp_ann_date str Y 实施公告日
base_date str N 基准日
base_share float N 基准股本(万)
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_income 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
report_income = client.stock_report_dividend_tushare
print("##################get dividend reports start####################")
for i_ in range(len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
ref = report_income.find({'ts_code': df.iloc[i_].ts_code})
if ref.count() > 0:
report_income.remove({'ts_code': df.iloc[i_].ts_code})
print('UPDATE stock dividend Trying updating %s' % (df.iloc[i_].ts_code))
time.sleep(1)
try:
income = pro.dividend(ts_code=df.iloc[i_].ts_code)
except Exception as e:
time.sleep(30)
income = pro.dividend(ts_code=df.iloc[i_].ts_code)
print(" Get stock dividend reports from tushare,reports count is %d" % len(income))
if not income.empty:
#coll = client.stock_report_income_tushare
#client.drop_collection(coll)
json_data = QA_util_to_json_from_pandas(income)
#json_data = json.loads(df.reset_index().to_json(orient='records'))
report_income.insert_many(json_data)
print(" Save data to stock_report_express_tushare collection, OK")
def QA_SU_save_stock_report_fina_indicator(start_day='20010101',client=DATABASE,force=False):
'''
财务数据
输出参数,#号默认未返回字段
名称 类型 描述
ts_code str TS代码
ann_date str 公告日期
end_date str 报告期
eps float 基本每股收益
dt_eps float 稀释每股收益
total_revenue_ps float 每股营业总收入
revenue_ps float 每股营业收入
capital_rese_ps float 每股资本公积
surplus_rese_ps float 每股盈余公积
undist_profit_ps float 每股未分配利润
extra_item float 非经常性损益
profit_dedt float 扣除非经常性损益后的净利润
gross_margin float 毛利
current_ratio float 流动比率
quick_ratio float 速动比率
cash_ratio float 保守速动比率
#invturn_days float 存货周转天数
#arturn_days float 应收账款周转天数
#inv_turn float 存货周转率
ar_turn float 应收账款周转率
ca_turn float 流动资产周转率
fa_turn float 固定资产周转率
assets_turn float 总资产周转率
op_income float 经营活动净收益
#valuechange_income float 价值变动净收益
#interst_income float 利息费用
#daa float 折旧与摊销
ebit float 息税前利润
ebitda float 息税折旧摊销前利润
fcff float 企业自由现金流量
fcfe float 股权自由现金流量
current_exint float 无息流动负债
noncurrent_exint float 无息非流动负债
interestdebt float 带息债务
netdebt float 净债务
tangible_asset float 有形资产
working_capital float 营运资金
networking_capital float 营运流动资本
invest_capital float 全部投入资本
retained_earnings float 留存收益
diluted2_eps float 期末摊薄每股收益
bps float 每股净资产
ocfps float 每股经营活动产生的现金流量净额
retainedps float 每股留存收益
cfps float 每股现金流量净额
ebit_ps float 每股息税前利润
fcff_ps float 每股企业自由现金流量
fcfe_ps float 每股股东自由现金流量
netprofit_margin float 销售净利率
grossprofit_margin float 销售毛利率
cogs_of_sales float 销售成本率
expense_of_sales float 销售期间费用率
profit_to_gr float 净利润/营业总收入
saleexp_to_gr float 销售费用/营业总收入
adminexp_of_gr float 管理费用/营业总收入
finaexp_of_gr float 财务费用/营业总收入
impai_ttm float 资产减值损失/营业总收入
gc_of_gr float 营业总成本/营业总收入
op_of_gr float 营业利润/营业总收入
ebit_of_gr float 息税前利润/营业总收入
roe float 净资产收益率
roe_waa float 加权平均净资产收益率
roe_dt float 净资产收益率(扣除非经常损益)
roa float 总资产报酬率
npta float 总资产净利润
roic float 投入资本回报率
roe_yearly float 年化净资产收益率
roa2_yearly float 年化总资产报酬率
#roe_avg float 平均净资产收益率(增发条件)
#opincome_of_ebt float 经营活动净收益/利润总额
#investincome_of_ebt float 价值变动净收益/利润总额
#n_op_profit_of_ebt float 营业外收支净额/利润总额
#tax_to_ebt float 所得税/利润总额
#dtprofit_to_profit float 扣除非经常损益后的净利润/净利润
#salescash_to_or float 销售商品提供劳务收到的现金/营业收入
#ocf_to_or float 经营活动产生的现金流量净额/营业收入
#ocf_to_opincome float 经营活动产生的现金流量净额/经营活动净收益
#capitalized_to_da float 资本支出/折旧和摊销
debt_to_assets float 资产负债率
assets_to_eqt float 权益乘数
dp_assets_to_eqt float 权益乘数(杜邦分析)
ca_to_assets float 流动资产/总资产
nca_to_assets float 非流动资产/总资产
tbassets_to_totalassets float 有形资产/总资产
int_to_talcap float 带息债务/全部投入资本
eqt_to_talcapital float 归属于母公司的股东权益/全部投入资本
currentdebt_to_debt float 流动负债/负债合计
longdeb_to_debt float 非流动负债/负债合计
ocf_to_shortdebt float 经营活动产生的现金流量净额/流动负债
debt_to_eqt float 产权比率
eqt_to_debt float 归属于母公司的股东权益/负债合计
eqt_to_interestdebt float 归属于母公司的股东权益/带息债务
tangibleasset_to_debt float 有形资产/负债合计
tangasset_to_intdebt float 有形资产/带息债务
tangibleasset_to_netdebt float 有形资产/净债务
ocf_to_debt float 经营活动产生的现金流量净额/负债合计
#ocf_to_interestdebt float 经营活动产生的现金流量净额/带息债务
#ocf_to_netdebt float 经营活动产生的现金流量净额/净债务
#ebit_to_interest float 已获利息倍数(EBIT/利息费用)
#longdebt_to_workingcapital float 长期债务与营运资金比率
#ebitda_to_debt float 息税折旧摊销前利润/负债合计
turn_days float 营业周期
roa_yearly float 年化总资产净利率
roa_dp float 总资产净利率(杜邦分析)
fixed_assets float 固定资产合计
#profit_prefin_exp float 扣除财务费用前营业利润
#non_op_profit float 非营业利润
#op_to_ebt float 营业利润/利润总额
#nop_to_ebt float 非营业利润/利润总额
#ocf_to_profit float 经营活动产生的现金流量净额/营业利润
#cash_to_liqdebt float 货币资金/流动负债
#cash_to_liqdebt_withinterest float 货币资金/带息流动负债
#op_to_liqdebt float 营业利润/流动负债
#op_to_debt float 营业利润/负债合计
#roic_yearly float 年化投入资本回报率
profit_to_op float 利润总额/营业收入
#q_opincome float 经营活动单季度净收益
#q_investincome float 价值变动单季度净收益
#q_dtprofit float 扣除非经常损益后的单季度净利润
#q_eps float 每股收益(单季度)
#q_netprofit_margin float 销售净利率(单季度)
#q_gsprofit_margin float 销售毛利率(单季度)
#q_exp_to_sales float 销售期间费用率(单季度)
#q_profit_to_gr float 净利润/营业总收入(单季度)
q_saleexp_to_gr float 销售费用/营业总收入 (单季度)
#q_adminexp_to_gr float 管理费用/营业总收入 (单季度)
#q_finaexp_to_gr float 财务费用/营业总收入 (单季度)
#q_impair_to_gr_ttm float 资产减值损失/营业总收入(单季度)
q_gc_to_gr float 营业总成本/营业总收入 (单季度)
#q_op_to_gr float 营业利润/营业总收入(单季度)
q_roe float 净资产收益率(单季度)
q_dt_roe float 净资产单季度收益率(扣除非经常损益)
q_npta float 总资产净利润(单季度)
#q_opincome_to_ebt float 经营活动净收益/利润总额(单季度)
#q_investincome_to_ebt float 价值变动净收益/利润总额(单季度)
#q_dtprofit_to_profit float 扣除非经常损益后的净利润/净利润(单季度)
#q_salescash_to_or float 销售商品提供劳务收到的现金/营业收入(单季度)
q_ocf_to_sales float 经营活动产生的现金流量净额/营业收入(单季度)
#q_ocf_to_or float 经营活动产生的现金流量净额/经营活动净收益(单季度)
basic_eps_yoy float 基本每股收益同比增长率(%)
dt_eps_yoy float 稀释每股收益同比增长率(%)
cfps_yoy float 每股经营活动产生的现金流量净额同比增长率(%)
op_yoy float 营业利润同比增长率(%)
ebt_yoy float 利润总额同比增长率(%)
netprofit_yoy float 归属母公司股东的净利润同比增长率(%)
dt_netprofit_yoy float 归属母公司股东的净利润-扣除非经常损益同比增长率(%)
ocf_yoy float 经营活动产生的现金流量净额同比增长率(%)
roe_yoy float 净资产收益率(摊薄)同比增长率(%)
bps_yoy float 每股净资产相对年初增长率(%)
assets_yoy float 资产总计相对年初增长率(%)
eqt_yoy float 归属母公司的股东权益相对年初增长率(%)
tr_yoy float 营业总收入同比增长率(%)
or_yoy float 营业收入同比增长率(%)
#q_gr_yoy float 营业总收入同比增长率(%)(单季度)
#q_gr_qoq float 营业总收入环比增长率(%)(单季度)
q_sales_yoy float 营业收入同比增长率(%)(单季度)
#q_sales_qoq float 营业收入环比增长率(%)(单季度)
#q_op_yoy float 营业利润同比增长率(%)(单季度)
q_op_qoq float 营业利润环比增长率(%)(单季度)
#q_profit_yoy float 净利润同比增长率(%)(单季度)
#q_profit_qoq float 净利润环比增长率(%)(单季度)
#q_netprofit_yoy float 归属母公司股东的净利润同比增长率(%)(单季度)
#q_netprofit_qoq float 归属母公司股东的净利润环比增长率(%)(单季度)
equity_yoy float 净资产同比增长率
#rd_exp float 研发费用
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_income 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
report_income = client.stock_report_finindicator_tushare
print("##################get fina_indicator reports start####################")
for i_ in range(0,len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
ref = report_income.find({'ts_code': df.iloc[i_].ts_code})
if ref.count() > 0:
report_income.remove({'ts_code': df.iloc[i_].ts_code})
print('UPDATE stock fina_indicator Trying updating %s' % (df.iloc[i_].ts_code))
time.sleep(1)
try:
income = pro.fina_indicator(ts_code=df.iloc[i_].ts_code)
except Exception as e:
print(e)
time.sleep(30)
income = pro.fina_indicator(ts_code=df.iloc[i_].ts_code)
finally:
pass
print(" Get stock fina_indicator reports from tushare,reports count is %d" % len(income))
if not income.empty:
#coll = client.stock_report_income_tushare
#client.drop_collection(coll)
json_data = QA_util_to_json_from_pandas(income)
#json_data = json.loads(df.reset_index().to_json(orient='records'))
report_income.insert_many(json_data)
print(" Save data to stock_report_finindicator_tushare collection, OK")
def QA_SU_save_stock_report_audit(start_day='20010101',client=DATABASE,force=False):
'''
财务审计意见
输出参数
名称 类型 描述
ts_code str TS股票代码
ann_date str 公告日期
end_date str 报告期
audit_result str 审计结果
audit_fees float 审计总费用(元)
audit_agency str 会计事务所
audit_sign str 签字会计师
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_income 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
report_income = client.stock_report_audit_tushare
print("##################get audit reports start####################")
for i_ in range(len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
ref = report_income.find({'ts_code': df.iloc[i_].ts_code})
if ref.count() > 0:
report_income.remove({'ts_code': df.iloc[i_].ts_code})
        print('UPDATE stock audit Trying updating %s' % (df.iloc[i_].ts_code))
time.sleep(1)
try:
income = pro.fina_audit(ts_code=df.iloc[i_].ts_code)
except Exception as e:
time.sleep(30)
income = pro.fina_audit(ts_code=df.iloc[i_].ts_code)
print(" Get stock audit reports from tushare,reports count is %d" % len(income))
if not income.empty:
#coll = client.stock_report_income_tushare
#client.drop_collection(coll)
json_data = QA_util_to_json_from_pandas(income)
#json_data = json.loads(df.reset_index().to_json(orient='records'))
report_income.insert_many(json_data)
print(" Save data to stock_report_audit_tushare collection, OK")
def QA_SU_save_stock_report_mainbz(start_day='20010101',client=DATABASE,force=False):
'''
主营业务构成
输出参数
名称 类型 描述
ts_code str TS代码
end_date str 报告期
bz_item str 主营业务来源
bz_sales float 主营业务收入(元)
bz_profit float 主营业务利润(元)
bz_cost float 主营业务成本(元)
curr_type str 货币代码
update_flag str 是否更新
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_income 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
report_income = client.stock_report_mainbz_tushare
print("##################get mainbz reports start####################")
for i_ in range(len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
ref = report_income.find({'ts_code': df.iloc[i_].ts_code})
if ref.count() > 0:
report_income.remove({'ts_code': df.iloc[i_].ts_code})
        print('UPDATE stock mainbz Trying updating %s' % (df.iloc[i_].ts_code))
time.sleep(1)
try:
income = pro.fina_mainbz(ts_code=df.iloc[i_].ts_code)
except Exception as e:
time.sleep(30)
income = pro.fina_mainbz(ts_code=df.iloc[i_].ts_code)
finally:
pass
print(" Get stock mainbz reports from tushare,reports count is %d" % len(income))
if not income.empty:
#coll = client.stock_report_income_tushare
#client.drop_collection(coll)
json_data = QA_util_to_json_from_pandas(income)
#json_data = json.loads(df.reset_index().to_json(orient='records'))
report_income.insert_many(json_data)
print(" Save data to stock_report_mainbz_tushare collection, OK")
def QA_SU_save_stock_daily(start_day='20010101',client=DATABASE,force=False):
'''
每日行情
输出参数
名称 类型 描述
ts_code str 股票代码
trade_date str 交易日期
open float 开盘价
high float 最高价
low float 最低价
close float 收盘价
pre_close float 昨收价
change float 涨跌额
pct_chg float 涨跌幅 (未复权,如果是复权请用 通用行情接口 )
vol float 成交量 (手)
amount float 成交额 (千元)
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_income 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
report_income = client.stock_daily_tushare
print("##################get mainbz reports start####################")
for i_ in range(len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
ref = report_income.find({'ts_code': df.iloc[i_].ts_code})
if ref.count() > 0:
report_income.remove({'ts_code': df.iloc[i_].ts_code})
        print('UPDATE stock daily Trying updating %s' % (df.iloc[i_].ts_code))
time.sleep(1)
try:
income = pro.daily(ts_code=df.iloc[i_].ts_code)
except Exception as e:
time.sleep(30)
income = pro.daily(ts_code=df.iloc[i_].ts_code)
finally:
pass
print(" Get stock daily from tushare,reports count is %d" % len(income))
if not income.empty:
#coll = client.stock_report_income_tushare
#client.drop_collection(coll)
json_data = QA_util_to_json_from_pandas(income)
#json_data = json.loads(df.reset_index().to_json(orient='records'))
report_income.insert_many(json_data)
print(" Save data to stock_daily_tushare collection, OK")
def QA_SU_save_stock_adj_factor(start_day='20010101',client=DATABASE,force=False):
'''
复权因子
输出参数
名称 类型 描述
ts_code str 股票代码
trade_date str 交易日期
adj_factor float 复权因子
add by minijjlk
在命令行工具 quantaxis 中输入 save stock_income 中的命令
:param client:
:return:
'''
pro = ts.pro_api()
df = pro.stock_basic()
if df.empty:
print("there is no stock info,stock count is %d" % len(df))
return
report_income = client.stock_daily_adj_tushare
print("##################get mainbz reports start####################")
for i_ in range(len(df.index)):
QA_util_log_info('The %s of Total %s' % (i_, len(df.index)))
ref = report_income.find({'ts_code': df.iloc[i_].ts_code})
if ref.count() > 0:
report_income.remove({'ts_code': df.iloc[i_].ts_code})
        print('UPDATE stock daily adj Trying updating %s' % (df.iloc[i_].ts_code))
time.sleep(1)
try:
income = pro.adj_factor(ts_code=df.iloc[i_].ts_code)
except Exception as e:
time.sleep(30)
income = pro.adj_factor(ts_code=df.iloc[i_].ts_code)
finally:
pass
print(" Get stock daily from tushare,reports count is %d" % len(income))
if not income.empty:
#coll = client.stock_report_income_tushare
#client.drop_collection(coll)
json_data = QA_util_to_json_from_pandas(income)
#json_data = json.loads(df.reset_index().to_json(orient='records'))
report_income.insert_many(json_data)
print(" Save data to stock_daily_adj_tushare collection, OK")
def QA_SU_save_industry_indicator(start_day='20010101',client=DATABASE,force=False):
daily_basic = client.stock_daily_basic_tushare
pro = ts.pro_api()
basic = pro.stock_basic()
    times = pd.date_range(start='20010101', end=datetime.now(), freq='AS-JAN')
industry_daily = client.industry_daily
for i_ in range(len(times)):
end = None
if i_+1 == len(times):
end = datetime.now()
else:
end = times[i_+1]
curdaily = daily_basic.find({ "trade_date": {"$gte": times[i_],"$lte": end}})
start_1years_bf = times[i_] - pd.Timedelta(180,unit='D')
ast = QA_fetch_get_assetAliability(start_1years_bf,end)
profit = QA_fetch_get_income(start_1years_bf,end)
cash = QA_fetch_get_cashflow(start_1years_bf,end)
def _industry_indicator(data,curdaily,ast,profit,cash):
df = pd.merge(data,curdaily,on='ts_code',how='left')
first = df.groupby('ts_code', as_index=False).head(1)
            uplimit = first.total_mv.describe(percentiles=[.9])['90%']
            first = first.sort_values(by=['total_mv'], ascending=False)
            first = first[first.total_mv > uplimit].head(10)
first.loc[:, 'total_mv_rate'] = first.total_mv / (first.total_mv.sum())
first.loc[:,'deal_mv_rate'] = first.turnover_rate_f*first.close/((first.turnover_rate_f*first.close).sum())#考虑改进一下,用sma5来计算
df = df[df.ts_code.isin(first.ts_code.values)] #取总市值前十的股票构成该行业指数
ast = ast[ast.ts_code.isin(first.ts_code.values)]
            def _season(data, ast):
                # Tag each trading day with the most recent report period (end_date) announced on or before it.
                curast = ast[ast.ts_code == data.name].sort_values('ann_date').reset_index(drop=True)
                data.loc[:, 'season'] = None
                for index, item in curast.iterrows():
                    judge = (data.trade_date >= item.ann_date)
                    if index + 1 != len(curast):
                        judge = judge & (data.trade_date < curast.loc[index + 1, 'ann_date'])
                    data.loc[judge, 'season'] = item.end_date
                return data
df = df.groupby('ts_code',as_index=False).apply(_season)
df = pd.merge(df, ast, left_on=['ts_code','season'],right_on=['ts_code','end_date'],how='left')
            df = pd.merge(df, profit, left_on=['ts_code', 'season'], right_on=['ts_code', 'end_date'], how='left')
""" A set of helping functions used by the main functions """
import re
import urllib
import zipfile
from typing import List, Tuple
from io import TextIOWrapper, BytesIO
from pathlib import Path, PurePosixPath
import pandas as pd
from multiprocessing import Pool
import ftplib
from python_dwd.constants.column_name_mapping import GERMAN_TO_ENGLISH_COLUMNS_MAPPING, METADATA_DTYPE_MAPPING
from python_dwd.constants.access_credentials import DWD_SERVER, DWD_PATH, DWD_FOLDER_MAIN, DWD_FOLDER_METADATA
from python_dwd.constants.metadata import METADATA_COLUMNS, METADATA_MATCHSTRINGS, FILELIST_NAME, FTP_METADATA_NAME, \
ARCHIVE_FORMAT, DATA_FORMAT, METADATA_FIXED_COLUMN_WIDTH, STATIONDATA_SEP, NA_STRING, TRIES_TO_DOWNLOAD_FILE, \
STATID_REGEX, METADATA_1MIN_GEO_PREFIX, METADATA_1MIN_PAR_PREFIX
from python_dwd.enumerations.column_names_enumeration import DWDColumns
from python_dwd.download.download_services import create_remote_file_name
from python_dwd.download.ftp_handling import FTP
from python_dwd.enumerations.parameter_enumeration import Parameter
from python_dwd.enumerations.period_type_enumeration import PeriodType
from python_dwd.enumerations.time_resolution_enumeration import TimeResolution
from python_dwd.file_path_handling.path_handling import remove_old_file, create_folder
from python_dwd.additionals.functions import find_all_matchstrings_in_string
def create_metaindex(parameter: Parameter,
time_resolution: TimeResolution,
period_type: PeriodType) -> pd.DataFrame:
""" The function is used to create a simple metadata DataFrame parsed from the text files that are located in each
data section of the station data directory of the weather service.
Args:
parameter: observation measure
time_resolution: frequency/granularity of measurement interval
period_type: recent or historical files
Return:
DataFrame with parsed columns of the corresponding text file. Columns are translated into English and data is
not yet complete as file existence is not checked.
"""
server_path = PurePosixPath(DWD_PATH,
time_resolution.value,
parameter.value,
period_type.value)
try:
with FTP(DWD_SERVER) as ftp:
ftp.login()
files_server = ftp.list_files(remote_path=str(server_path), also_subfolders=False)
except ftplib.all_errors as e:
raise ftplib.all_errors("Error: couldn't retrieve filelist from server.\n"
f"{str(e)}")
metafile_server = [file for file in files_server
if find_all_matchstrings_in_string(file.lower(), METADATA_MATCHSTRINGS)].pop(0)
metafile_server = create_remote_file_name(metafile_server.lstrip(DWD_PATH))
try:
with urllib.request.urlopen(metafile_server) as request:
file = BytesIO(request.read())
except urllib.error.URLError as e:
raise urllib.error.URLError("Error: reading metadata file failed.\n"
f"{str(e)}")
metaindex = pd.read_fwf(filepath_or_buffer=file,
colspecs=METADATA_FIXED_COLUMN_WIDTH,
skiprows=[1],
dtype=str,
encoding="ISO-8859-1")
# Fix column names, as header is not aligned to fixed column widths
metaindex.columns = "".join(
[column for column in metaindex.columns if "unnamed" not in column.lower()]).split(" ")
metaindex = metaindex.rename(columns=str.upper).rename(columns=GERMAN_TO_ENGLISH_COLUMNS_MAPPING)
return metaindex.astype(METADATA_DTYPE_MAPPING)
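# Illustrative sketch (not part of the original module): the metadata parsing above hinges on
# pandas.read_fwf with explicit column boundaries. The miniature, self-contained example below shows the
# same mechanism on an in-memory buffer; the column widths, station ids and names are invented for
# illustration and do not correspond to a real DWD file.
def _example_read_fixed_width():
    from io import StringIO
    import pandas as pd
    raw = StringIO("00044 19710301 Grosser Feldberg\n00052 19260101 Another Station \n")
    colspecs = [(0, 5), (6, 14), (15, 31)]
    return pd.read_fwf(raw, colspecs=colspecs, names=["STATION_ID", "FROM_DATE", "STATION_NAME"], dtype=str)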
def metaindex_for_1minute_data(parameter: Parameter,
time_resolution: TimeResolution) -> pd.DataFrame:
"""
A helping function to create a raw index of metadata for stations of the set of
parameters as given. This raw metadata is then used by other functions. This
second/alternative function must be used for high resolution data, where the
metadata is not available as file but instead saved in external files per each
station.
- especially for precipitation/1_minute/historical!
"""
assert time_resolution == TimeResolution.MINUTE_1, \
"Wrong TimeResolution, only 1 minute is valid "
metadata_path = PurePosixPath(DWD_PATH,
time_resolution.value,
parameter.value,
FTP_METADATA_NAME)
with FTP(DWD_SERVER) as ftp:
ftp.login()
metadata_filepaths = ftp.list_files(remote_path=str(metadata_path), also_subfolders=False)
metadata_filepaths = [create_remote_file_name(file.lstrip(DWD_PATH)) for file in metadata_filepaths]
statids = [re.findall(STATID_REGEX, file).pop(0) for file in metadata_filepaths]
    metaindex_df = pd.DataFrame(None, columns=METADATA_COLUMNS)
# -*- coding: utf-8 -*-
"""
Created on Wed May 03 15:01:31 2017
@author: jdkern
"""
import pandas as pd
import numpy as np
def setup(year,hist,hist_year,operating_horizon,perfect_foresight):
# year = 0
# hist = 0
# hist_year = 2010
#read generator parameters into DataFrame
df_gen = pd.read_csv('CA_data_file/generators.csv',header=0)
#read transmission path parameters into DataFrame
df_paths = pd.read_csv('CA_data_file/paths.csv',header=0)
#calendar
df_calendar = pd.read_excel('CA_data_file/calendar.xlsx',header=0)
#list zones
zones = ['PGE_valley', 'PGE_bay', 'SCE', 'SDGE']
##time series of load for each zone
df_load = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Sim_hourly_load.csv',header=0)
df_load = df_load[zones]
df_load = df_load.loc[year*8760:year*8760+8759]
df_load = df_load.reset_index(drop=True)
##time series of operational reserves for each zone
rv= df_load.values
reserves = np.zeros((len(rv),1))
for i in range(0,len(rv)):
reserves[i] = np.sum(rv[i,:])*.04
df_reserves = pd.DataFrame(reserves)
df_reserves.columns = ['reserves']
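    # Editorial note (sketch, not original code): the loop above is equivalent to the vectorised
    # one-liner below, which avoids iterating over rows:
    #   reserves = df_load.sum(axis=1).to_numpy().reshape(-1, 1) * .04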
##daily hydropower availability
df_hydro_PGE = pd.read_csv('Hydro_setup/CA_dispatchable_PGE.csv',header=0)
df_hydro_SCE = pd.read_csv('Hydro_setup/CA_dispatchable_SCE.csv',header=0)
##time series of wind generation for each zone
df_wind = pd.read_csv('../Stochastic_engine/Synthetic_wind_power/wind_power_sim.csv',header=0)
df_wind = df_wind.loc[:,'CAISO']
df_wind = df_wind.loc[year*8760:year*8760+8759]
df_wind = df_wind.reset_index()
wind_caps = pd.read_excel('CA_data_file/wind_caps.xlsx')
##time series solar for each TAC
df_solar = pd.read_csv('../Stochastic_engine/Synthetic_solar_power/solar_power_sim.csv',header=0)
df_solar = df_solar.loc[year*8760:year*8760+8759]
df_solar = df_solar.reset_index()
solar_caps = pd.read_excel('CA_data_file/solar_caps.xlsx')
##daily time series of dispatchable imports by path
forecast_days = ['fd1','fd2','fd3','fd4','fd5','fd6','fd7']
df_imports66 = pd.read_csv('Path_setup/CA_dispatchable_66.csv',header=0)
df_imports61 = pd.read_csv('Path_setup/CA_dispatchable_61.csv',header=0)
df_imports45 = pd.read_csv('Path_setup/CA_dispatchable_45.csv',header=0)
df_imports46 = pd.read_csv('Path_setup/CA_dispatchable_46.csv',header=0)
df_imports42 = pd.read_csv('Path_setup/CA_dispatchable_42.csv',header=0)
df_imports24 = pd.read_csv('Path_setup/CA_dispatchable_24.csv',header=0)
##hourly time series of exports by zone
df_exports24 = pd.read_csv('Path_setup/CA_exports24.csv',header=0)
df_exports42 = pd.read_csv('Path_setup/CA_exports42.csv',header=0)
df_exports45 = pd.read_csv('Path_setup/CA_exports45.csv',header=0)
df_exports66 = pd.read_csv('Path_setup/CA_exports66.csv',header=0)
#must run resources (LFG,ag_waste,nuclear)
df_must = pd.read_excel('CA_data_file/must_run.xlsx',header=0)
#natural gas prices
df_ng = pd.read_excel('../Stochastic_engine/Gas_prices/NG.xlsx', header=0)
df_ng = df_ng[zones]
df_ng = df_ng.loc[year*365:year*365+364,:]
df_ng = df_ng.reset_index()
#california imports hourly minimum flows
df_CA_import_mins42 = pd.read_csv('Path_setup/CA_path_mins42.csv', header=0)
    df_CA_import_mins46 = pd.read_csv('Path_setup/CA_path_mins46.csv', header=0)
import requests
import dateutil
import datetime
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import warnings
import statsmodels.api as sm
import time
tsa = sm.tsa
# Read recession data. First try to parse html table at nber.org
try:
# Read HTML table at nber.org
tables = pd.read_html('https://www.nber.org/research/data/us-business-cycle-expansions-and-contractions')
# Remove the first row because there is no starting peak
cycle_dates = tables[0]
# Remove the first row that has not peak date
cycle_dates = cycle_dates.drop(0).reset_index(drop=True)
# Is a recession currently underway? Set today as the trough
if not cycle_dates['Business Cycle Reference Dates','Trough Year'].iloc[-1].isdigit():
today = pd.to_datetime('today').date()
cycle_dates.loc[cycle_dates.index[-1],('Business Cycle Reference Dates','Trough Year')] = str(today.year)
cycle_dates.loc[cycle_dates.index[-1],('Business Cycle Reference Dates','Trough Month')] = str(today.month_name())
# Join month and year columns to produce datetime columns for peak and trough dates
cycle_dates['peaks'] = pd.to_datetime(cycle_dates['Business Cycle Reference Dates','Peak Year'].astype(str).astype(str).str.replace('*','',regex=False) + '-' + cycle_dates['Business Cycle Reference Dates','Peak Month'].astype(str).str.replace('*','',regex=False))
cycle_dates['troughs'] = pd.to_datetime(cycle_dates['Business Cycle Reference Dates','Trough Year'].astype(str).astype(str).str.replace('*','',regex=False) + '-' + cycle_dates['Business Cycle Reference Dates','Trough Month'].astype(str).str.replace('*','',regex=False))
# Drop unnecessary columns
cycle_dates = cycle_dates[['peaks','troughs']]
except:
try:
# Read table of NBER peak/trough dates on my GitHub page
cycle_dates = pd.read_csv('https://raw.githubusercontent.com/letsgoexploring/fredpy-package/gh-pages/business%20cycle%20dates/business_cycle_dates.csv')
# Is a recession currently underway? Set today as the trough
        if pd.isna(cycle_dates.troughs.iloc[-1]):
            cycle_dates.loc[cycle_dates.index[-1], 'troughs'] = pd.to_datetime('today').strftime('%Y-%m-%d')
# Overwrite original columns with datetime values
cycle_dates['peaks'] = pd.to_datetime(cycle_dates.peaks)
cycle_dates['troughs'] = pd.to_datetime(cycle_dates.troughs)
except:
print('Internet connection required. Check connection.')
# API key attribute needs to be set
# Try to find file in OSX home directory
try:
items = os.getcwd().split('/')[:3]
items.append('fred_api_key.txt')
path = '/'.join(items)
with open(path,'r') as api_key_file:
api_key = api_key_file.readline()
except:
api_key=None
def load_api_key(path):
try:
# Try to load file from currect working directory
with open(path,'r') as api_key_file:
return api_key_file.readline()
except:
try:
# Try to find file in OSX home directory
items = os.getcwd().split('/')[:3]
items.append(path)
path = '/'.join(items)
with open(path,'r') as api_key_file:
return api_key_file.readline()
except:
path = os.path.join(os.getcwd(),path)
with open(path,'r') as api_key_file:
return api_key_file.readline()
# Initialize cache dictionary
series_cache = {}
######################################################################################################
# The series class and methods
class series:
'''Defines a class for downloading, storing, and manipulating data from FRED.'''
def __init__(self,series_id=None,observation_date=None,cache=True):
'''Initializes an instance of the series class.
Args:
series_id (string): unique FRED series ID. If series_id equals None, an empty series
object is created.
observation_date (string): MM-DD-YYYY or YYYY-MM-DD formatted state string. Indicates the final
date at which the series is observed. I.e., excludes revisions made
after observation_date.
cache (bool): Whether to store a copy of the series for the current session to reduce
queries to the FRED API. Default: True
Returns:
None
Attributes:
data: (Pandas Series) data values with dates as index.
date_range: (string) specifies the dates of the first and last observations.
frequency: (string) data frequency. 'Daily', 'Weekly', 'Monthly', 'Quarterly', 'Semiannual', or 'Annual'.
frequency_short: (string) data frequency. Abbreviated. 'D', 'W', 'M', 'Q', 'SA, or 'A'.
last_updated: (string) date series was last updated.
notes: (string) details about series. Not available for all series.
observation_date: (string) vintage date at which data are observed. YYYY-MM-DD
release: (string) statistical release containing data.
seasonal_adjustment: (string) specifies whether the data has been seasonally adjusted.
seasonal_adjustment_short: (string) specifies whether the data has been seasonally adjusted. Abbreviated.
series_id: (string) unique FRED series ID code.
source: (string) original source of the data.
t: (int) number corresponding to frequency: 365 for daily, 52 for weekly, 12 for monthly, 4 for quarterly, and 1 for annual.
title: (string) title of the data series.
units: (string) units of the data series.
units_short: (string) units of the data series. Abbreviated.
'''
# Verify API key is stored
if api_key is None:
raise ValueError('fredpy.api_key value not assigned. You need to provide your key for the FRED API.')
# Observation date for request
if observation_date is None:
observation_date = datetime.datetime.today().strftime('%Y-%m-%d')
else:
observation_date = pd.to_datetime(observation_date).strftime('%Y-%m-%d')
if type(series_id) == str:
if series_id+'_'+observation_date in series_cache.keys() and cache:
# self = series_cache[series_id+'_'+observation_date].copy()
cached = series_cache[series_id+'_'+observation_date].copy()
self.date_range = cached.date_range
self.data = cached.data
self.frequency = cached.frequency
self.frequency_short = cached.frequency_short
self.last_updated = cached.last_updated
self.notes = cached.notes
self.observation_date = cached.observation_date
self.release = cached.release
self.seasonal_adjustment = cached.seasonal_adjustment
self.seasonal_adjustment_short = cached.seasonal_adjustment_short
self.series_id = cached.series_id
self.source = cached.source
self.t = cached.t
self.title = cached.title
self.units = cached.units
self.units_short = cached.units_short
else:
path = 'fred/series'
parameters = {'series_id':series_id,
'realtime_start':observation_date,
'realtime_end':observation_date,
'file_type':'json'
}
r = fred_api_request(api_key=api_key,path=path,parameters=parameters)
results = r.json()
self.series_id = series_id
self.title = results['seriess'][0]['title']
self.frequency = results['seriess'][0]['frequency']
self.frequency_short = results['seriess'][0]['frequency_short']
self.observation_date = datetime.datetime.strptime(observation_date,"%Y-%m-%d").strftime('%B %d, %Y')
self.units = results['seriess'][0]['units']
self.units_short = results['seriess'][0]['units_short']
self.seasonal_adjustment = results['seriess'][0]['seasonal_adjustment']
self.seasonal_adjustment_short = results['seriess'][0]['seasonal_adjustment_short']
self.last_updated = results['seriess'][0]['last_updated']
try:
self.notes = results['seriess'][0]['notes']
except:
self.notes = ''
obs_per_year = {'D':365,'W':52,'M':12,'Q':4,'SA':2,'A':1}
try:
self.t = obs_per_year[self.frequency_short]
except:
self.t = np.nan
path = 'fred/series/observations'
parameters = {'series_id':series_id,
'realtime_start':observation_date,
'realtime_end':observation_date,
'file_type':'json'
}
r = fred_api_request(api_key=api_key,path=path,parameters=parameters)
results = r.json()
data = pd.DataFrame(results['observations'],columns =['date','value'])
data = data.replace('.', np.nan)
data['date'] = pd.to_datetime(data['date'])
data = data.set_index('date')['value'].astype(float)
# Try to infer frequency:
try:
data = data.asfreq(pd.infer_freq(data.index))
except:
pass
self.data = data
self.date_range = 'Range: '+str(self.data.index[0])[:10]+' to '+str(self.data.index[-1])[:10]
path = 'fred/series/release'
parameters = {'series_id':series_id,
'realtime_start':observation_date,
'realtime_end':observation_date,
'file_type':'json'
}
r = fred_api_request(api_key=api_key,path=path,parameters=parameters)
results = r.json()
self.release = results['releases'][0]['name']
release_id = results['releases'][0]['id']
path = 'fred/release/sources'
parameters = {'series_id':series_id,
'release_id':release_id,
'file_type':'json'
}
r = fred_api_request(api_key=api_key,path=path,parameters=parameters)
results = r.json()
self.source = results['sources'][0]['name']
if cache:
series_cache[series_id+'_'+observation_date] = self.copy()
else:
self.date_range = ''
self.data = pd.Series([],pd.to_datetime([]))
self.frequency = ''
self.frequency_short = ''
self.last_updated = ''
self.notes = ''
self.observation_date = ''
self.release = ''
self.seasonal_adjustment = ''
self.seasonal_adjustment_short = ''
self.series_id = ''
self.source = ''
self.t = 0
self.title = ''
self.units = ''
self.units_short = ''
def apc(self,log=False,backward=True):
'''Computes the percentage change in the data over one year.
Args:
log (bool): If True, computes the percentage change as 100⋅log[x(t)/x(t-k)], where k is
the number of observations per year.
If False (default), compute the percentage change as 100⋅[(x(t)/x(k−1)−1].
backward (bool): If True (default), compute percentage change from the previous year.
If False, compute percentage change from current to next year.
Returns:
fredpy series
'''
new_series = self.copy()
T = len(self.data)
if backward==True:
ratio = self.data/self.data.shift(self.t)
else:
ratio = self.data.shift(-self.t)/self.data
if log==True:
new_series.data = 100*np.log(ratio).dropna()
else:
new_series.data = 100*(ratio-1).dropna()
new_series.units = 'Percent'
new_series.units_short = '%'
new_series.title = 'Annual Percentage Change in '+self.title
new_series.date_range = 'Range: '+str(new_series.data.index[0])[:10]+' to '+str(new_series.data.index[-1])[:10]
return new_series
def as_frequency(self,freq=None,method='mean'):
'''Convert a fredpy series to a lower frequency.
Args:
freq (string): Abbreviation of desired frequency: 'D','W','M','Q','A'
method (string): How to resample the data: 'first', 'last', 'mean' (default), 'median',
'min', 'max', 'sum'
Returns:
fredpy series
'''
new_series = self.copy()
obs_per_year = {'D':365,'W':52,'M':12,'Q':4,'A':1}
map_of_freqency_abbreviations = {'D':'Daily','W':'Weekly','M':'Monthly','Q':'Quarterly','A':'Annual'}
try:
new_series.t = obs_per_year[freq]
new_series.frequency_short=freq
new_series.frequency=map_of_freqency_abbreviations[freq]
except:
raise ValueError("freq must be 'D', 'W', 'M', 'Q', or 'A'")
if self.t<new_series.t:
warnings.warn('Warning: You are converting series to a higher frequency and this method may not behave as you expect.')
map_to_pandas_frequencies = {'D':'D','W':'W','M':'MS','Q':'QS','A':'AS'}
if method == 'first':
new_series.data = self.data.resample(map_to_pandas_frequencies[freq]).first()
elif method == 'last':
new_series.data = self.data.resample(map_to_pandas_frequencies[freq]).last()
elif method == 'mean':
new_series.data = self.data.resample(map_to_pandas_frequencies[freq]).mean()
elif method == 'median':
new_series.data = self.data.resample(map_to_pandas_frequencies[freq]).median()
elif method == 'min':
new_series.data = self.data.resample(map_to_pandas_frequencies[freq]).min()
elif method == 'max':
new_series.data = self.data.resample(map_to_pandas_frequencies[freq]).max()
elif method == 'sum':
new_series.data = self.data.resample(map_to_pandas_frequencies[freq]).sum()
new_series.date_range = 'Range: '+str(new_series.data.index[0])[:10]+' to '+str(new_series.data.index[-1])[:10]
return new_series
def bp_filter(self,low=None,high=None,K=None):
'''Computes the bandpass (Baxter-King) filter of the data. Returns two fredpy.series
instances containing the cyclical and trend components of the data:
new_series_cycle,new_series_trend
.. Note:
In computing the bandpass filter, K observations are lost from each end of the
original series to the data are 2K elements shorter than in the original series.
Args:
low (int): Minimum period for oscillations. Default: None, recommendation used.
high (int): Maximum period for oscillations. Default: None, recommendation used.
K (int): Lead-lag length of the filter. Default: None, recommendation used.
Recommendations:
Monthly data: low=24, high=84, K=84
Quarterly data: low=6, high=32, K=12
Annual data: low=1.5, high=8, K=3
Returns:
two fredpy.series instances
'''
new_series_cycle = self.copy()
new_series_trend = self.copy()
if all(v is None for v in [low, high, K]) and self.frequency_short=='M':
low=24
high=84
K=84
elif all(v is None for v in [low, high, K]) and self.frequency_short=='Q':
low=6
high=32
K=12
elif all(v is None for v in [low, high, K]) and self.frequency_short=='A':
low=1.5
high=8
K=3
# if low==6 and high==32 and K==12 and self.t !=4:
# print('Warning: data frequency is not quarterly!')
# elif low==3 and high==8 and K==1.5 and self.t !=1:
# print('Warning: data frequency is not annual!')
cycle = tsa.filters.bkfilter(self.data,low=low,high=high,K=K)
actual = self.data.iloc[K:-K]
trend = actual - cycle
new_series_cycle.data = cycle
new_series_cycle.units = 'Deviation relative to trend'
new_series_cycle.units_short = 'Dev. rel. to trend'
new_series_cycle.title = self.title+' - deviation relative to trend (bandpass filtered)'
new_series_cycle.date_range = 'Range: '+str(new_series_cycle.data.index[0])[:10]+' to '+str(new_series_cycle.data.index[-1])[:10]
new_series_trend.data = trend
new_series_trend.title = self.title+' - trend (bandpass filtered)'
new_series_trend.date_range = 'Range: '+str(new_series_trend.data.index[0])[:10]+' to '+str(new_series_trend.data.index[-1])[:10]
return new_series_cycle,new_series_trend
def cf_filter(self,low=None,high=None):
'''Computes the Christiano-Fitzgerald (CF) filter of the data. Returns two fredpy.series
instances containing the cyclical and trend components of the data:
new_series_cycle,new_series_trend
Args:
low (int): Minimum period for oscillations. Default: None. 18 for monthly data, 6 for quarterly
data, and 2 for annual data.
high (int): Maximum period for oscillations. Default: None. 96 for monthly data, 32 for quarterly
data, and 8 for annual data.
Recommendations:
Monthly data: low=18, high=96
Quarterly data: low=6, high=32
Annual data: low=2, high=8
Returns:
two fredpy.series instances
'''
new_series_cycle = self.copy()
new_series_trend = self.copy()
if all(v is None for v in [low, high]) and self.frequency_short=='M':
low=18
high=96
elif all(v is None for v in [low, high]) and self.frequency_short=='Q':
low=6
high=32
elif all(v is None for v in [low, high]) and self.frequency_short=='A':
low=2
high=8
# if low==6 and high==32 and self.t !=4:
# print('Warning: data frequency is not quarterly!')
# elif low==1.5 and high==8 and self.t !=4:
# print('Warning: data frequency is not quarterly!')
actual = self.data
cycle, trend = tsa.filters.cffilter(self.data,low=low, high=high, drift=False)
new_series_cycle.data = cycle
new_series_cycle.units = 'Deviation relative to trend'
new_series_cycle.units_short = 'Dev. rel. to trend'
new_series_cycle.title = self.title+' - deviation relative to trend (CF filtered)'
new_series_trend.data = trend
new_series_trend.title = self.title+' - trend (CF filtered)'
return new_series_cycle,new_series_trend
def copy(self):
'''Returns a copy of a series object.
Args:
Returs:
fredpy series
'''
new_series = series()
new_series.data = self.data
new_series.date_range = self.date_range
new_series.frequency = self.frequency
new_series.frequency_short = self.frequency_short
new_series.last_updated = self.last_updated
new_series.notes = self.notes
new_series.release = self.release
new_series.seasonal_adjustment = self.seasonal_adjustment
new_series.seasonal_adjustment_short = self.seasonal_adjustment_short
new_series.series_id = self.series_id
new_series.source = self.source
new_series.t = self.t
new_series.title = self.title
new_series.units = self.units
new_series.units_short = self.units_short
return new_series
def diff_filter(self):
'''Computes the first difference filter of original series. Returns two fredpy.series
instances containing the cyclical and trend components of the data:
new_series_cycle,new_series_trend
Note:
In computing the first difference filter, the first observation from the original series is
lost so data are 1 element shorter than in the original series.
Args:
Returns:
two fredpy.series instances
'''
new_series_cycle = self.copy()
new_series_trend = self.copy()
new_series_cycle.data = self.data.diff().dropna() - self.data.diff().dropna().mean()
new_series_cycle.units = 'Deviation relative to trend'
new_series_cycle.units_short = 'Dev. rel. to trend'
new_series_cycle.date_range = 'Range: '+str(new_series_cycle.data.index[0])[:10]+' to '+str(new_series_cycle.data.index[-1])[:10]
new_series_trend.data = self.data.shift(1).dropna()
new_series_trend.title = self.title+' - trend (first difference filtered)'
new_series_trend.date_range = 'Range: '+str(new_series_trend.data.index[0])[:10]+' to '+str(new_series_trend.data.index[-1])[:10]
return new_series_cycle,new_series_trend
def divide(self,object2):
'''Divides the data from the current fredpy series by the data from object2.
Args:
object2 (int, float, numpy ndarray, or similar or fredpy series)
Note:
You are responsibile for making sure that dividing the series makes sense.
Returns:
fredpy series
'''
return divide(self,object2)
def drop_nan(self):
'''Removes missing (NaN) values.
Args:
Returns:
fredpy series
'''
new_series = self.copy()
new_series.data = new_series.data.dropna()
new_series.date_range = 'Range: '+str(new_series.data.index[0])[:10]+' to '+str(new_series.data.index[-1])[:10]
return new_series
def hp_filter(self,lamb=None):
'''Computes the Hodrick-Prescott (HP) filter of the data. Returns two fredpy.series
instances containing the cyclical and trend components of the data:
new_series_cycle,new_series_trend
Args:
lamb (int): The Hodrick-Prescott smoothing parameter. Default: None. Recommendations:
104976000000 for daily data.
129600 for monthly data,
1600 for quarterly data,
6.25 for annual data,
In general, set lambda to: 1600*[number of observations per quarter]**4
Returns:
two fredpy.series instances
'''
new_series_cycle = self.copy()
new_series_trend = self.copy()
if lamb is None and self.frequency_short=='M':
lamb = 129600
elif lamb is None and self.frequency_short=='Q':
lamb = 1600
elif lamb is None and self.frequency_short=='A':
lamb = 6.25
elif lamb is None and self.frequency_short=='D':
lamb = 104976000000
# if lamb==1600 and self.t !=4:
# print('Warning: data frequency is not quarterly!')
# elif lamb==129600 and self.t !=12:
# print('Warning: data frequency is not monthly!')
# elif lamb==6.25 and self.t !=1:
# print('Warning: data frequency is not annual!')
cycle, trend = tsa.filters.hpfilter(self.data,lamb=lamb)
new_series_cycle.data = cycle
new_series_cycle.units = 'Deviation relative to trend'
new_series_cycle.units_short = 'Dev. rel. to trend'
new_series_cycle.title = self.title+' - deviation relative to trend (HP filtered)'
new_series_trend.title = self.title+' - trend (HP filtered)'
new_series_trend.data = trend
return new_series_cycle,new_series_trend
def linear_filter(self):
'''Computes a simple linear filter of the data using OLS. Returns two fredpy.series
instances containing the cyclical and trend components of the data:
new_series_cycle,new_series_trend
Args:
Returns:
two fredpy.series instances
'''
new_series_cycle = self.copy()
new_series_trend = self.copy()
y = self.data
time = np.arange(len(self.data))
x = np.column_stack([time])
x = sm.add_constant(x)
model = sm.OLS(y,x)
result= model.fit()
pred = result.predict(x)
cycle= y-pred
trend= pd.Series(pred,index=self.data.index)
new_series_cycle.data = cycle
new_series_cycle.units = 'Deviation relative to trend'
new_series_cycle.units_short = 'Dev. rel. to trend'
new_series_cycle.title = self.title+' - deviation relative to trend (linearly filtered via OLS)'
new_series_trend.title = self.title+' - trend (linearly filtered via OLS)'
new_series_trend.data = trend
return new_series_cycle,new_series_trend
def log(self):
'''Computes the natural log of the data
Args:
Returns:
fredpy series
'''
new_series = self.copy()
new_series.data = np.log(new_series.data)
new_series.units = 'Log '+new_series.units
new_series.units_short = 'Log '+new_series.units_short
new_series.title = 'Log '+new_series.title
return new_series
def ma(self,length,center=False):
'''Computes a moving average with window equal to length. If center is True, then the
two-sided moving average is computed. Otherwise, the moving average will be one-sided.
Args:
length (int): window length of the one-sided moving average.
center (bool): False (default) - one-sided MA. True - two-sided MA.
Returns:
fredpy series
'''
new_series = self.copy()
new_series.data = new_series.data.rolling(window=length,center=center).mean().dropna()
new_series.date_range = 'Range: '+str(new_series.data.index[0])[:10]+' to '+str(new_series.data.index[-1])[:10]
        if center:
            new_series.title = self.title+' ('+str(length)+'-period two-sided moving average)'
        else:
            new_series.title = self.title+' ('+str(length)+'-period one-sided moving average)'
return new_series
def minus(self,object2):
'''Subtracts the data from object2 from the data from the current fredpy series.
Args:
object2 (int, float, numpy ndarray, or similar or fredpy series)
Note:
You are responsibile for making sure that adding the series makes sense.
Returns:
fredpy series
'''
return minus(self,object2)
def pc(self,log=False,backward=True,annualized=False):
'''Computes the percentage change in the data from the preceding period.
Args:
log (bool): If True, computes the percentage change as 100⋅log[x(t)/x(t-1)].
If False (default), compute the percentage change as 100⋅[x(t)/x(t−1)−1].
backward (bool): If True (default), compute percentage change from the previous period.
If False, compute percentage change from current to next period.
annualized (bool): Default: False: If True, percentage change is computed at an annual rate.
E.g., if the data were monthly and log==True, then the annualized
percentage change would be:
100⋅12⋅log[x(t)/x(t−1)].
Returns:
fredpy series
'''
new_series = self.copy()
T = len(self.data)
if annualized:
t = self.t
else:
t = 1
if backward==True:
ratio = self.data/self.data.shift(1)
else:
ratio = self.data.shift(-1)/self.data
if log==True:
new_series.data = 100*t*np.log(ratio).dropna()
else:
new_series.data = 100*(ratio**t-1).dropna()
new_series.units = 'Percent'
new_series.units_short = '%'
new_series.title = 'Percentage Change in '+self.title
new_series.date_range = 'Range: '+str(new_series.data.index[0])[:10]+' to '+str(new_series.data.index[-1])[:10]
return new_series
def per_capita(self,civ_pop = True):
'''Transforms the data into per capita terms by dividing by a measure of the
total population of the United States:
Args:
civ_pop (string): If civ_pop == True, use Civilian noninstitutional population defined as
persons 16 years of age and older (Default). Else, use the total US
population.
Returns:
fredpy series
'''
new_series = self.copy()
if civ_pop ==True:
population= series('CNP16OV').as_frequency(new_series.frequency_short)
else:
population= series('POP').as_frequency(new_series.frequency_short)
new_series,population = window_equalize([new_series,population])
new_series.data = new_series.data/population.data
new_series.title = new_series.title+' Per Capita'
new_series.units = new_series.units+' Per Thousand People'
new_series.units_short = new_series.units_short+' Per Thousand People'
new_series.date_range = 'Range: '+str(new_series.data.index[0])[:10]+' to '+str(new_series.data.index[-1])[:10]
return new_series
def plot(self,**kwargs):
'''Equivalent to calling .plot() method on the self.data Pandas Series object.'''
self.data.plot(**kwargs)
def plus(self,object2):
'''Adds the data from the current fredpy series to the data from object2.
Args:
object2 (int, float, numpy ndarray, or similar or fredpy series)
Note:
You are responsibile for making sure that adding the series makes sense.
Returns:
fredpy series
'''
return plus(self,object2)
def recent(self,N):
'''Restrict the data to the most recent N observations.
Args:
N (int): Number of periods to include in the data window.
Returns:
fredpy series
'''
new_series = self.copy()
new_series.data =new_series.data.iloc[-N:]
new_series.date_range = 'Range: '+str(new_series.data.index[0])[:10]+' to '+str(new_series.data.index[-1])[:10]
return new_series
def recessions(self,ax=None,color='0.5',alpha=0.5):
'''Creates recession bars for plots. Unless 'ax' is specified, be used after
a plot has been made but before either (1) a new plot is created or (2) a
show command is issued.
Args:
ax (matplotlib.axes._subplots.AxesSubplot): Matplotlib axis object to plot recession bars. Default: None
color (string): Color of the bars. Default: '0.5'
alpha (float): Transparency of the recession bars. Must be between 0 and 1
Default: 0.5
Returns:
'''
series_peaks = []
series_troughs = []
start = self.data.index[0]
end = self.data.index[-1]
recessions(start=start,end=end,ax=ax,color=color,alpha=alpha)
# for k in range(len(cycle_dates)):
# if cycle_dates['peaks'].loc[k]<date_begin and date_begin < cycle_dates['troughs'].loc[k]:
# series_peaks.append(date_begin)
# series_troughs.append(cycle_dates['troughs'].loc[k])
# elif date_begin < cycle_dates['peaks'].loc[k] and date_end > cycle_dates['troughs'].loc[k]:
# series_peaks.append(cycle_dates['peaks'].loc[k])
# series_troughs.append(cycle_dates['troughs'].loc[k])
# elif cycle_dates['peaks'].loc[k]<date_end and cycle_dates['troughs'].loc[k] > date_end:
# series_peaks.append(cycle_dates['peaks'].loc[k])
# series_troughs.append(date_end)
# for k in range(len(series_peaks)):
# plt.axvspan(series_peaks[k], series_troughs[k], edgecolor= color, facecolor=color, alpha=alpha)
def times(self,object2):
'''Multiplies the data from the current fredpy series with the data from object2.
Args:
object2 (int, float, numpy ndarray, or similar or fredpy series)
Note:
You are responsibile for making sure that adding the series makes sense.
Returns:
fredpy series
'''
return times(self,object2)
def window(self,start_end):
'''Restricts the data to a specified date window.
Args:
start_end (list): is an ordered pair: start_end = [start, end]
start is the date of the minimum date
end is the date of the maximum date
both are strings in either 'yyyy-mm-dd' or 'mm-dd-yyyy' format
Returns:
fredpy series
'''
new_series = self.copy()
new_series.data = new_series.data.loc[start_end[0]:start_end[1]]
if len(new_series.data)>0:
new_series.date_range = 'Range: '+str(new_series.data.index[0])[:10]+' to '+str(new_series.data.index[-1])[:10]
else:
new_series.date_range = 'Range: Null'
return new_series
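# Illustrative sketch (not part of the original module): a typical end-to-end use of the series class
# defined above: download real GDP, restrict the sample, take logs, and split the result into trend and
# cycle with the HP filter. It needs a valid FRED API key and network access, so it is left as an
# uncalled helper; 'GDPC1' is FRED's series ID for real GDP.
def _example_hp_decomposition():
    gdp = series('GDPC1')
    gdp = gdp.window(['1950-01-01', '2024-01-01']).log()
    gdp_cycle, gdp_trend = gdp.hp_filter()   # quarterly data, so lambda defaults to 1600
    return gdp_cycle, gdp_trend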
######################################################################################################
# Additional functions
def divide(object1,object2):
'''Divides the data from the object1 by the data from object2.
Args:
object1 (int, float, numpy ndarray, or similar or fredpy series)
object2 (int, float, numpy ndarray, or similar or fredpy series)
Note:
You are responsibile for making sure that adding the series makes sense.
Returns:
fredpy series
'''
if not isinstance(object1, series) and not isinstance(object2, series):
return object1/object2
elif not isinstance(object1, series) and isinstance(object2, series):
new_series = object2.copy()
new_series.data = object1/new_series.data
return new_series
elif not isinstance(object2, series) and isinstance(object1, series):
new_series = object1.copy()
new_series.data = new_series.data/object2
return new_series
else:
if not object1.data.index.equals(object2.data.index):
raise ValueError('object1 and object2 do not have the same observation dates')
else:
new_series = series()
new_series.title = object1.title +' divided by '+object2.title
if object1.source == object2.source:
new_series.source = object1.source
else:
new_series.source = object1.source +' and '+object2.source
new_series.frequency = object1.frequency
new_series.frequency_short = object1.frequency_short
new_series.units = object1.units +' / '+object2.units
new_series.units_short = object1.units_short +' / '+object2.units_short
new_series.t = object1.t
new_series.date_range = object1.date_range
if object1.seasonal_adjustment == object2.seasonal_adjustment:
new_series.seasonal_adjustment = object1.seasonal_adjustment
new_series.seasonal_adjustment_short = object1.seasonal_adjustment_short
else:
new_series.seasonal_adjustment = object1.seasonal_adjustment +' and '+object2.seasonal_adjustment
new_series.seasonal_adjustment_short = object1.seasonal_adjustment_short +' and '+object2.seasonal_adjustment_short
if object1.last_updated == object2.last_updated:
new_series.last_updated = object1.last_updated
else:
new_series.last_updated = object1.last_updated +' and '+object2.last_updated
if object1.release == object2.release:
new_series.release = object1.release
else:
new_series.release = object1.release +' and '+object2.release
new_series.series_id = object1.series_id +' and '+object2.series_id
new_series.data = object1.data/object2.data
return new_series
def fred_api_request(api_key,path,parameters):
'''Queries the FRED API. Returns a requests.models.Response object if successful, otherwise will
raise an error with a message that is hopefully helpful. Reference for API querries:
https://fred.stlouisfed.org/docs/api/fred/
Args:
api_key (string): 32-character alpha-numeric string.
        path (string): API path. Available paths are listed in the FRED API documentation
            referenced above.
        parameters (dict): dictionary of query parameters appended to the request URL.
Returns:
requests.models.Response
Attributes:
None
'''
status_code = None
request_count = 0
while request_count <= 10:
request_url = 'https://api.stlouisfed.org/'+path+'?'+'api_key='+str(api_key)+'&'
for key in parameters.keys():
request_url+=key+'='+str(parameters[key])+'&'
r = requests.get(request_url)
status_code = r.status_code
if status_code == 200:
break
elif status_code == 429:
print('FRED API error: API limit exceeded in API query (status code: '+str(status_code)+'). Retry in '+str(5+request_count)+' seconds.')
time.sleep(5+request_count)
elif status_code == 504:
            print('FRED API error: Gateway Time-out in API query (status code: '+str(status_code)+'). Retry in '+str(5+request_count)+' seconds.')
time.sleep(5+request_count)
else:
r.raise_for_status()
request_count+=1
if request_count >10 and status_code != 200:
raise Exception('Unknown FRED API error. Status code: ',status_code)
return r
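# Hedged usage sketch (assumption, not original source): fred_api_request() takes the API
# path and a dict of query parameters. The path 'fred/series/observations' and the
# parameter names below follow the public FRED API documentation.
def _example_fred_api_request(api_key):
    '''Illustrative only: fetch observations for the UNRATE series as parsed JSON.'''
    parameters = {'series_id': 'UNRATE', 'file_type': 'json'}
    response = fred_api_request(api_key=api_key, path='fred/series/observations', parameters=parameters)
    return response.json()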
def get_vintage_dates(series_id):
'''Returns vintage dates for series available from ALFRED.
Args:
series_id (string): unique FRED series ID.
Returns:
list'''
    request_url = 'https://api.stlouisfed.org/fred/series/vintagedates?series_id='+series_id+'&api_key='+api_key+'&file_type=json'
r = requests.get(request_url)
results = r.json()
return results['vintage_dates']
def minus(object1,object2):
'''Subtracts the data from object2 from the data from object1.
Args:
object1 (int, float, numpy ndarray, or similar or fredpy series)
object2 (int, float, numpy ndarray, or similar or fredpy series)
Note:
        You are responsible for making sure that subtracting the series makes sense.
Returns:
fredpy series
'''
if not isinstance(object1, series) and not isinstance(object2, series):
return object1-object2
elif not isinstance(object1, series) and isinstance(object2, series):
new_series = object2.copy()
new_series.data = object1-new_series.data
return new_series
elif not isinstance(object2, series) and isinstance(object1, series):
new_series = object1.copy()
new_series.data = new_series.data-object2
return new_series
else:
if not object1.data.index.equals(object2.data.index):
raise ValueError('object1 and object2 do not have the same observation dates')
else:
new_series = series()
new_series.title = object1.title +' minus '+object2.title
if object1.source == object2.source:
new_series.source = object1.source
else:
new_series.source = object1.source +' and '+object2.source
new_series.frequency = object1.frequency
new_series.frequency_short = object1.frequency_short
new_series.units = object1.units +' - '+object2.units
new_series.units_short = object1.units_short +' - '+object2.units_short
new_series.t = object1.t
new_series.date_range = object1.date_range
if object1.seasonal_adjustment == object2.seasonal_adjustment:
new_series.seasonal_adjustment = object1.seasonal_adjustment
new_series.seasonal_adjustment_short = object1.seasonal_adjustment_short
else:
new_series.seasonal_adjustment = object1.seasonal_adjustment +' and '+object2.seasonal_adjustment
new_series.seasonal_adjustment_short = object1.seasonal_adjustment_short +' and '+object2.seasonal_adjustment_short
if object1.last_updated == object2.last_updated:
new_series.last_updated = object1.last_updated
else:
new_series.last_updated = object1.last_updated +' and '+object2.last_updated
if object1.release == object2.release:
new_series.release = object1.release
else:
new_series.release = object1.release +' and '+object2.release
new_series.series_id = object1.series_id +' and '+object2.series_id
new_series.data = object1.data-object2.data
return new_series
def plus(object1,object2):
'''Adds the data from object1 to the data from object2.
Args:
object1 (int, float, numpy ndarray, or similar or fredpy series)
object2 (int, float, numpy ndarray, or similar or fredpy series)
Note:
        You are responsible for making sure that adding the series makes sense.
Returns:
fredpy series
'''
if not isinstance(object1, series) and not isinstance(object2, series):
return object1+object2
elif not isinstance(object1, series) and isinstance(object2, series):
new_series = object2.copy()
new_series.data = new_series.data+object1
return new_series
elif not isinstance(object2, series) and isinstance(object1, series):
new_series = object1.copy()
new_series.data = new_series.data+object2
return new_series
else:
if not object1.data.index.equals(object2.data.index):
raise ValueError('object1 and object2 do not have the same observation dates')
else:
new_series = series()
new_series.title = object1.title +' plus '+object2.title
if object1.source == object2.source:
new_series.source = object1.source
else:
new_series.source = object1.source +' and '+object2.source
new_series.frequency = object1.frequency
new_series.frequency_short = object1.frequency_short
new_series.units = object1.units +' + '+object2.units
new_series.units_short = object1.units_short +' + '+object2.units_short
new_series.t = object1.t
new_series.date_range = object1.date_range
if object1.seasonal_adjustment == object2.seasonal_adjustment:
new_series.seasonal_adjustment = object1.seasonal_adjustment
new_series.seasonal_adjustment_short = object1.seasonal_adjustment_short
else:
new_series.seasonal_adjustment = object1.seasonal_adjustment +' and '+object2.seasonal_adjustment
new_series.seasonal_adjustment_short = object1.seasonal_adjustment_short +' and '+object2.seasonal_adjustment_short
if object1.last_updated == object2.last_updated:
new_series.last_updated = object1.last_updated
else:
new_series.last_updated = object1.last_updated +' and '+object2.last_updated
if object1.release == object2.release:
new_series.release = object1.release
else:
new_series.release = object1.release +' and '+object2.release
new_series.series_id = object1.series_id +' and '+object2.series_id
new_series.data = object1.data+object2.data
return new_series
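# Hedged usage sketch (not part of the original module): plus() and minus() follow the same
# pattern as divide(), so arithmetic composes over aligned fredpy series or a series and a
# scalar. The FRED series IDs 'EXPGS' and 'IMPGS' are illustrative assumptions.
def _example_plus_minus_usage():
    '''Illustrative only: net exports as exports minus imports, then a scalar shift.'''
    exports = series('EXPGS')
    imports = series('IMPGS')
    net_exports = minus(exports, imports)   # elementwise difference; metadata fields merged
    shifted = plus(net_exports, 100.0)      # scalar arithmetic also returns a fredpy series
    return shifted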
def recessions(start=None,end=None,ax=None,color='0.5',alpha=0.5):
'''Creates recession bars for time series plots.
Args:
start (NoneType, string, or Timestamp): Starting date. Default: None
end (NoneType, string, or Timestamp): Ending date. Default: None
ax (matplotlib.axes._subplots.AxesSubplot): Matplotlib axis object to plot recession bars. Default: None
color (string): Color of the bars. Default: '0.5'
alpha (float): Transparency of the recession bars. Must be between 0 and 1
Default: 0.5
Returns:
'''
series_peaks = []
series_troughs = []
if start is None:
start = cycle_dates.iloc[0]['peaks']
elif type(start) == str:
start = pd.to_datetime(start)
if end is None:
        end = pd.to_datetime('today')
import random
from sklearn import metrics
from scipy.stats import wasserstein_distance
import datatable as dt
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import mutual_info_classif
from sklearn.model_selection import KFold, StratifiedKFold
import matplotlib.pyplot as plt
def ML_classification(allFeatures, train_ml, test_ml, df_ml, classification_model, language_model, folds, pooled_text):
"""
Function to train classification models on features provided by language models
Example use: classification_model=RandomForestClassifier(n_estimators=100, max_depth=7, min_samples_split=2,
min_samples_leaf=1, max_features='auto', n_jobs=-1, random_state=2020)
    language_model= name of the language model whose features should be used (a key of allFeatures)
    possible options for the language model are: "Term Frequency", "LIWC", "Pooled FastText", "Pooled RoBERTa"
or "Universal Sentence Encoder"
"""
# list of analyzed language models
model = classification_model
preds = list()
trues = list()
print("Now training: ", language_model, pooled_text, " ", type(model).__name__)
# for each fold
for fold in range(folds):
# chose appropriate features and data
features = set(allFeatures[language_model][fold])
train_index = train_ml[fold]
test_index = test_ml[fold]
train_data = df_ml[features].iloc[train_index]
target_train_data = df_ml["target_ml"].iloc[train_index]
test_data = df_ml[features].iloc[test_index]
target_test_data = df_ml.iloc[test_index]["target_ml"]
model.fit(train_data, target_train_data)
preds.append(model.predict(test_data).tolist())
trues.append(target_test_data.tolist())
return sum(preds, []), sum(trues, [])
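# Hedged usage sketch (assumption, not original source): a typical call site builds the fold
# bookkeeping elsewhere and passes a scikit-learn classifier. "TF" is assumed to be the
# allFeatures key produced by term_frequency() below; allFeatures, train_ml, test_ml and
# df_ml are placeholders for objects created earlier in the pipeline.
def _example_ml_classification(allFeatures, train_ml, test_ml, df_ml):
    from sklearn.ensemble import RandomForestClassifier
    clf = RandomForestClassifier(n_estimators=100, max_depth=7, random_state=2020, n_jobs=-1)
    preds, trues = ML_classification(allFeatures, train_ml, test_ml, df_ml,
                                     classification_model=clf, language_model="TF",
                                     folds=5, pooled_text="")
    return metrics.f1_score(trues, preds, average="macro")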
def compute_metrics(dependent_variable, test_run, data_file_name, pooled_text, binarize):
# code borrowed from https://gist.github.com/nickynicolson/202fe765c99af49acb20ea9f77b6255e
def cm2df(cm, labels):
df = pd.DataFrame()
# rows
for i, row_label in enumerate(labels):
rowdata = {}
# columns
for j, col_label in enumerate(labels):
rowdata[col_label] = cm[i, j]
df = df.append(pd.DataFrame.from_dict({row_label: rowdata}, orient='index'))
return df[labels]
# read result files to compute metrics including SemEval2017-specific
preds = dt.fread(f"./data/partial_results/{dependent_variable}_{data_file_name}_{test_run}{pooled_text}"
f"_predictions.csv").to_pandas()
trues = dt.fread(f"./data/partial_results/{dependent_variable}_{data_file_name}_{test_run}{pooled_text}"
f"_trues.csv").to_pandas()
modelColNames = preds.columns.to_list()
modelColNames.remove("C0")
# define classes and indexes of true values for each class. For each model the true index values are the
# same since the test set was the same.
classes = set(trues[f"{modelColNames[0]}"])
cls_index = dict()
for cls in classes:
cls_index[cls] = trues[trues[f"{modelColNames[0]}"] == cls].index.to_list()
# for each model compute the metrics
allmetrics = dict()
for model in modelColNames:
model_metrics = dict()
mcc = metrics.matthews_corrcoef(y_true=trues[f"{model}"], y_pred=preds[f"{model}"])
f1 = metrics.f1_score(y_true=trues[f"{model}"], y_pred=preds[f"{model}"], average="macro")
cm = metrics.confusion_matrix(y_true=trues[f"{model}"], y_pred=preds[f"{model}"])
if binarize:
cm_as_df = cm2df(cm, [0, 1])
else:
cm_as_df = cm2df(cm, [0, 1, 2, 3, 4])
cm_as_df.to_excel(
f'./results/confusion_matrix_{dependent_variable}_{data_file_name}_{test_run}{pooled_text}.xlsx')
_metrics = {"MCC": mcc, "F1": f1}
for metric in _metrics.keys():
model_metrics[metric] = _metrics[metric]
allmetrics[model] = model_metrics
dfmetrics = pd.DataFrame.from_dict(allmetrics)
dfmetrics.to_csv(f"./results/{dependent_variable}_{data_file_name}_{test_run}{pooled_text}_metric_results.csv")
print(dfmetrics)
def term_frequency(train_ml, df, allFeatures, dependent_variable):
foldTFfeatures = {}
allWords = []
for fold, rows in train_ml.items():
vectorizer = CountVectorizer(min_df=4, binary=True)
tf = vectorizer.fit_transform(df.iloc[rows]["text"])
dftf = pd.DataFrame(tf.A, columns=vectorizer.get_feature_names())
mi_imps = list(zip(mutual_info_classif(dftf, df.iloc[rows][dependent_variable], discrete_features=True),
dftf.columns))
mi_imps = sorted(mi_imps, reverse=True)
topFeaturesN = 300
foldTFfeatures[fold] = [f"TF_{y}" for x, y in mi_imps[0:topFeaturesN]].copy()
# save all words found by TF models as important features
allWords.extend([y for x, y in mi_imps[0:topFeaturesN]].copy())
# add the Term Frequency language model key to dictionary with allFeatures from various language models
allFeatures["TF"] = foldTFfeatures
# Create TF features for all the text instances and create a corresponding data frame
allWords = list(set(allWords))
vectorizer = CountVectorizer(min_df=4, binary=True, vocabulary=allWords)
tf = vectorizer.fit_transform(df["text"])
dftf = pd.DataFrame(tf.A, columns=vectorizer.get_feature_names())
dftf.columns = [f"TF_{x}" for x in dftf.columns]
return dftf, allFeatures
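# Hedged standalone sketch (illustrative, mirrors term_frequency above): binary term counts
# followed by mutual-information ranking on a tiny made-up corpus, to show what the per-fold
# feature selection computes. The texts and labels are assumptions for illustration only.
def _example_term_frequency_ranking():
    texts = ["good team leader", "bad outcome", "good outcome", "bad team morale"]
    labels = [1, 0, 1, 0]
    vectorizer = CountVectorizer(binary=True)
    tf = vectorizer.fit_transform(texts)
    dftf = pd.DataFrame(tf.A, columns=vectorizer.get_feature_names())
    ranked = sorted(zip(mutual_info_classif(dftf, labels, discrete_features=True), dftf.columns),
                    reverse=True)
    return ranked[:3]  # top three (score, token) pairs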
def skf_splitter(folds, df, dependent_variable):
# this split (with folds=5) results in: 20% test, 10% val, 70% train for Flair framework
# and the same 20% test and 80 % train for Machine Learning
indexes = list(range(0, len(df)))
y = df[dependent_variable]
X = df['text']
# setup random state and folds
np.random.seed(13)
kf = StratifiedKFold(n_splits=folds, random_state=13, shuffle=True)
kf = kf.split(X, y)
return kf
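# Hedged usage sketch (assumption): skf_splitter returns a generator of (train_idx, test_idx)
# index arrays, so a caller materialises the per-fold bookkeeping like this.
def _example_skf_usage(df, dependent_variable, folds=5):
    train_ml, test_ml = {}, {}
    for fold, (train_idx, test_idx) in enumerate(skf_splitter(folds, df, dependent_variable)):
        train_ml[fold] = list(train_idx)
        test_ml[fold] = list(test_idx)
    return train_ml, test_ml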
def read_liwc():
    dfliwc = pd.read_excel(f"./data/source_data/LIWC_5k_final_leadership_values.xlsx", converters={'id': str})
#!/bin/python
# Copyright 2018 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
###############################################################################
import csv
import os
import numpy as np
import pandas as pd
###############################################################################
def create_layers_range(config):
"""
Calculate the router id range for each layer of NoC.
Parameters
----------
    config : object
        Configuration object with the per-layer mesh dimensions in config.x and config.y.
    Returns
    -------
    list of range
        router id range for each layer
"""
layers_range = []
router_counter = 0
for x, y in zip(config.x, config.y):
layers_range.append(range(router_counter, router_counter+x*y))
router_counter += x*y
return layers_range
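# Hedged usage sketch (illustrative): for a two-layer NoC with a 4x4 mesh on layer 0 and a
# 2x2 mesh on layer 1, the helper yields one router-id range per layer. SimpleNamespace
# stands in for the real configuration object.
def _example_create_layers_range():
    from types import SimpleNamespace
    config = SimpleNamespace(x=[4, 2], y=[4, 2])
    return create_layers_range(config)  # [range(0, 16), range(16, 20)]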
def find_layer_id(layers_range, router_id):
"""
Find layer id of the router.
Parameters
----------
    layers_range : list of range
        list of router id ranges, one per layer
    router_id : int
        id of the router whose layer should be found
Returns
-------
int
layer id
"""
for itr, layer_range in enumerate(layers_range):
if router_id in layer_range:
return itr
def init_data_structure(config):
"""
Initialize the data structure named 'layers' which is a list (the length of the list
is the number of NoC layer) of dictionaries that contains the flit transfer direction
in the form of pandas data frame.
Parameters
----------
    config : object
        Configuration object (config.z gives the number of layers).
    Returns
    -------
    list
        The initialized data structure
"""
layer_temp = {'Up': pd.DataFrame(), 'Down': pd.DataFrame(),
'North': pd.DataFrame(), 'South': pd.DataFrame(),
'East': pd.DataFrame(), 'West': pd.DataFrame()}
layers = [layer_temp.copy() for itr in range(config.z)]
return layers
def read_dataframe(data, path, layer_id, direction):
"""
Read a data frame from csv file then accumulate the data.
Parameters
----------
data : list
        The accumulated flit transfer data per direction; this is the data that gets updated
path : str
the path of the csv file to be read.
layer_id : int
        layer id of the layer where the router is located
direction : str
Direction of the flit that is moving. Available options are
Up, Down, East, West, North and South
Returns
-------
list
        The updated list of flit traffic direction; returned unchanged if the csv file is empty
"""
temp = pd.read_csv(path, index_col=0)
if temp.empty:
return data
data[layer_id][direction] = data[layer_id][direction].add(
temp, fill_value=0)
return data
def get_latencies(latencies_results_file):
"""
Read the resulting latencies from the csv file.
Parameters:
    - latencies_results_file: the path to the result file.
    Return:
    - A list of the flit, packet and network latencies.
"""
latencies = []
try:
with open(latencies_results_file, newline='') as f:
spamreader = csv.reader(f, delimiter=' ', quotechar='|')
for row in spamreader:
latencies.append(row[1])
except Exception:
# Add dummy values to latencies, -1.
latencies.append(-1)
latencies.append(-1)
latencies.append(-1)
return latencies
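# Hedged usage sketch (illustrative): the returned list is expected to hold the flit, packet
# and network latencies, or three -1 entries when the file cannot be read. The file name
# 'latencies_results.csv' is an assumption for illustration.
def _example_get_latencies(results_dir):
    latencies = get_latencies(os.path.join(results_dir, 'latencies_results.csv'))
    return latencies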
def combine_vc_hists(directory, config):
"""[summary]
Combine the VC histograms from csv files.
Parameters
----------
    directory : str
        the path of the directory that contains the files.
    config : object
        simulation configuration
Returns
-------
[type]
A dataframe object of the combined csv files,
or None if the directory doesn't exist.
"""
if not os.path.exists(directory):
return None
    data = [pd.DataFrame()
import numpy as np
import pandas as pd
import random
import tensorflow.keras as keras
from sklearn.model_selection import train_test_split
def read_data(random_state=42,
otu_filename='../../Datasets/otu_table_all_80.csv',
metadata_filename='../../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['INBREDS', 'Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
data_domain = df[domain.columns].to_numpy(dtype=np.float32)
data_microbioma_train, data_microbioma_test, data_domain_train, data_domain_test = \
train_test_split(data_microbioma, data_domain, test_size=0.1, random_state=random_state)
return data_microbioma_train, data_microbioma_test, data_domain_train, data_domain_test, otu.columns, domain.columns
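# Hedged usage sketch (assumption, not original source): read_data returns numpy arrays plus
# the OTU and domain column labels, so a call site unpacks six values before model training.
def _example_read_data_usage():
    (microbioma_train, microbioma_test,
     domain_train, domain_test,
     otu_columns, domain_columns) = read_data(random_state=42)
    return microbioma_train.shape, domain_train.shape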
def read_df_with_transfer_learning_subset_fewerDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
#data_domain = df[domain.columns].to_numpy(dtype=np.float32)
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
# Transfer learning subset
df_microbioma_test, df_microbioma_transfer_learning, df_domain_test, df_domain_transfer_learning = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma_transfer_learning, df_domain_transfer_learning, test_size=0.3, random_state=random_state)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu.columns, domain.columns
def read_df_with_transfer_learning_subset(random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['INBREDS', 'Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
#data_domain = df[domain.columns].to_numpy(dtype=np.float32)
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
df_microbioma_test, df_microbioma_transfer_learning, df_domain_test, df_domain_transfer_learning = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma_transfer_learning, df_domain_transfer_learning, test_size=0.3, random_state=random_state)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu.columns, domain.columns
def read_df_with_transfer_learning_subset_stratified_by_maize_line(random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['INBREDS', 'Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
#data_domain = df[domain.columns].to_numpy(dtype=np.float32)
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
df_microbioma_test, df_microbioma_transfer_learning, df_domain_test, df_domain_transfer_learning = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
df_temp=df_domain_transfer_learning
col_stratify=df_temp.iloc[:,30:36][df==1].stack().reset_index().loc[:,'level_1']
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma_transfer_learning, df_domain_transfer_learning, test_size=0.3, random_state=random_state, stratify = col_stratify)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu.columns, domain.columns
def read_df_with_transfer_learning_2otufiles_fewerDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv',
otu_transfer_filename='../Datasets/Walters5yearsLater/otu_table_Walters5yearsLater.csv',
metadata_transfer_filename='../Datasets/Walters5yearsLater/metadata_table_Walters5yearsLater.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
df_microbioma_test, _, df_domain_test, _ = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
otu_columns = otu.columns
domain_columns = domain.columns
# TRANSFER LEARNING SUBSETS
otu = pd.read_csv(otu_transfer_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_transfer_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma, df_domain, test_size=0.3, random_state=random_state)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu_columns, domain_columns
def read_df_with_transfer_learning_2otufiles_differentDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv',
metadata_names_transfer=['pH', 'Nmin', 'N', 'C', 'C.N', 'Corg', 'soil_type', 'clay_fration', 'water_holding_capacity'],
otu_transfer_filename='../Datasets/Maarastawi2018/otu_table_Order_Maarastawi2018.csv',
metadata_transfer_filename='../Datasets/Maarastawi2018/metadata_table_Maarastawi2018.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
df_microbioma_test, _, df_domain_test, _ = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
otu_columns = otu.columns
domain_columns = domain.columns
# TRANSFER LEARNING SUBSETS
otu = pd.read_csv(otu_transfer_filename, index_col=0, header=None, sep='\t').T
#otu = otu.set_index('otuids')
otu = otu.reset_index()
otu = otu.drop(['otuids','index'],axis=1)
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_transfer_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names_transfer]
if 'soil_type' in metadata_names_transfer:
        domain = pd.concat([domain, pd.get_dummies(domain['soil_type'], prefix='soil_type')], axis=1)
# -*- coding: utf-8 -*-
import os
import pandas as pd
from collections import defaultdict
os.chdir("/home/jana/Documents/PhD/CompBio/")
herds = pd.read_table("/home/jana/Documents/PhD/CompBio/TestingGBLUP/PedCows_HERDS.txt", sep=" ")
IndGeno = pd.read_table("/home/jana/Documents/PhD/CompBio/IndForGeno_5gen.txt", header=None)
RefAmean = defaultdict()
number = 1
for herd1 in range(1, 101):
for herd2 in range(herd1,101):
        ref = sorted(list(herds.Indiv[herds.cluster.isin([herd1, herd2])]))  # select the animals in both herds here
pd.DataFrame({"ID": ref}).to_csv("/home/jana/Documents/PhD/CompBio/IndMatrix.txt", index=None, header=None)
os.system("grep -Fwf IndMatrix.txt PedigreeNrm.txt > RefMatrix")
a = pd.read_table("/home/jana/Documents/PhD/CompBio/RefMatrix", sep="\s+", header=None)
a.columns = ["Indiv"] + list(IndGeno.loc[:,0])
refA = a.loc[:, ref]
        meanRef = refA.mean().mean()
RefAmean[number] = [herd1, herd2, meanRef]
number = number + 1
RefDF = pd.DataFrame.from_dict(RefAmean, orient="index")
RefADF = RefDF.drop_duplicates()
RefADF.columns = ["Herd1", "Herd2", "A"]
RefADF.to_csv("RefADF_mean.csv", index=None)
ped = pd.read_table("/home/jana/Documents/PhD/CompBio/PedigreeAndGeneticValues_cat.txt", sep=" ")
nr = ped.Indiv[ped.cat.isin(['potomciNP'])]
pb = ped.Indiv[ped.cat == 'pb']
NapAmean = defaultdict()
PbAmean = defaultdict()
number = 1
for herd in range(1,101):
    ref = sorted(list(herds.Indiv[herds.cluster == herd]))  # select the animals in this herd here
pd.DataFrame({"ID": ref}).to_csv("/home/jana/Documents/PhD/CompBio/IndHerd.txt", index=None, header=None)
os.system("grep -Fwf IndHerd.txt PedigreeNrm.txt > HerdMatrix")
a = pd.read_table("/home/jana/Documents/PhD/CompBio/HerdMatrix", sep="\s+", header=None)
a.columns = ["Indiv"] + list(IndGeno.loc[:,0])
    refnapA = a.loc[:, list(nr)]  # relationship with the prediction population
    refpbA = a.loc[:, list(pb)]  # relationship with the breeding bulls
    meanRefNap = refnapA.mean().mean()
    meanRefPb = refpbA.mean().mean()
NapAmean[number] = [herd, meanRefNap]
PbAmean[number] = [herd, meanRefPb]
number = number + 1
NapADF = pd.DataFrame.from_dict(NapAmean, orient="index")
# -*- coding: utf-8 -*-
"""
uGrid "Macro" Code
@author: Phy
"""
from __future__ import division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from technical_tools_PC_3 import Tech_total
from economic_tools_PC_3 import Econ_total
import time
if __name__ == "__main__":
close_fds=False
start_time = time.time()
#PSO Parameters
PSO_Parameters = pd.read_excel('uGrid_Input.xlsx', sheet_name = 'PSO')
maxGen = PSO_Parameters['maxGen'][0]
numInd = PSO_Parameters['numInd'][0]
X_tariff_multiplier = PSO_Parameters['X_tariff_multiplier'][0]
stopLimit = PSO_Parameters['stopLimit'][0]
convergenceRequirement = PSO_Parameters['convergenceRequirement'][0]
lowTestLim = PSO_Parameters['lowTestLim'][0]
highTestLim = PSO_Parameters['highTestLim'][0]
roundDownSize = PSO_Parameters['roundDownSize'][0]
C1 = PSO_Parameters['C1'][0]
C2 = PSO_Parameters['C2'][0]
CF = PSO_Parameters['CF'][0]
W = PSO_Parameters['W'][0]
VF = PSO_Parameters['VF'][0]
#Parameter limits: Battery, PV
#These will be changed so the solutions are more flexible for sites (not input)
#Could make these scaled off of load input
lower_bounds = [1,1]
upper_bounds = [10,5]
#Initialize matrixes and parameters
Parameters_test = np.zeros(2)
Parameters_dev = np.zeros(2)
    Parameters = np.zeros((2,numInd,maxGen)) #this is the result for each parameter, for each individual, in each generation
Propane_ec = np.zeros((numInd,maxGen))
Batt_kWh_tot_ec = np.zeros((numInd,maxGen))
#Create random initial guesses for Parameters
for i in range(2):
for k in range(numInd):
rn = np.random.uniform(lower_bounds[i],upper_bounds[i])
if rn < roundDownSize: #Constraint for minimum sizes
rn = 0
Parameters[i,k,0] = np.copy(rn)
else:
Parameters[i,k,0] = np.copy(rn)
#Initialize Economic Parameters
tariff = np.zeros((numInd,maxGen))
Batt_life_yrs = np.zeros((numInd,maxGen))
#Initialize Global Bests
    #global best: best known positions ever, personal best: best known position for each particle (out of all generations), gbest_change: best in generation (out of all individuals)
#Global Best
gB_propane = 999
gB_tariff = 999
gB_tariff_plus = gB_tariff*(1+X_tariff_multiplier)
gB_parameters = np.zeros(2)
LoadKW_MAK = pd.read_excel('LoadKW_MAK.xlsx',index_col=None, header=None)
hmax = len(LoadKW_MAK)
gB_Cost = np.zeros(hmax)
data_plot_variables = np.zeros((hmax,13))
    gB_plot_variables = pd.DataFrame(data=data_plot_variables, columns=['Batt_SOC', 'Charge', 'LoadkW', 'genLoad', 'Batt_Power_to_Load', 'Batt_Power_to_Load_neg', 'PV_Power', 'PV_Batt_Change_Power', 'dumpload', 'Batt_frac', 'Gen_Batt_Charge_Power', 'Genset_fuel', 'Fuel_kW'])
import os, datetime
from glob import glob
import pandas as pd
import numpy as np
from datetime import timedelta
pd.options.mode.chained_assignment = None # default='warn'
PROB_WEAR = 'PROB_WEAR'
PROB_SLEEP = 'PROB_SLEEP'
PROB_NWEAR = 'PROB_NWEAR'
MHEALTH_TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S"
def mhealth_timestamp_parser(val):
return datetime.datetime.strptime(val, MHEALTH_TIMESTAMP_FORMAT)
def contigous_regions_usingOri(condition):
d = np.floor(np.absolute(np.diff(condition)))
idx, = d.nonzero()
idx += 1
idx = np.r_[0, idx - 1]
idx = np.r_[idx, condition.size - 1]
bout_lis = []
for i in range(len(idx) - 1):
if i == 0:
first = idx[i]
else:
first = idx[i] + 1
second = idx[i + 1]
bout_lis = bout_lis + [[first, second]]
this_ar = np.asarray(bout_lis)
return this_ar
def contigous_regions(condition):
d = np.diff(condition)
idx, = d.nonzero()
idx += 1
idx = np.r_[0, idx - 1]
idx = np.r_[idx, condition.size - 1]
bout_lis = []
for i in range(len(idx) - 1):
if i == 0:
first = idx[i]
else:
first = idx[i] + 1
second = idx[i + 1]
bout_lis = bout_lis + [[first, second]]
this_ar = np.asarray(bout_lis)
return this_ar
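# Hedged usage sketch (illustrative): contigous_regions turns a vector of integer labels into
# [start_index, stop_index] bouts of consecutive identical values.
def _example_contigous_regions():
    labels = np.array([0, 0, 1, 1, 1, 2, 0, 0])
    bouts = contigous_regions(labels)  # -> [[0, 1], [2, 4], [5, 5], [6, 7]]
    return bouts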
def filterUsingZori(bout_array, fil_df, lab_str, ref_str, prob_wear, prob_sleep, prob_nwear):
o_fdf = fil_df.copy(deep=True)
fdf = fil_df.copy(deep=True)
tmp_fdf = fil_df.copy(deep=True)
for n in range(len(bout_array)):
ar_sub = o_fdf[bout_array[n][0]:bout_array[n][1] + 1]
ar_sub_pred = ar_sub[lab_str].values[0]
ar_sub_start = bout_array[n][0]
ar_sub_ori = ar_sub[ref_str].values
bout_array_sub = contigous_regions_usingOri(ar_sub_ori)
bout_array_sub_final = bout_array_sub + ar_sub_start
for m in range(len(bout_array_sub_final)):
start = bout_array_sub_final[m][0]
end = bout_array_sub_final[m][1]
if ar_sub_pred == 0:
if start == end:
fdf.loc[start, 'PREDICTED_SMOOTH'] = 0
fdf.loc[start, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start][prob_wear]
fdf.loc[start, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start][prob_sleep]
fdf.loc[start, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start][prob_nwear]
else:
fdf.loc[start:end, 'PREDICTED_SMOOTH'] = 1
fdf.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_sleep]
fdf.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start:end][prob_wear]
fdf.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_nwear]
elif ar_sub_pred == 1:
if start == end:
fdf.loc[start, 'PREDICTED_SMOOTH'] = 0
fdf.loc[start, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start][prob_sleep]
fdf.loc[start, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start][prob_wear]
fdf.loc[start, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start][prob_nwear]
else:
fdf.loc[start:end, 'PREDICTED_SMOOTH'] = 1
fdf.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_wear]
fdf.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start:end][prob_sleep]
fdf.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_nwear]
elif ar_sub_pred == 2:
if start == end:
fdf.loc[start, 'PREDICTED_SMOOTH'] = 0
fdf.loc[start, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start][prob_nwear]
fdf.loc[start, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start][prob_sleep]
                    fdf.loc[start, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start][prob_wear]
else:
fdf.loc[start:end, 'PREDICTED_SMOOTH'] = 2
fdf.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_wear]
fdf.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start:end][prob_sleep]
fdf.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_nwear]
return fdf
def lookBeforeAfter(lo_df):
global new_lab
df = lo_df.copy()
tmp_df = lo_df.copy()
tmp_ar = tmp_df['PREDICTED_SMOOTH'].values
ff_obout_array = contigous_regions(tmp_ar)
    bout_df = pd.DataFrame(ff_obout_array, columns=['START_IND', 'STOP_IND'])
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so the previously defined values should
        # remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
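        # Illustrative note (not an original test case): the same concatenation behaviour is
        # exposed on the Series.str accessor, e.g.
        #   Series(['a', 'b']).str.cat(Series(['1', '2']), sep='-')  ->  ['a-1', 'b-2']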
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: ３ FULLWIDTH DIGIT THREE (fullwidth 3)
values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
"""
BootstrapChainLadder implementation.
"""
import functools
import warnings
import numpy as np
import pandas as pd
from numpy.random import RandomState
from scipy import stats
from .base import BaseRangeEstimator, BaseRangeEstimatorResult
class BootstrapChainLadder(BaseRangeEstimator):
"""
The purpose of the bootstrap technique is to estimate the prediction
error of the total reserve estimate and to approximate the predictive
distribution. It is often impractical to obtain the prediction error
using an analytical approach due to the complexity of reserve estimators.
Prediction error comprises two components: process error
and estimation error (Prediction Error = Estimation Error + Process Error).
The estimation error (parameter error) represents the uncertainty in the
parameter estimates given that the model is correctly specified. The
process error is analogous to the variance of a random variable,
representing the uncertainty in future outcomes.
The procedure used to generate the predictive distribution of reserve
estimates is based on Leong et al. Appendix A, assuming the starting point
is a triangle of cumulative losses:
1. Calculate the all-year volume-weighted age-to-age factors.
2. Estimate the fitted historical cumulative paid loss and ALAE
using the latest diagonal of the original triangle and the
age-to-age factors from [1] to un-develop the losses.
3. Calculate the unscaled Pearson residuals, degrees of freedom
and scale parameter.
4. Calculate the adjusted Pearson residuals.
5. Sample with replacement from the adjusted Pearson residuals.
6. Calculate the triangle of sampled incremental losses
(I^ = m + r_adj * sqrt(m)), where I^ = Resampled incremental loss,
m = Incremental fitted loss (from [2]) and r_adj = Adjusted Pearson
residuals.
7. Using the triangle from [6], project future losses using the
Chain Ladder method.
8. Include Process variance by simulating each incremental future
loss from a Gamma distribution with mean = I^ and
variance = I^ * scale parameter.
9. Estimate unpaid losses using the Chain Ladder technique.
10. Repeat for the number of cycles specified.
The collection of projected ultimates for each origin year over all
bootstrap cycles comprises the predictive distribution of reserve
estimates.
Note that the estimate of the distribution of losses assumes
development is complete by the final development period. This is
to avoid the complication associated with modeling a tail factor.
References
----------
1. <NAME>., and <NAME>, (2002), *Stochastic Claims Reserving in General
Insurance*, British Actuarial Journal 8(3): 443-518.
2. CAS Working Party on Quantifying Variability in Reserve Estimates,
*The Analysis and Estimation of Loss & ALAE Variability: A Summary Report*,
Casualty Actuarial Society Forum, Fall 2005.
3. Leong et al., (2012), *Back-Testing the ODP Bootstrap of the Paid
Chain-Ladder Model with Actual Historical Claims Data*, Casualty Actuarial
Society E-Forum.
4. Kirschner, et al., *Two Approaches to Calculating Correlated Reserve
Indications Across Multiple Lines of Business* Appendix III, Variance
Journal, Volume 2/Issue 1.
5. <NAME>., (2016), *Using the ODP Bootstrap Model: A
Practitioner's Guide*, CAS Monograph Series Number 4: Casualty Actuarial
Society, 2016.
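Examples
--------
A minimal usage sketch (illustrative only); ``tri`` is assumed to be a
``triangle.CumTriangle`` instance built elsewhere from cumulative loss
data, and the argument values are arbitrary:
>>> bcl = BootstrapChainLadder(cumtri=tri)
>>> bcl_result = bcl(sims=1000, q=[.75, .95], two_sided=True, random_state=516)
The call returns a ``BootstrapChainLadderResult`` whose summary DataFrame
compiles point estimates and bootstrapped reserve quantiles by origin.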
"""
def __init__(self, cumtri):
"""
The BootstrapChainLadder class definition.
Parameters
----------
cumtri: triangle.CumTriangle
A cumulative triangle instance.
"""
super().__init__(cumtri=cumtri)
self._dfrlvi = None
self._dof = None
def __call__(self, sims=1000, q=[.75, .95], procdist="gamma", parametric=False,
two_sided=False, interpolation="linear", random_state=None):
"""
``BootstrapChainLadder`` simulation initializer. Generates predictive
distribution of reserve outcomes by origin and in total.
The estimated distribution of losses assumes development is complete
by the final development period in order to avoid the complication of
modeling a tail factor.
Parameters
----------
sims: int
The number of bootstrap simulations to perform. Defaults to 1000.
q: array_like of float or float
Quantile or sequence of quantiles to compute, which must be
between 0 and 1 inclusive.
procdist: str
The distribution used to incorporate process variance. Currently,
this can only be set to "gamma".
two_sided: bool
Whether the two_sided interval should be included in summary
output. For example, if ``two_sided==True`` and ``q=.95``, then
the 2.5th and 97.5th quantiles of the bootstrapped reserve
distribution will be returned [(1 - .95) / 2, (1 + .95) / 2]. When
False, only the specified quantile(s) will be computed. Defaults
to False.
parametric: bool
If True, fit standardized residuals to a normal distribution, and
sample from this parameterized distribution. Otherwise, bootstrap
procedure samples with replacement from the collection of
standardized residuals. Defaults to False.
interpolation: {"linear", "lower", "higher", "midpoint", "nearest"}
This optional parameter specifies the interpolation method to use
when the desired quantile lies between two data points i < j. See
``numpy.quantile`` for more information. Default value is "linear".
random_state: np.random.RandomState
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by np.random.
Returns
-------
BootstrapChainLadderResult
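Notes
-----
The simulation pipeline mirrors the steps listed in the class docstring:
fit ldfs on the observed triangle, back-fit expected incrementals, form
adjusted Pearson residuals, resample them into ``sims`` synthetic
triangles, re-develop each with its own ldfs, add gamma process variance,
and summarize the resulting reserve distribution by origin and in total.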
"""
ldfs = self._ldfs(sel="all-weighted")
cldfs = self._cldfs(ldfs=ldfs)
maturity = self.tri.maturity.astype(str)
latest = self.tri.latest_by_origin
trisqrd = self._trisqrd(ldfs=ldfs)
# Obtain reference to BootstrapChainLadder estimates.
tri_fit_cum = self._tri_fit_cum(ldfs=ldfs)
tri_fit_incr = self._tri_fit_incr(fitted_tri_cum=tri_fit_cum)
unscld_residuals = self._resid_us(fitted_tri_incr=tri_fit_incr)
adjust_residuals = self._resid_adj(resid_us=unscld_residuals)
scale_param = self._scale_param(resid_us=unscld_residuals)
sampling_dist = self._sampling_dist(resid_adj=adjust_residuals)
dfsamples = self._bs_samples(
sampling_dist=sampling_dist, fitted_tri_incr=tri_fit_incr,
sims=sims, parametric=parametric,
random_state=random_state
)
dfldfs = self._bs_ldfs(dfsamples=dfsamples)
dfforecasts = self._bs_forecasts(dfsamples, dfldfs, scale_param)
dfprocerror = self._bs_process_error(
dfforecasts=dfforecasts, scale_param=scale_param, procdist=procdist,
random_state=random_state
)
dfreserves = self._bs_reserves(dfprocerror=dfprocerror)
ultimates = dfreserves.groupby(["origin"])["ultimate"].mean()
ultimates[latest.index.min()] = latest[latest.index.min()]
reserves = pd.Series(ultimates - latest, name="reserve")
std_error = self._bs_std_error(dfreserves)
cv = pd.Series(std_error / reserves, name="cv")
qtls, qtlhdrs = self._qtls_formatter(q=q, two_sided=two_sided)
# Compile Chain Ladder point estimate summary.
dfmatur = maturity.to_frame().reset_index(drop=False).rename({"index": "origin"}, axis=1)
dfcldfs = cldfs.to_frame().reset_index(drop=False).rename({"index": "maturity"}, axis=1)
dfcldfs["maturity"] = dfcldfs["maturity"].astype(str)
dfcldfs["emergence"] = 1 / dfcldfs["cldf"]
dfsumm = dfmatur.merge(dfcldfs, on=["maturity"], how="left").set_index("origin")
dfsumm.index.name = None
dflatest = latest.to_frame().rename({"latest_by_origin": "latest"}, axis=1)
dfsumm = functools.reduce(
lambda df1, df2: df1.join(df2),
(dflatest, ultimates.to_frame(), reserves.to_frame(), std_error.to_frame(), cv.to_frame()),
dfsumm
)
# Add "Total" index and set to NaN fields that shouldn't be aggregated.
dfsumm.loc["total"] = dfsumm.sum()
dfsumm.loc["total", "maturity"] = ""
dfsumm.loc["total", ["cldf", "emergence"]] = np.NaN
dfsumm.loc["total", "std_error"] = std_error["total"]
dfsumm.loc["total", "cv"] = std_error["total"] / dfsumm.loc["total", "reserve"]
# Attach quantiles.
dftotal_res = dfreserves.groupby(["sim"], as_index=False).sum()
dftotal_res["origin"] = "total"
dfreserves = pd.concat([dfreserves, dftotal_res])
for ii, jj in zip(qtls, qtlhdrs):
dfsumm[jj] = dfsumm.index.map(
lambda v: np.percentile(
dfreserves[dfreserves.origin == v]["reserve"].values,
100 * ii, interpolation=interpolation
)
)
bcl_result = BootstrapChainLadderResult(
summary=dfsumm, tri=self.tri, ldfs=ldfs, tail=1.0, trisqrd=trisqrd,
reserve_dist=dfreserves, sims_data=dfprocerror, scale_param=scale_param,
dof=self.dof, unscaled_residuals=unscld_residuals,
adjusted_residuals=adjust_residuals,
sampling_dist=None if parametric else sampling_dist,
fitted_tri_cum=tri_fit_cum, fitted_tri_incr=tri_fit_incr, sims=sims,
procdist=procdist, parametric=parametric, q=q, interpolation=interpolation
)
return(bcl_result)
@property
def dfrlvi(self):
"""
Transform triangle's last valid origin index into DataFrame format.
Returns
-------
pd.DataFrame
"""
if self._dfrlvi is None:
df = self.tri.rlvi.reset_index(drop=False)
df = df.rename({"index": "origin", "dev": "l_act_dev"}, axis=1)
self._dfrlvi = df.drop("col_offset", axis=1)
return(self._dfrlvi)
def _get_dfcombined(self, dfsamples, dfldfs):
"""
Merge output of ``self._bs_samples`` and ``self._bs_ldfs``.
Parameters
----------
dfsamples: pd.DataFrame
Output from ``self._bs_samples``.
dfldfs: pd.DataFrame
Output from ``self._bs_ldfs``.
Returns
-------
pd.DataFrame
"""
dfcombined = dfsamples.merge(dfldfs, on=["sim", "dev"], how="left")
dfcombined = dfcombined.merge(self.dfrlvi, on=["origin"], how="left")
return(dfcombined.reset_index(drop=True).sort_values(by=["sim", "origin", "dev"]))
@property
def dof(self):
"""
Return the degrees of freedom.
Returns
-------
int
"""
if self._dof is None:
self._dof = self.tri.nbr_cells - (self.tri.columns.size - 1) - self.tri.index.size  # n - p, where p = number of origin periods + number of development periods - 1
return(self._dof)
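# Worked check of the dof computation above (comment only, not library code):
# for a 10 x 10 cumulative triangle, n = 10 * 11 / 2 = 55 observed cells and
# p = 10 + 10 - 1 = 19 estimated parameters (origin levels plus development
# periods), so dof = n - p = 36. The scale parameter computed below is then
# sum(r_us**2) / 36.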
def _scale_param(self, resid_us):
"""
Return the scale parameter, which is the sum of the squared unscaled
Pearson residuals over the degrees of freedom. This method is intended
for internal use only.
Parameters
----------
resid_us: pd.DataFrame
Unscaled Pearson residuals, typically output by
``self._resid_us``.
Returns
-------
float
"""
return((resid_us**2).sum().sum() / self.dof)
def _tri_fit_cum(self, ldfs):
"""
Return the cumulative fitted triangle using backwards recursion,
starting with the observed cumulative paid/incurred-to-date along the
latest diagonal.
Parameters
----------
ldfs: pd.Series
Selected ldfs, typically the output of calling ``self._ldfs``.
Returns
-------
pd.DataFrame
"""
fitted_tri_cum = self.tri.copy(deep=True)
for ii in range(fitted_tri_cum.shape[0]):
iterrow = fitted_tri_cum.iloc[ii, :]
if iterrow.isnull().any():
# Find first NaN element in iterrow.
nan_hdr = iterrow.isnull()[iterrow.isnull() == True].index[0]
nan_idx = fitted_tri_cum.columns.tolist().index(nan_hdr)
init_idx = nan_idx - 1
else:
# If here, iterrow is the most mature exposure period.
init_idx = fitted_tri_cum.shape[1] - 1
# Set to NaN any development periods earlier than init_idx.
fitted_tri_cum.iloc[ii, :init_idx] = np.NaN
# Iterate over rows, undeveloping triangle from latest diagonal.
for jj in range(fitted_tri_cum.iloc[ii, :init_idx].size, 0, -1):
prev_col_idx, curr_col_idx, curr_ldf_idx = jj, jj - 1, jj - 1
prev_col_val = fitted_tri_cum.iloc[ii, prev_col_idx]
curr_ldf_val = ldfs.iloc[curr_ldf_idx]
fitted_tri_cum.iloc[ii, curr_col_idx] = (prev_col_val / curr_ldf_val)
return(fitted_tri_cum)
@staticmethod
def _tri_fit_incr(fitted_tri_cum):
"""
Return a fitted incremental triangle.
Parameters
----------
fitted_tri_cum: pd.DataFrame
Typically the output from ``self._tri_fit_cum``.
Returns
-------
pd.DataFrame
"""
tri = fitted_tri_cum.diff(axis=1)
tri.iloc[:, 0] = fitted_tri_cum.iloc[:, 0]
return(tri)
def _resid_us(self, fitted_tri_incr):
"""
Return unscaled Pearson residuals, given by
:math:`r_{us} = \\frac{I - m}{\\sqrt{|m|}}`, where :math:`r_{us}` represents the
unscaled Pearson residuals, :math:`I` the actual incremental losses and :math:`m`
fitted incremental losses.
Parameters
----------
fitted_tri_incr: pd.DataFrame
Typically the output from ``self._tri_fit_incr``.
Returns
-------
pd.DataFrame
"""
# I represents actual incremental losses, m fitted incremental losses.
I = pd.DataFrame(self.tri.to_incr())
m = fitted_tri_incr
return((I - m) / np.sqrt(m.abs()))
def _resid_adj(self, resid_us):
"""
Compute and return the adjusted Pearson residuals, given by
:math:`r_{adj} = \\sqrt{\\frac{N}{dof}} * r_{us}`, where *r_adj*
represents the adjusted Pearson residuals, *N* the number of triangle cells,
*dof* the degrees of freedom and *r_us* the unscaled Pearson residuals.
Parameters
----------
resid_us: pd.DataFrame
Unscaled Pearson residuals, typically output by ``self._resid_us``.
Returns
-------
pd.DataFrame
"""
return(np.sqrt(self.tri.nbr_cells / self.dof) * resid_us)
@staticmethod
def _sampling_dist(resid_adj):
"""
Return ``resid_adj`` as a 1-dimensional array, which will be sampled
from with replacement in order to produce synthetic triangles for
bootstrapping. Any NaN's and 0's present in ``resid_adj`` will not be
present in the returned array.
Parameters
----------
resid_adj: pd.DataFrame
Adjusted Pearson residuals, typically output by ``self._resid_adj``.
Returns
-------
np.ndarray
"""
resid_ = resid_adj.iloc[:-1, :-1].values.ravel()
return(resid_[np.logical_and(~np.isnan(resid_), resid_ != 0)])
def _bs_samples(self, sampling_dist, fitted_tri_incr, sims=1000, parametric=False,
random_state=None):
"""
Return DataFrame containing sims resampled-with-replacement
incremental loss triangles if ``parametric=False``, otherwise
random variates from a normal distribution with mean zero and
variance derived from ``resid_adj``. Randomly generated incremental
data gets cumulated in preparation for ldf calculation in the next
step.
Parameters
----------
sampling_dist: np.ndarray
The residuals from the fitted incremental triangle coerced
into a one-dimensional numpy array.
fitted_tri_incr: pd.DataFrame
The incremental triangle fitted using backwards recursion.
Typically the output of ``self._tri_fit_incr``.
sims: int
The number of bootstrap simulations to run. Defaults to 1000.
parametric: bool
If True, fit standardized residuals to a normal distribution, and
sample from the parameterized distribution. Otherwise, bootstrap
procedure proceeds by sampling with replacement from the array
of standardized residuals. Defaults to False.
random_state: np.random.RandomState
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by np.random.
Returns
-------
pd.DataFrame
"""
if random_state is not None:
if isinstance(random_state, int):
prng = RandomState(random_state)
elif isinstance(random_state, RandomState):
prng = random_state
else:
prng = RandomState()
sampling_dist = sampling_dist.flatten()
fti = fitted_tri_incr.reset_index(drop=False).rename({"index": "origin"}, axis=1)
dfm = pd.melt(fti, id_vars=["origin"], var_name="dev", value_name="value")
dfm = dfm[~np.isnan(dfm["value"])].astype({"origin": int, "dev": int, "value": float})
# Make positive any first development period negative values.
min_devp = dfm["dev"].min()
dfm["value"] = np.where(
np.logical_and(dfm["dev"].values == min_devp, dfm["value"].values < 0),
1., dfm["value"].values
)
dfi = self.tri.to_tbl(dropna=False).drop("value", axis=1)
dfp = dfi.merge(dfm, how="outer", on=["origin", "dev"])
dfp["rectype"] = np.where(np.isnan(dfp["value"].values), "forecast", "actual")
dfp = dfp.rename({"value": "incr"}, axis=1)
dfp["incr_sqrt"] = np.sqrt(dfp["incr"].values)
dfrtypes = {"origin": int, "dev": int, "incr": float,
"incr_sqrt": float, "rectype": str}
dfrcols = ["origin", "dev", "incr", "rectype", "incr_sqrt"]
# Replicate dfp sims times then redefine datatypes.
dfr = pd.DataFrame(np.tile(dfp, (sims, 1)), columns=dfrcols).astype(dfrtypes)
# Assign simulation identifier to each record in dfr.
dfr["sim"] = np.divmod(dfr.index, self.tri.shape[0] * self.tri.shape[1])[0]
sample_size = dfr.shape[0]
if parametric:
# Sample random standard normal residuals.
dfr["resid"] = prng.normal(loc=0, scale=sampling_dist.std(ddof=1), size=sample_size)
else:
# Randomly sample residuals from sampling_dist.
dfr["resid"] = prng.choice(sampling_dist, sample_size, replace=True)
# Calculate resampled incremental and cumulative losses.
dfr["resid"] = np.where(dfr["rectype"].values == "forecast", np.NaN, dfr["resid"].values)
dfr = dfr.sort_values(by=["sim", "origin", "dev"]).reset_index(drop=True)
dfr["samp_incr"] = dfr["incr"].values + dfr["resid"].values * dfr["incr_sqrt"].values
dfr["samp_cum"] = dfr.groupby(["sim", "origin"], as_index=False)["samp_incr"].cumsum()
return(dfr.reset_index(drop=True))
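# Sketch of the resampling step above (assumption-level pseudocode, using
# plain numpy names rather than the DataFrame columns):
#
#   m = fitted incremental losses; r = adjusted residuals; prng = RandomState
#   resampled_incr = m + prng.choice(r, size=m.shape) * np.sqrt(m)
#
# i.e. each synthetic incremental is the fitted value plus a resampled
# residual rescaled by sqrt(m), matching samp_incr = incr + resid * incr_sqrt.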
def _bs_ldfs(self, dfsamples):
"""
Compute and return loss development factors for each set of synthetic
loss data.
Parameters
----------
dfsamples: pd.DataFrame
Output from ``self._bs_samples``.
Returns
-------
pd.DataFrame
"""
keepcols = ["sim", "origin", "dev", "samp_cum", "last_origin"]
new_col_names = {"index": "dev", "origin": "last_origin", "row_offset": "origin_offset"}
dflvi = self.tri.clvi.reset_index(drop=False).rename(new_col_names, axis=1)
dfinit = dfsamples.merge(dflvi, how="left", on=["dev"])
dfinit = dfinit[keepcols].sort_values(by=["sim", "dev", "origin"])
df = dfinit[~np.isnan(dfinit["samp_cum"])].reset_index(drop=True)
df["_aggdev2"] = np.where(df["origin"].values == df["last_origin"].values, 0, df["samp_cum"].values)
df2 = df.groupby(["sim", "dev"], as_index=False)[["samp_cum", "_aggdev2"]].sum().rename(
{"samp_cum": "_aggdev1"}, axis=1)
df2["_aggdev2"] = df2["_aggdev2"].shift(periods=1)
df2["dev"] = df2["dev"].shift(periods=1)
dfldfs = df2[df2["_aggdev2"] != 0].dropna(how="any")
dfldfs["dev"] = dfldfs["dev"].astype(int)
dfldfs["ldf"] = dfldfs["_aggdev1"] / dfldfs["_aggdev2"]
return(dfldfs[["sim", "dev", "ldf"]].reset_index(drop=True))
def _bs_forecasts(self, dfsamples, dfldfs, scale_param):
"""
Populate lower-right of each simulated triangle using values from
``self._bs_samples`` and development factors from ``self._bs_ldfs``.
Parameters
----------
dfsamples: pd.DataFrame
Output from ``self._bs_samples``.
dfldfs: pd.DataFrame
Output from ``self._bs_ldfs``.
scale_param: float
the sum of the squared unscaled Pearson residuals over the
degrees of freedom. Output from ``self._scale_param``.
Returns
-------
pd.DataFrame
"""
dfcombined = self._get_dfcombined(dfsamples, dfldfs)
min_origin_year = dfcombined["origin"].min()
dfcombined["_l_init_indx"] = np.where(
dfcombined["dev"].values >= dfcombined["l_act_dev"].values,
dfcombined.index.values, -1
)
dfacts = dfcombined[(dfcombined["origin"].values == min_origin_year) |
(dfcombined["_l_init_indx"].values == -1)]
dffcst = dfcombined[~dfcombined.index.isin(dfacts.index)].sort_values(
by=["sim", "origin", "dev"])
dffcst["_l_act_indx"] = dffcst.groupby(["sim", "origin"])["_l_init_indx"].transform("min")
l_act_cum = dffcst.loc[dffcst["_l_act_indx"], "samp_cum"].values
dffcst["l_act_cum"] = l_act_cum
dffcst["_cum_ldf"] = dffcst.groupby(["sim", "origin"])["ldf"].transform(
"cumprod").shift(periods=1)
dffcst["_samp_cum2"] = dffcst["l_act_cum"].values * dffcst["_cum_ldf"].values
dffcst["_samp_cum2"] = np.where(
np.isnan(dffcst["_samp_cum2"].values), 0, dffcst["_samp_cum2"].values
)
dffcst["cum_final"] = np.where(
np.isnan(dffcst["samp_cum"].values), 0,
dffcst["samp_cum"].values) + dffcst["_samp_cum2"].values
# Combine forecasts with actuals then compute incremental losses by sim and origin.
dffcst = dffcst.drop(labels=["samp_cum", "samp_incr"], axis=1).rename(
columns={"cum_final": "samp_cum"})
dfsqrd = pd.concat([dffcst, dfacts], sort=True).sort_values(
by=["sim", "origin", "dev"])
dfsqrd["_dev1_ind"] = (dfsqrd["dev"].values == 1) * 1
dfsqrd["_incr_dev1"] = dfsqrd["_dev1_ind"].values * dfsqrd["samp_cum"].values
dfsqrd["_incr_dev2"] = dfsqrd.groupby(["sim", "origin"])["samp_cum"].diff(periods=1)
dfsqrd["_incr_dev2"] = np.where(
np.isnan(dfsqrd["_incr_dev2"].values), 0, dfsqrd["_incr_dev2"].values
)
dfsqrd["samp_incr"] = dfsqrd["_incr_dev1"].values + dfsqrd["_incr_dev2"].values
dfsqrd["var"] = np.abs(dfsqrd["samp_incr"].values * scale_param)
dfsqrd["sign"] = np.where(dfsqrd["samp_incr"].values > 0, 1, -1)
dfsqrd = dfsqrd.drop(
labels=[ii for ii in dfsqrd.columns if ii.startswith("_")], axis=1)
return(dfsqrd.sort_values(by=["sim", "origin", "dev"]).reset_index(drop=True))
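# Projection logic in brief (comment only): for each sim/origin, the last
# actual cumulative value is carried forward by the cumulative product of the
# simulated ldfs (cum_{d+1} = cum_d * ldf_d), and incremental losses are then
# recovered by differencing the squared triangle.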
@staticmethod
def _bs_process_error(dfforecasts, scale_param, procdist="gamma", random_state=None):
"""
Incorporate process error by simulating each incremental future
loss from ``procdist``. The mean is set to the forecast incremental
loss amount and variance to `mean x self.scale_param`.
The parameters for ``procdist`` must be positive. Since the mean
and variance used to parameterize ``procdist`` depend on the
resampled incremental losses, it is necessary to incorporate logic
to address the possibility of negative incremental losses arising
in the resampling stage. The approach used to handle negative
incremental values is described in Shapland[1], and replaces the
distribution mean with the absolute value of the mean, and the
variance with the absolute value of the mean multiplied by ``scale_param``.
Parameters
----------
dfforecasts: pd.DataFrame
DataFrame of bootstrap forecasts generated within
``self._bs_forecasts``.
scale_param: float
the sum of the squared unscaled Pearson residuals over the
degrees of freedom. Available in ``self._scale_param``.
procdist: str
Specifies the distribution used to incorporate process error.
Currently, can only be set to "gamma". Any other distribution
will result in an error.
random_state: np.random.RandomState
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by np.random.
Returns
-------
pd.DataFrame
"""
# Initialize pseudo random number generator.
if random_state is not None:
if isinstance(random_state, int):
prng = RandomState(random_state)
elif isinstance(random_state, RandomState):
prng = random_state
else:
prng = RandomState()
# Parameterize distribution for the incorporation of process variance.
if procdist.strip().lower() == "gamma":
dfforecasts["param2"] = scale_param
dfforecasts["param1"] = np.abs(dfforecasts["samp_incr"].values / dfforecasts["param2"].values)
def fdist(param1, param2):
"""
gamma.rvs(a=param1, scale=param2, size=1, random_state=None)
"""
return(prng.gamma(param1, param2))
else:
raise ValueError("Invalid procdist specification: `{}`".format(procdist))
dfforecasts["final_incr"] = np.where(
dfforecasts["rectype"].values == "forecast",
fdist(dfforecasts["param1"].values, dfforecasts["param2"].values) * dfforecasts["sign"].values,
dfforecasts["samp_incr"].values
)
dfforecasts["final_cum"] = dfforecasts.groupby(["sim", "origin"])["final_incr"].cumsum()
dfforecasts = dfforecasts.rename({"final_cum": "ultimate", "l_act_cum": "latest"}, axis=1)
return(dfforecasts.sort_values(by=["sim", "origin", "dev"]).reset_index(drop=True))
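    # Parameterization sketch (comments only, not part of the original module):
    # for a resampled incremental loss m and scale parameter phi, the gamma draw
    # above uses shape = |m| / phi and scale = phi, so the simulated mean is |m|
    # and the variance is |m| * phi, which is the intended process-variance
    # assumption.
    #
    #   >>> prng = RandomState(516)
    #   >>> m, phi = 1000.0, 2.5
    #   >>> draws = prng.gamma(abs(m) / phi, phi, size=100000)
    #   >>> round(draws.mean()), round(draws.var())   # approximately (1000, 2500)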
@staticmethod
def _bs_reserves(dfprocerror):
"""
Compute unpaid loss reserve estimate using output from
``self._bs_process_error``.
Parameters
----------
dfprocerror: pd.DataFrame
Output from ``self._bs_process_error``.
Returns
-------
pd.DataFrame
"""
keepcols = ["sim", "origin", "latest", "ultimate", "reserve"]
max_devp = dfprocerror["dev"].values.max()
dfprocerror["reserve"] = dfprocerror["ultimate"] - dfprocerror["latest"]
dfreserves = dfprocerror[dfprocerror["dev"].values == max_devp][keepcols].drop_duplicates()
dfreserves["latest"] = np.where(
np.isnan(dfreserves["latest"].values),
dfreserves["ultimate"].values, dfreserves["latest"].values
)
        dfreserves["reserve"] = np.nan_to_num(dfreserves["reserve"].values, nan=0.0)
return(dfreserves.sort_values(by=["origin", "sim"]).reset_index(drop=True))
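    # Worked example (hypothetical numbers, comments only): for one simulation,
    # an origin with ultimate = 1200 and latest = 900 at the maximum dev period
    # yields reserve = 1200 - 900 = 300. Rows with a missing latest value have
    # their latest set to the ultimate and their reserve coerced to 0 by the
    # NaN handling above.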
@staticmethod
def _bs_std_error(dfreserves):
"""
Compute standard error of bootstrapped reserves by origin and in aggregate.
Parameters
----------
dfreserves: pd.DataFrame
Output from ``self._bs_reserves``.
Returns
-------
pd.Series
"""
# Compute standard deviation of bootstrap samples by origin.
dforigin_std = dfreserves.groupby(["origin"], as_index=False)["reserve"].std(ddof=1)
origin_se = pd.Series(
data=dforigin_std["reserve"].values, index=dforigin_std["origin"].values,
name="std_error")
dftotal = dfreserves.groupby(["sim"], as_index=False)["reserve"].sum()
total_se = pd.Series(
data=dftotal["reserve"].std(ddof=1), index=["total"], name="std_error"
)
return(origin_se.append(total_se))
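    # Usage sketch (hypothetical frame; assumes the enclosing class is the
    # BootstrapChainLadder estimator referenced by the result class below):
    #
    #   >>> dfr = pd.DataFrame({
    #   ...     "sim": [1, 1, 2, 2], "origin": [2019, 2020, 2019, 2020],
    #   ...     "reserve": [100.0, 150.0, 120.0, 130.0]})
    #   >>> _bs_std_error(dfr)
    #
    # returns the sample standard deviation of reserves per origin plus a
    # "total" entry computed over per-simulation reserve sums (250 and 250
    # here, so the aggregate standard error is 0).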
class BootstrapChainLadderResult(BaseRangeEstimatorResult):
"""
Container class for ``BootstrapChainLadder`` output.
Parameters
----------
summary: pd.DataFrame
Chain Ladder summary compilation.
reserve_dist: pd.DataFrame
        The predictive distribution of reserve estimates generated via
bootstrapping. ``reserve_dist`` is a five column DataFrame
consisting of the simulation number, origin period, the latest
loss amount for the associated origin period, and the predictive
distribution of ultimates and reserves.
sims_data: pd.DataFrame
        A DataFrame consisting of all simulated values and intermediate
        fields. When a large number of bootstrap iterations is run,
``sims_data`` will be correspondingly large. The fields include:
- dev:
The simulated development period.
- incr:
            The actual incremental loss amount obtained from the fitted triangle.
- incr_sqrt:
The square root of incr.
- l_act_cum:
The latest actual cumulative loss amount for dev/origin.
- l_act_dev:
The latest dev period with actual losses for a given origin period.
- ldf:
            Loss development factors computed on synthetic triangle data.
- origin:
The simulated origin period.
- rectype:
            Whether the dev/origin combination represents actual or forecast data
in the squared triangle.
- resid:
The resampled adjusted residuals if ``parametric=False``, otherwise a
random sampling from a normal distribution with mean zero and variance
based on the variance of the adjusted residuals.
- samp_cum:
            A synthetic cumulative loss amount.
- samp_incr:
A synthetic incremental loss amount.
- sim:
Bootstrap iteration.
- var:
The variance, computed as scale_param x samp_incr.
- sign:
The sign of samp_incr.
- param2/param1:
Parameters for the process error distribution.
- final_incr:
            Final simulated incremental loss amount after the incorporation of
process error.
- final_cum:
Final simulated cumulative loss amount after the incorporation of
process error.
tri: trikit.triangle.CumTriangle
A cumulative triangle instance.
ldfs: pd.Series
Loss development factors.
scale_param: float
        The sum of the squared unscaled Pearson residuals over the triangle's
degrees of freedom.
dof: int
Triangle degrees of freedom.
unscaled_residuals: pd.DataFrame
The unscaled residuals.
adjusted_residuals: pd.DataFrame
The adjusted residuals.
sampling_dist: np.ndarray
Same as ``adjusted_residuals`` but as a numpy array with NaN's and 0's
removed. None if ``parametric=True``.
fitted_tri_cum: pd.DataFrame
Cumulative triangle fit using backwards recursion.
fitted_tri_incr: pd.DataFrame
Incremental triangle fit using backwards recursion.
sims: int
Number of bootstrap iterations performed.
procdist: str
Distribution used to incorporate process variance. Currently "gamma" is
the only option.
parametric: bool
Whether parametric or non-parametric bootstrap was performed.
q: float or array_like of float
Quantiles over which to evaluate reserve distribution in summary output.
interpolation: {"linear", "lower", "higher", "midpoint", "nearest"}
Optional parameter which specifies the interpolation method to use
when the desired quantile lies between two data points i < j. See
``numpy.quantile`` for more information. Default value is "linear".
kwargs: dict
Additional keyword arguments passed into ``BootstrapChainLadder``'s
``__call__`` method.
"""
def __init__(self, summary, tri, ldfs, tail, trisqrd, reserve_dist, sims_data,
scale_param, dof, unscaled_residuals, adjusted_residuals,
sampling_dist, fitted_tri_cum, fitted_tri_incr, sims, procdist,
parametric, q, interpolation, **kwargs):
super().__init__(summary=summary, tri=tri, ldfs=ldfs, tail=tail,
trisqrd=trisqrd, process_error=None, parameter_error=None)
self.unscaled_residuals = unscaled_residuals
self.adjusted_residuals = adjusted_residuals
self.fitted_tri_incr = fitted_tri_incr
self.fitted_tri_cum = fitted_tri_cum
self.sampling_dist = sampling_dist
self.interpolation = interpolation
self.reserve_dist = reserve_dist
self.scale_param = scale_param
self.parametric = parametric
self.sims_data = sims_data
self.procdist = procdist
self.sims = sims
self.dof = dof
self.q = q
if kwargs is not None:
for kk in kwargs:
setattr(self, kk, kwargs[kk])
qtlsfields = [ii for ii in self.summary.columns if ii.endswith("%")]
self.qtlhdrs = {ii: "{:,.0f}".format for ii in qtlsfields}
self._summspecs.update(self.qtlhdrs)
# Properties.
self._residuals_detail = None
self._fit_assessment = None
self._origin_dist = None
self._agg_dist = None
@property
def origin_dist(self):
"""
Return distribution of bootstrapped ultimates/reserves by origin period.
Returns
-------
pd.DataFrame
"""
if self._origin_dist is None:
dist_columns = ["latest", "ultimate", "reserve"]
self._origin_dist = self.reserve_dist.groupby(
["sim", "origin"], as_index=False)[dist_columns].sum()
return(self._origin_dist)
@property
def residuals_detail(self):
"""
Summary statistics based on triangle residuals.
Returns
-------
pd.DataFrame
"""
if self._residuals_detail is None:
if not self.parametric:
unscaled = self.unscaled_residuals.values.ravel()
adjusted = self.adjusted_residuals.values.ravel()
unscaled = unscaled[~np.isnan(unscaled)]
adjusted = adjusted[~np.isnan(adjusted)]
unscaled = unscaled[unscaled != 0]
adjusted = adjusted[adjusted != 0]
unscaled_size = unscaled.size
unscaled_sum = unscaled.sum(axis=0)
unscaled_ssqr = np.sum(unscaled**2, axis=0)
unscaled_min = unscaled.min(axis=0)
unscaled_max = unscaled.max(axis=0)
unscaled_mean = unscaled.mean(axis=0)
unscaled_skew = stats.skew(unscaled, axis=0, nan_policy="omit")
unscaled_mode = stats.mode(unscaled, axis=0, nan_policy="omit").mode[0]
unscaled_cvar = stats.variation(unscaled, axis=0, nan_policy="omit")
unscaled_kurt = stats.kurtosis(unscaled, axis=0, nan_policy="omit")
unscaled_var = unscaled.var(ddof=1, axis=0)
unscaled_std = unscaled.std(ddof=1, axis=0)
unscaled_med = np.median(unscaled, axis=0)
adjusted_size = adjusted.size
adjusted_sum = adjusted.sum(axis=0)
adjusted_ssqr = np.sum(adjusted**2, axis=0)
adjusted_min = adjusted.min(axis=0)
adjusted_max = adjusted.max(axis=0)
adjusted_mean = adjusted.mean(axis=0)
adjusted_skew = stats.skew(adjusted, axis=0, nan_policy="omit")
adjusted_mode = stats.mode(adjusted, axis=0, nan_policy="omit").mode[0]
adjusted_cvar = stats.variation(adjusted, axis=0, nan_policy="omit")
adjusted_kurt = stats.kurtosis(adjusted, axis=0, nan_policy="omit")
adjusted_var = adjusted.var(ddof=1, axis=0)
adjusted_std = adjusted.std(ddof=1, axis=0)
adjusted_med = np.median(adjusted, axis=0)
self._residuals_detail = pd.DataFrame({
"unscaled": [
unscaled_size, unscaled_sum , unscaled_ssqr, unscaled_min,
unscaled_max, unscaled_mean, unscaled_skew, unscaled_mode,
unscaled_cvar, unscaled_kurt, unscaled_var , unscaled_std,
unscaled_med
],
"adjusted": [
adjusted_size, adjusted_sum , adjusted_ssqr, adjusted_min,
adjusted_max, adjusted_mean, adjusted_skew, adjusted_mode,
adjusted_cvar, adjusted_kurt, adjusted_var , adjusted_std,
adjusted_med
],
},
index=[
"size", "sum", "sum_of_squares", "minimum", "maximum", "mean",
"skew", "mode", "cov", "kurtosis", "variance",
"standard_deviation", "median"
]
)
return(self._residuals_detail)
def _bs_data_transform(self, qtls, qtlhdrs):
"""
Starts with BaseChainLadderResult's ``_data_transform``, and performs additional
        pre-processing in order to generate a plot of bootstrapped reserve ranges by
origin period.
Returns
-------
pd.DataFrame
"""
data0 = self._data_transform()
data0 = data0[data0["origin"] != "total"]
data1 = self._get_quantiles_by_devp(qtls, qtlhdrs)
data1 = data1[data1["origin"] != "total"]
data = data0.merge(data1, on=["origin", "dev"], how="left")
# Remove qtlhdrs values where rectype=="actual".
for qtlhdr in qtlhdrs:
data[qtlhdr] = np.where(
data["rectype"].values == "actual", np.NaN, data[qtlhdr].values
)
# Determine the first forecast period by origin, and set q-fields to actuals.
increment = np.unique(self.ldfs.index[1:] - self.ldfs.index[:-1])[0]
data["_ff"] = np.where(
data["rectype"].values == "forecast",
data["dev"].values, data["dev"].values.max() + increment
)
data["_minf"] = data.groupby(["origin"])["_ff"].transform("min")
for hdr in qtlhdrs:
data[hdr] = np.where(
np.logical_and(
data["rectype"].values == "forecast",
data["_minf"].values == data["dev"].values
), data["loss"].values, data[hdr].values
)
data = data.drop(["_ff", "_minf"], axis=1).reset_index(drop=True)
dfv = data[["origin", "dev", "rectype", "loss"]]
dfl = data[["origin", "dev", "rectype", qtlhdrs[0]]]
dfu = data[["origin", "dev", "rectype", qtlhdrs[-1]]]
dfl["rectype"] = qtlhdrs[0]
dfl = dfl.rename({qtlhdrs[0]: "loss"}, axis=1)
dfu["rectype"] = qtlhdrs[-1]
dfu = dfu.rename({qtlhdrs[-1]: "loss"}, axis=1)
return(pd.concat([dfv, dfl, dfu]).sort_index().reset_index(drop=True))
def _get_quantiles_by_devp(self, qtls, qtlhdrs):
"""
        Get quantiles of the bootstrapped reserve distribution for individual
        origin periods and in total.
Parameters
----------
        qtls: array_like
            Quantiles (between 0 and 1) at which to evaluate the bootstrapped
            reserve distribution, e.g. the lower and upper bounds of a range.
        qtlhdrs: list of str
            Column headers corresponding to each quantile in ``qtls``.
Returns
-------
pd.DataFrame
"""
dfsims = self.sims_data[["origin", "dev", "ultimate"]]
dfults = dfsims[dfsims.dev == dfsims.dev.max()].reset_index(drop=True)
dev_increment = np.unique(self.ldfs.index[1:] - self.ldfs.index[:-1])[0]
dfults["dev"] = self.ldfs.index.max() + dev_increment
dfsims = pd.concat([dfsims, dfults])
dftotal_keys = dfsims[dfsims.origin == dfsims.origin.min()][["origin", "dev"]].drop_duplicates()
dftotal_keys["origin"] = "total"
        dfqtls_keys = pd.concat(
            [dfsims[["origin", "dev"]].drop_duplicates(), dftotal_keys]
        ).reset_index(drop=True)
# Get total reserve across all origin periods.
dftotal = dfsims.copy(deep=True)
dftotal["origin"] = "total"
dftotal = dftotal.groupby(["origin", "dev"], as_index=False)
dflist = []
for ii, jj in zip(qtls, qtlhdrs):
dfqtl = dfsims.groupby(["origin", "dev"], as_index=False).aggregate(
"quantile", q=ii, interpolation="linear").rename(
{"ultimate": jj}, axis=1
)
dftotal_qtl = dftotal.aggregate(
"quantile", q=ii, interpolation="linear").rename({"ultimate": jj},
axis=1
)
dflist.append(pd.concat([dfqtl, dftotal_qtl]))
# Combine DataFrames in dflist into single table.
dfqtls = functools.reduce(
lambda df1, df2: df1.merge(df2, on=["origin", "dev"], how="left"),
dflist, dfqtls_keys).reset_index(drop=True)
return(dfqtls)
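    # Call sketch (comments only; argument values are illustrative): passing
    # qtls=(0.25, 0.75) with qtlhdrs=("25%", "75%") adds one column per header,
    # holding the 25th/75th percentile of simulated ultimates by origin/dev and
    # for the synthetic "total" origin built above.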
def get_quantiles(self, q, interpolation="linear", lb=None):
"""
        Get quantiles of the bootstrapped reserve distribution for individual
        origin periods and in total. Returns a DataFrame, with columns representing the
percentiles of interest.
Parameters
----------
q: array_like of float or float
Quantile or sequence of quantiles to compute, which must be between 0 and 1
inclusive.
interpolation: {"linear", "lower", "higher", "midpoint", "nearest"}
Optional parameter which specifies the interpolation method to use
when the desired quantile lies between two data points i < j. See
``numpy.quantile`` for more information. Default value is "linear".
lb: float
Lower bound of simulated values. If ``lb`` is not None, quantiles less
than ``lb`` will be set to ``lb``. To eliminate negative quantiles,
set ``lb=0``.
Returns
-------
pd.DataFrame
"""
qarr = np.asarray(q, dtype=float)
        if np.any(np.logical_or(qarr > 1, qarr < 0)):
raise ValueError("q values must fall within [0, 1].")
else:
qtls, qtlhdrs = self._qtls_formatter(q=q)
qtl_pairs = [(qtlhdrs[ii], qtls[ii]) for ii in range(len(qtls))]
dqq = {
str(ii[0]): [
np.percentile(
self.reserve_dist[self.reserve_dist.origin == origin]["reserve"].values,
100 * ii[-1], interpolation=interpolation
) for origin in self.summary.index] for ii in qtl_pairs
}
            dfqq = pd.DataFrame(dqq)
# -*- coding: utf-8 -*-
"""System operating cost plots.
This module plots figures related to the cost of operating the power system.
Plots can be broken down by cost categories, generator types etc.
@author: <NAME>
"""
import logging
import pandas as pd
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""production_cost MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The production_cost.py module contains methods that are
related related to the cost of operating the power system.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
def prod_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Plots total system net revenue and cost normalized by the installed capacity of the area.
Total revenue is made up of reserve and energy revenues which are displayed in a stacked
        bar plot with total generation cost. Net revenue is represented by a dot.
        Each scenario is plotted as a separate bar.
Args:
            start_date_range (str, optional): Defines a start date from which to represent data.
                Defaults to None.
            end_date_range (str, optional): Defines an end date up to which to represent data.
                Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Total_Generation_Cost", self.Scenarios),
(True, "generator_Pool_Revenue", self.Scenarios),
(True, "generator_Reserves_Revenue", self.Scenarios),
(True, "generator_Installed_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Installed_Capacity = self["generator_Installed_Capacity"].get(scenario)
#Check if zone has installed generation, if not skips
try:
Total_Installed_Capacity = Total_Installed_Capacity.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No installed capacity in : {zone_input}")
continue
Total_Installed_Capacity = self.df_process_gen_inputs(Total_Installed_Capacity)
Total_Installed_Capacity.reset_index(drop=True, inplace=True)
Total_Installed_Capacity = Total_Installed_Capacity.iloc[0]
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
Total_Gen_Cost = self.df_process_gen_inputs(Total_Gen_Cost)
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)*-1
# Total_Gen_Cost = Total_Gen_Cost/Total_Installed_Capacity #Change to $/MW-year
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Pool_Revenues = self["generator_Pool_Revenue"].get(scenario)
Pool_Revenues = Pool_Revenues.xs(zone_input,level=self.AGG_BY)
Pool_Revenues = self.df_process_gen_inputs(Pool_Revenues)
Pool_Revenues = Pool_Revenues.sum(axis=0)
# Pool_Revenues = Pool_Revenues/Total_Installed_Capacity #Change to $/MW-year
Pool_Revenues.rename("Energy_Revenues", inplace=True)
### Might change to Net Reserve Revenue at later date
Reserve_Revenues = self["generator_Reserves_Revenue"].get(scenario)
Reserve_Revenues = Reserve_Revenues.xs(zone_input,level=self.AGG_BY)
Reserve_Revenues = self.df_process_gen_inputs(Reserve_Revenues)
Reserve_Revenues = Reserve_Revenues.sum(axis=0)
# Reserve_Revenues = Reserve_Revenues/Total_Installed_Capacity #Change to $/MW-year
Reserve_Revenues.rename("Reserve_Revenues", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost,
Pool_Revenues, Reserve_Revenues],
axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost = Total_Systems_Cost.sum(axis=0)
Total_Systems_Cost = Total_Systems_Cost.rename(scenario)
total_cost_chunk.append(Total_Systems_Cost)
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=1, sort=False)
Total_Systems_Cost_Out = Total_Systems_Cost_Out.T
Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')
# Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000 #Change to $/kW-year
Total_Systems_Cost_Out = Total_Systems_Cost_Out/1e6 #Convert cost to millions
if pd.notna(custom_data_file_path):
Total_Systems_Cost_Out = self.insert_custom_data_columns(
Total_Systems_Cost_Out,
custom_data_file_path)
Net_Revenue = Total_Systems_Cost_Out.sum(axis=1)
#Checks if Net_Revenue contains data, if not skips zone and does not return a plot
if Net_Revenue.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# Data table of values to return to main program
Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" (Million $)")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Systems_Cost_Out.index
mplt.barplot(Total_Systems_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.plot(Net_Revenue.index, Net_Revenue.values,
color='black', linestyle='None', marker='o',
label='Net Revenue')
            ax.set_ylabel('Total System Net Rev, Rev, & Cost (Million $)', color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
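    # Arithmetic sketch (hypothetical numbers, not from any Marmot run): with
    # energy revenue 12 M$, reserve revenue 1 M$ and total generation cost
    # 10 M$ (stored as -10 after the sign flip above), the stacked bar sums to
    # 3 M$, which is the value marked by the net-revenue dot for that scenario.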
def sys_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates a stacked bar plot of Total Generation Cost and Cost of Unserved Energy.
Plot only shows totals and is NOT broken down into technology or cost type
specific values.
        Each scenario is plotted as a separate bar.
Args:
            start_date_range (str, optional): Defines a start date from which to represent data.
                Defaults to None.
            end_date_range (str, optional): Defines an end date up to which to represent data.
                Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Total_Generation_Cost",self.Scenarios),
(False,f"{agg}_Cost_Unserved_Energy",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
try:
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Cost_Unserved_Energy = self[f"{agg}_Cost_Unserved_Energy"][scenario]
if Cost_Unserved_Energy.empty:
Cost_Unserved_Energy = self["generator_Total_Generation_Cost"][scenario].copy()
Cost_Unserved_Energy.iloc[:,0] = 0
Cost_Unserved_Energy = Cost_Unserved_Energy.xs(zone_input,level=self.AGG_BY)
Cost_Unserved_Energy = Cost_Unserved_Energy.sum(axis=0)
Cost_Unserved_Energy.rename("Cost_Unserved_Energy", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost, Cost_Unserved_Energy],
axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost.rename({0:scenario}, axis='index', inplace=True)
total_cost_chunk.append(Total_Systems_Cost)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not total_cost_chunk:
outputs[zone_input] = MissingZoneData()
continue
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=0, sort=False)
Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000000 #Convert cost to millions
Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')
#Checks if Total_Systems_Cost_Out contains data, if not skips zone and does not return a plot
if Total_Systems_Cost_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
if pd.notna(custom_data_file_path):
Total_Systems_Cost_Out = self.insert_custom_data_columns(
Total_Systems_Cost_Out,
custom_data_file_path)
# Data table of values to return to main program
Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" (Million $)")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Systems_Cost_Out.index
mplt.barplot(Total_Systems_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.set_ylabel('Total System Cost (Million $)',
color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
cost_totals = Total_Systems_Cost_Out.sum(axis=1) #holds total of each bar
#inserts values into bar stacks
for patch in ax.patches:
width, height = patch.get_width(), patch.get_height()
if height<=1:
continue
x, y = patch.get_xy()
ax.text(x+width/2,
y+height/2,
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=12)
#inserts total bar value above each bar
for k, patch in enumerate(ax.patches):
height = cost_totals[k]
width = patch.get_width()
x, y = patch.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=15, color='red')
if k>=len(cost_totals)-1:
break
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
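    # Arithmetic sketch (hypothetical values): a scenario with 250,000,000 $ of
    # generation cost and 5,000,000 $ of unserved-energy cost is drawn as a
    # 250 + 5 = 255 Million $ stacked bar after the /1000000 conversion above,
    # with "255" annotated in red above the bar.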
def detailed_gen_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates stacked bar plot of total generation cost by cost type (fuel, emission, start cost etc.)
        Creates a more detailed system cost plot.
        Each scenario is plotted as a separate bar.
Args:
            start_date_range (str, optional): Defines a start date from which to represent data.
                Defaults to None.
            end_date_range (str, optional): Defines an end date up to which to represent data.
                Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Fuel_Cost",self.Scenarios),
(True,"generator_VO&M_Cost",self.Scenarios),
(True,"generator_Start_&_Shutdown_Cost",self.Scenarios),
(False,"generator_Emissions_Cost",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"Zone = {zone_input}")
gen_cost_out_chunks = []
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Fuel_Cost = self["generator_Fuel_Cost"].get(scenario)
# Check if Fuel_cost contains zone_input, skips if not
try:
Fuel_Cost = Fuel_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for: {zone_input}")
continue
Fuel_Cost = Fuel_Cost.sum(axis=0)
Fuel_Cost.rename("Fuel_Cost", inplace=True)
VOM_Cost = self["generator_VO&M_Cost"].get(scenario)
VOM_Cost = VOM_Cost.xs(zone_input,level=self.AGG_BY)
VOM_Cost[0].values[VOM_Cost[0].values < 0] = 0
VOM_Cost = VOM_Cost.sum(axis=0)
VOM_Cost.rename("VO&M_Cost", inplace=True)
Start_Shutdown_Cost = self["generator_Start_&_Shutdown_Cost"].get(scenario)
Start_Shutdown_Cost = Start_Shutdown_Cost.xs(zone_input,level=self.AGG_BY)
Start_Shutdown_Cost = Start_Shutdown_Cost.sum(axis=0)
Start_Shutdown_Cost.rename("Start_&_Shutdown_Cost", inplace=True)
Emissions_Cost = self["generator_Emissions_Cost"][scenario]
if Emissions_Cost.empty:
self.logger.warning(f"generator_Emissions_Cost not included in {scenario} results, Emissions_Cost will not be included in plot")
Emissions_Cost = self["generator_Start_&_Shutdown_Cost"][scenario].copy()
Emissions_Cost.iloc[:,0] = 0
Emissions_Cost = Emissions_Cost.xs(zone_input,level=self.AGG_BY)
Emissions_Cost = Emissions_Cost.sum(axis=0)
Emissions_Cost.rename("Emissions_Cost", inplace=True)
Detailed_Gen_Cost = pd.concat([Fuel_Cost, VOM_Cost, Start_Shutdown_Cost, Emissions_Cost], axis=1, sort=False)
Detailed_Gen_Cost.columns = Detailed_Gen_Cost.columns.str.replace('_',' ')
Detailed_Gen_Cost = Detailed_Gen_Cost.sum(axis=0)
Detailed_Gen_Cost = Detailed_Gen_Cost.rename(scenario)
gen_cost_out_chunks.append(Detailed_Gen_Cost)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not gen_cost_out_chunks:
outputs[zone_input] = MissingZoneData()
continue
Detailed_Gen_Cost_Out = pd.concat(gen_cost_out_chunks, axis=1, sort=False)
Detailed_Gen_Cost_Out = Detailed_Gen_Cost_Out.T/1000000 #Convert cost to millions
Detailed_Gen_Cost_Out.index = Detailed_Gen_Cost_Out.index.str.replace('_',' ')
# Deletes columns that are all 0
Detailed_Gen_Cost_Out = Detailed_Gen_Cost_Out.loc[:, (Detailed_Gen_Cost_Out != 0).any(axis=0)]
# Checks if Detailed_Gen_Cost_Out contains data, if not skips zone and does not return a plot
if Detailed_Gen_Cost_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
            if pd.notna(custom_data_file_path):
                Detailed_Gen_Cost_Out = self.insert_custom_data_columns(
                    Detailed_Gen_Cost_Out,
                    custom_data_file_path)
# Data table of values to return to main program
Data_Table_Out = Detailed_Gen_Cost_Out.add_suffix(" (Million $)")
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Detailed_Gen_Cost_Out.index
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
mplt.barplot(Detailed_Gen_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.axhline(y=0)
ax.set_ylabel('Total Generation Cost (Million $)',
color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
cost_totals = Detailed_Gen_Cost_Out.sum(axis=1) #holds total of each bar
#inserts values into bar stacks
for patch in ax.patches:
width, height = patch.get_width(), patch.get_height()
if height<=2:
continue
x, y = patch.get_xy()
ax.text(x+width/2,
y+height/2,
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=12)
#inserts total bar value above each bar
for k, patch in enumerate(ax.patches):
height = cost_totals[k]
width = patch.get_width()
x, y = patch.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=15, color='red')
if k>=len(cost_totals)-1:
break
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
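    # Breakdown sketch (illustrative only): if a zone's fuel, VO&M,
    # start/shutdown and emissions costs are 80, 10, 5 and 0 Million $, the
    # all-zero emissions column is dropped by the "(... != 0).any(axis=0)"
    # filter above and the bar stacks to 95 Million $.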
def sys_cost_type(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates stacked bar plot of total generation cost by generator technology type.
Another way to represent total generation cost, this time by tech type,
        e.g. Coal, Gas, Hydro etc.
        Each scenario is plotted as a separate bar.
Args:
            start_date_range (str, optional): Defines a start date from which to represent data.
                Defaults to None.
            end_date_range (str, optional): Defines an end date up to which to represent data.
                Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
# Create Dictionary to hold Datframes for each scenario
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Total_Generation_Cost",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
gen_cost_out_chunks = []
self.logger.info(f"Zone = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Gen_Stack = self["generator_Total_Generation_Cost"].get(scenario)
# Check if Total_Gen_Stack contains zone_input, skips if not
try:
Total_Gen_Stack = Total_Gen_Stack.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Stack = self.df_process_gen_inputs(Total_Gen_Stack)
Total_Gen_Stack = Total_Gen_Stack.sum(axis=0)
Total_Gen_Stack.rename(scenario, inplace=True)
gen_cost_out_chunks.append(Total_Gen_Stack)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not gen_cost_out_chunks:
outputs[zone_input] = MissingZoneData()
continue
Total_Generation_Stack_Out = pd.concat(gen_cost_out_chunks, axis=1, sort=False).fillna(0)
Total_Generation_Stack_Out = self.create_categorical_tech_index(Total_Generation_Stack_Out)
Total_Generation_Stack_Out = Total_Generation_Stack_Out.T/1000000 #Convert to millions
Total_Generation_Stack_Out = Total_Generation_Stack_Out.loc[:, (Total_Generation_Stack_Out != 0).any(axis=0)]
# Checks if Total_Generation_Stack_Out contains data, if not skips zone and does not return a plot
if Total_Generation_Stack_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
            if pd.notna(custom_data_file_path):
                Total_Generation_Stack_Out = self.insert_custom_data_columns(
                    Total_Generation_Stack_Out,
                    custom_data_file_path)
import pandas as pd
import numpy as np
from random import randrange
from datetime import date,timedelta
def random_date(start, end):
"""
This function returns a random datetime between two datetime
objects
"""
delta = end - start
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = randrange(int_delta)
return start + timedelta(seconds=random_second)
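# Usage sketch (illustrative bounds; mirrors the call inside
# create_random_data_set below):
#
#   >>> random_date(pd.to_datetime("2020-01-01"), pd.to_datetime("2020-12-31"))
#   Timestamp('2020-07-14 03:21:05')   # uniformly drawn between the two bounds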
def create_random_data_set(timeframe, num_clients, n, num_products, avg_sale):
"""
    create_random_data_set simulates a sample data set to play with. The function
    generates random records based on the given parameters.
"""
frame_out = pd.DataFrame(index=range(0, n))
frame_out['sales'] = np.random.rand(n, 1) * avg_sale
frame_out['date'] = [random_date(pd.to_datetime(timeframe[0]), pd.to_datetime(timeframe[1])) for i in range(n)]
frame_out['client_id'] = [np.random.randint(0, num_clients) for i in range(n)]
frame_out['product_id'] = [np.random.randint(0, num_products) for i in range(n)]
frame_out['client_name'] = 'Generic name'
frame_out = frame_out.sort_values('date')
return frame_out
def assign_segment(frame_in):
"""
    assign_segment applies a proprietary algorithm to assign a meaningful segment to each client
according to their customer behavior
:param
frame_in: Pandas DataFrame object with RFM tags
:return:
frame_out: pandas DataFrame with client_id and assigned segment
"""
segment_names = [name + str(i) for i, name in enumerate(['segment_'] * 9)]
frame_out = pd.DataFrame(list(frame_in['client_id'].unique()), columns=['client_id'])
frame_out['segment'] = np.random.choice(segment_names, len(frame_in['client_id'].unique()))
return pd.merge(frame_in, frame_out, on='client_id')
def run_RFM_analysis(frame, n_groups, alpha):
"""
run_RFM_analysis performs basic analysis in a two stage process
:param
frame: Pandas DataFrame with core client info.
        Columns are: (sales, date, client_id, product_id, client_name)
:return:
scores
"""
scores = create_scores(frame, n_groups, alpha)
scores = assign_segment(scores)
other_vars = create_other_vars(frame)
return pd.merge(scores, other_vars, on='client_id', how='inner', validate='1:1')
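# End-to-end sketch (synthetic data; parameter values are arbitrary):
#
#   >>> frame = create_random_data_set(("2019-01-01", "2021-01-01"),
#   ...                                num_clients=50, n=1000,
#   ...                                num_products=10, avg_sale=100)
#   >>> rfm = run_RFM_analysis(frame, n_groups=4, alpha=None)
#
# `alpha` is passed through to create_scores as its (currently unused)
# weights argument.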
def create_other_vars(frame_in):
other_vars = frame_in.groupby('client_id').sum()['sales'].to_frame(name='sales')
other_vars.reset_index(inplace=True)
return other_vars
def create_scores(frame_in, groups, weights):
"""
create_scores creates RFM scores for sales date (frame_in)
:param
frame_in: Pandas DataFrame with core client info
:return:
scores:
"""
today = pd.to_datetime(date.today())
first_date = frame_in.groupby('client_id').min()['date'].to_frame(name='first_purchase')
last_date = frame_in.groupby('client_id').max()['date'].to_frame(name='last_purchase')
time_since_last = (today - last_date['last_purchase']).apply(lambda x: int(x.days / 30)).to_frame(
name='months_since_last')
# Verify calculation
recency = (today - last_date).apply(lambda x: int(x[0].days / 30), axis=1).to_frame(name='recency')
age = (today - first_date).apply(lambda x: int(x[0].days / 30), axis=1).to_frame(name='age')
monetary = frame_in.groupby('client_id').max()['sales'].to_frame(name='monetary')
# products = frame_in.groupby('client_id').agg({'product_id':np.size})['product_id'].to_frame(name='products')
frequency = (((today - first_date).apply(lambda x: int(x[0].days / 30), axis=1)) / (
frame_in.groupby('client_id').size())).to_frame(name='frequency')
scores = pd.concat([recency, frequency, monetary, age], axis=1).apply(
lambda x: pd.qcut(x, q=groups, labels=[i for i in range(1, groups + 1)], duplicates='raise').astype(int),
axis=0)
metrics = pd.concat([recency, frequency, monetary, age], axis=1)
metrics.columns = [col + '_value' for col in metrics.columns]
    scores = pd.concat([scores, metrics], axis=1)
    return scores
import pandas as pd
data_from_db = '../data/from_db/'
cleaned_data_path = '../data/cleaned/'
def print_summary(name, df):
print(f'\n\n=============={name}==============\n\n')
print(df.head())
    print(f'\nDataFrame dimensions: {df.shape}')
    print('Data size:')
df.info(memory_usage='deep')
def data_mining(name, df):
    print(f'\n\n==============CLEANING TABLE {name}==============\n\n')
if name == 'projects':
df.drop(['deleted', 'ext_ref_id', 'url', 'owner_id', 'description', 'forked_from'],
axis=1, inplace=True)
df.dropna(subset=['language'], how='any', inplace=True)
df['language'] = df['language'].astype('category')
df['created_at'] = df['created_at'].astype('datetime64[ns]')
df.rename(columns={'id': 'project_id'}, inplace=True)
elif name == 'commits':
df.drop(['sha', 'author_id', 'ext_ref_id'], axis=1, inplace=True)
projects = pd.read_pickle(cleaned_data_path + 'projects.pkl')
df = df[df['project_id'].isin(projects['project_id'])].copy()
df.rename(columns={'id': 'commit_id'}, inplace=True)
elif name == 'commit_comments':
df.drop(['user_id', 'line', 'position', 'ext_ref_id', 'comment_id', 'body'],
axis=1, inplace=True)
        commits = pd.read_pickle(cleaned_data_path + 'commits.pkl')
from kivy.config import Config
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.popup import Popup
from kivy.uix.label import Label
import matplotlib.pyplot as plt
import pandas as pd
from multiprocessing import Process
class ActiveGridLayout(GridLayout):
gold_model = ''
active_result = []
@staticmethod
def get_path():
import tkinter as tk
from tkinter import filedialog
root = tk.Tk()
root.withdraw()
return filedialog.askopenfilename()
@staticmethod
def visualize_dataframe(filename):
import PySimpleGUI as sg
        # header=None tells pandas the file has no header row, so columns get default integer names
        df = pd.read_csv(filename, sep=',', engine='python', header=None)
"""
Limited dependent variable and qualitative variables.
Includes binary outcomes, count data, (ordered) ordinal data and limited
dependent variables.
General References
--------------------
<NAME> and <NAME>. `Regression Analysis of Count Data`.
Cambridge, 1998
<NAME>. `Limited-Dependent and Qualitative Variables in Econometrics`.
Cambridge, 1983.
<NAME>. `Econometric Analysis`. Prentice Hall, 5th. edition. 2003.
"""
__all__ = ["Poisson", "Logit", "Probit", "MNLogit", "NegativeBinomial",
"GeneralizedPoisson", "NegativeBinomialP", "CountModel"]
from statsmodels.compat.pandas import Appender
import warnings
import numpy as np
from pandas import MultiIndex, get_dummies
from scipy import special, stats
from scipy.special import digamma, gammaln, loggamma, polygamma
from scipy.stats import nbinom
from statsmodels.base.data import handle_data # for mnlogit
from statsmodels.base.l1_slsqp import fit_l1_slsqp
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.distributions import genpoisson_p
import statsmodels.regression.linear_model as lm
from statsmodels.tools import data as data_tools, tools
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.numdiff import approx_fprime_cs
from statsmodels.tools.sm_exceptions import (
PerfectSeparationError,
SpecificationWarning,
)
try:
import cvxopt # noqa:F401
have_cvxopt = True
except ImportError:
have_cvxopt = False
# TODO: When we eventually get user-settable precision, we need to change
# this
FLOAT_EPS = np.finfo(float).eps
# Limit for exponentials to avoid overflow
EXP_UPPER_LIMIT = np.log(np.finfo(np.float64).max) - 1.0
# TODO: add options for the parameter covariance/variance
# ie., OIM, EIM, and BHHH see Green 21.4
_discrete_models_docs = """
"""
_discrete_results_docs = """
%(one_line_description)s
Parameters
----------
model : A DiscreteModel instance
params : array_like
The parameters of a fitted model.
hessian : array_like
The hessian of the fitted model.
scale : float
A scale parameter for the covariance matrix.
Attributes
----------
df_resid : float
See model definition.
df_model : float
See model definition.
llf : float
Value of the loglikelihood
%(extra_attr)s"""
_l1_results_attr = """ nnz_params : int
The number of nonzero parameters in the model. Train with
trim_params == True or else numerical error will distort this.
trimmed : bool array
trimmed[i] == True if the ith parameter was trimmed from the model."""
_get_start_params_null_docs = """
Compute one-step moment estimator for null (constant-only) model
This is a preliminary estimator used as start_params.
Returns
-------
params : ndarray
parameter estimate based one one-step moment matching
"""
_check_rank_doc = """
check_rank : bool
Check exog rank to determine model degrees of freedom. Default is
True. Setting to False reduces model initialization time when
exog.shape[1] is large.
"""
# helper for MNLogit (will be generally useful later)
def _numpy_to_dummies(endog):
if endog.ndim == 2 and endog.dtype.kind not in ["S", "O"]:
endog_dummies = endog
ynames = range(endog.shape[1])
else:
        dummies = get_dummies(endog, drop_first=False)
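        # Illustrative behaviour (assumption, not verbatim statsmodels source):
        # an endog such as np.array(["a", "b", "a"]) becomes a 3x2 indicator
        # frame here, and the dummy column labels ("a", "b") supply the
        # response names used by MNLogit.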
# get AP original information which will be exported to apinfo.csv
# get name and serial information, add the necessary columns that the renaming workflow needs, and change ap_name to site+"AP"+model+number; the info will be exported to csv_file.csv.
import http.client
import pandas as pd
import json
import pprint as pp
conn = http.client.HTTPSConnection("internal-apigw.central.arubanetworks.com")
payload = ''
#change the below access_token
headers = {
'Authorization': '<access_token>'
}
conn.request("GET", "/monitoring/v1/aps", payload, headers)
res = conn.getresponse()
data = res.read()
data_json=json.loads(data)
df = pd.DataFrame(data_json["aps"])
df[['name','serial']].to_csv("apinfo.csv")
df['achannel']=""
df['atxpower']=""
df['gtxpower']=""
df['gchannel']=""
df['dot11a_radio_disable']=""
df['dot11g_radio_disable']=""
df['usb_port_disable']=""
df['zonename']=""
ap_count = int(pd.DataFrame(data_json)
import pandas as pd
from statsmodels.distributions.empirical_distribution import ECDF
from statsmodels.stats.multitest import multipletests
if __name__ == '__main__':
    cov_sig = pd.read_csv(snakemake.input[0], sep="\t", index_col=0)
# -*- coding: utf-8 -*-
"""Imersao_dados.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/11hUX4kVtP3llYH7c83fiSCeCtP8MGTm0
"""
import pandas as pd
import matplotlib.pyplot as plt
url_date = "https://github.com/alura-cursos/imersaodados3/blob/main/dados/dados_experimentos.zip?raw=true"
dados = pd.read_csv(url_date, compression='zip')
"""# LESSON 1: Data analysis, Python, pandas and new drugs
"""
dados
"""## Challenge 2: last 5 rows"""
dados.tail()
"""## Challenge 3: % with drug and with control (how I did it)
"""
com_droga = (list(dados['tratamento'].value_counts())[0]/dados['tratamento'].count())*100
com_controle = (list(dados['tratamento'].value_counts())[1]/dados['tratamento'].count())*100
print(f"With drug {com_droga:.2f} %")
print(f"With control {com_controle:.2f} %")
"""## their version"""
dados['tratamento'].value_counts(normalize=True)
"""## Challenge 4: Number of classes"""
print(len(dados['droga'].unique()))
dados['droga'].value_counts().count()
dados['tratamento'].value_counts().plot(kind= 'pie')
plt.title('TRATAMENTO')
plt.show()
dados['dose'].value_counts().plot(kind='pie')
plt.title('DOSE')
plt.show()
dados['tempo'].value_counts(normalize=True)
"""### Challenge 7: Title on the chart"""
dados['tempo'].value_counts().plot(kind='pie')
plt.title('TEMPO')
plt.show()
dados['tempo'].value_counts().plot(kind='bar')
plt.title('TEMPO')
plt.show()
"""## Mask"""
dados[dados['g-0'] > 0]  # keep only rows where g-0 > 0
"""## Challenge 5: Rename columns"""
new = list()
old = list()
for dt in dados.columns:
if "-" in dt:
old.append(dt)
new.append(dt.replace("-", ""))
else:
old.append(dt)
new.append(dt)
dados.columns = new
dados.head()
map = {'droga': 'composto'}
dados.rename(columns=map, inplace=True)
dados
cod_composto = dados['composto'].value_counts().index[:5]
"""## Using a variable inside query with @"""
dados.query('composto in @cod_composto')
import seaborn as sns
plt.figure(figsize=(10, 8))
sns.set()
ax = sns.countplot(x = 'composto', data=dados.query('composto in @cod_composto'), order=cod_composto)
ax.set(xlabel='Nome dos compostos', ylabel='Quantidade de ocorrẽncia')
ax.set_title("Top 5 compostos", fontsize=30)
plt.show()
print("Total", len(dados['g0'].unique()))
print("Min", dados['g0'].min())
print("Max", dados['g0'].max())
"""# LESSON 2: Statistics, data visualization and distributions"""
dados['g0'].describe()
dados['g0'].hist(bins= 80)
dados['g19'].hist(bins= 80)
dados
dados.loc[:,'g0':'g771'].describe()
dados.loc[:,'g0':'g771'].describe().T['mean'].hist(bins=50)
dados.loc[:,'g0':'g771'].describe().T['min'].hist(bins=50)
dados.loc[:,'g0':'g771'].describe().T['max'].hist(bins=50)
dados.loc[:,'g0':'g771'].describe().T['std'].hist(bins=50)
dados.loc[:,'g0':'g771'].describe().T['50%'].hist(bins=50)
sns.boxplot(x = 'g0', data=dados)
sns.boxplot(x = 'g0', y = 'tratamento', data=dados)
plt.figure(figsize=(10,10))
sns.boxplot(x = 'tratamento', y = 'g0', data=dados)
"""# LESSON 3: Correlations, causality and relationships between genes."""
pd.crosstab([dados['dose'], dados['tempo']], dados['tratamento'], normalize='index')
pd.crosstab([dados['dose'], dados['tempo']], dados['tratamento'], normalize='index', values=dados['g0'], aggfunc='mean')
"""## Challenge 2: Normalize by columns"""
pd.crosstab([dados['dose'], dados['tempo']], dados['tratamento'], normalize='columns', values=dados['g0'], aggfunc='mean')
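# Note (illustrative): with normalize='index' each (dose, tempo) row of the
# aggregated g0 means sums to 1, while normalize='columns' makes each
# tratamento column sum to 1 instead; the normalization is applied to the
# aggregated values, not to raw counts.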
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
date_range(start=pd.NaT, end="2016-01-01", freq="D")
def test_date_range_multiplication_overflow(self):
# GH#24255
# check that overflows in calculating `addend = periods * stride`
# are caught
with tm.assert_produces_warning(None):
# we should _not_ be seeing a overflow RuntimeWarning
dti = date_range(start="1677-09-22", periods=213503, freq="D")
assert dti[0] == Timestamp("1677-09-22")
assert len(dti) == 213503
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("1969-05-04", periods=200000000, freq="30000D")
def test_date_range_unsigned_overflow_handling(self):
# GH#24255
# case where `addend = periods * stride` overflows int64 bounds
# but not uint64 bounds
dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
dti2 = date_range(start=dti[0], periods=len(dti), freq="D")
assert dti2.equals(dti)
dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")
assert dti3.equals(dti)
def test_date_range_int64_overflow_non_recoverable(self):
# GH#24255
# case with start later than 1970-01-01, overflow int64 but not uint64
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start="1970-02-01", periods=106752 * 24, freq="H")
# case with end before 1970-01-01, overflow int64 but not uint64
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1969-11-14", periods=106752 * 24, freq="H")
def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
# cases where stride * periods overflow int64 and stride/endpoint
# have different signs
start = Timestamp("2262-02-23")
end = Timestamp("1969-11-14")
expected = date_range(start=start, end=end, freq="-1H")
assert expected[0] == start
assert expected[-1] == end
dti = date_range(end=end, periods=len(expected), freq="-1H")
tm.assert_index_equal(dti, expected)
start2 = Timestamp("1970-02-01")
end2 = Timestamp("1677-10-22")
expected2 = date_range(start=start2, end=end2, freq="-1H")
assert expected2[0] == start2
assert expected2[-1] == end2
dti2 = date_range(start=start2, periods=len(expected2), freq="-1H")
tm.assert_index_equal(dti2, expected2)
def test_date_range_out_of_bounds(self):
# GH#14187
msg = "Cannot generate range"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("2016-01-01", periods=100000, freq="D")
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1763-10-12", periods=100000, freq="D")
def test_date_range_gen_error(self):
rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")
assert len(rng) == 4
@pytest.mark.parametrize("freq", ["AS", "YS"])
def test_begin_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
freq=freq,
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["A", "Y"])
def test_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["BA", "BY"])
def test_business_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq(self):
# GH 11018
rng = date_range("2011-12-31", freq="-2A", periods=3)
exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2A"
rng = date_range("2011-01-31", freq="-2M", periods=3)
exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2M"
def test_date_range_bms_bug(self):
# #1645
rng = date_range("1/1/2000", periods=10, freq="BMS")
ex_first = Timestamp("2000-01-03")
assert rng[0] == ex_first
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq="2D")
offset = timedelta(2)
values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)
tm.assert_index_equal(rng, values)
rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")
the_time = time(8, 15)
for val in rng:
assert val.time() == the_time
def test_date_range_fy5252(self):
dr = date_range(
start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"),
)
assert dr[0] == Timestamp("2013-01-31")
assert dr[1] == Timestamp("2014-01-30")
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start, end, periods=10, freq="s")
def test_date_range_convenience_periods(self):
# GH 20808
result = date_range("2018-04-24", "2018-04-27", periods=3)
expected = DatetimeIndex(
["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],
freq=None,
)
tm.assert_index_equal(result, expected)
# Test if spacing remains linear if tz changes to dst in range
result = date_range(
"2018-04-01 01:00:00",
"2018-04-01 04:00:00",
tz="Australia/Sydney",
periods=3,
)
expected = DatetimeIndex(
[
Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),
Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),
Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),
]
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start,end,result_tz",
[
["20180101", "20180103", "US/Eastern"],
[datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],
[Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
"US/Eastern",
],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
None,
],
],
)
def test_date_range_linspacing_tz(self, start, end, result_tz):
# GH 20983
result = date_range(start, end, periods=3, tz=result_tz)
expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_date_range_businesshour(self):
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH")
rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
"2014-07-08 11:00",
"2014-07-08 12:00",
"2014-07-08 13:00",
"2014-07-08 14:00",
"2014-07-08 15:00",
"2014-07-08 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
def test_range_misspecified(self):
# GH #1095
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(periods=10)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(periods=10, freq="H")
with pytest.raises(ValueError, match=msg):
date_range()
def test_compat_replace(self):
# https://github.com/statsmodels/statsmodels/issues/3349
# replace should take ints/longs for compat
result = date_range(
Timestamp("1960-04-01 00:00:00", freq="QS-JAN"), periods=76, freq="QS-JAN"
)
assert len(result) == 76
def test_catch_infinite_loop(self):
offset = offsets.DateOffset(minute=5)
# blow up, don't loop forever
msg = "Offset <DateOffset: minute=5> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)
@pytest.mark.parametrize("periods", (1, 2))
def test_wom_len(self, periods):
# https://github.com/pandas-dev/pandas/issues/20517
res = date_range(start="20110101", periods=periods, freq="WOM-1MON")
assert len(res) == periods
def test_construct_over_dst(self):
# GH 20854
pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=True
)
pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=False
)
expect_data = [
Timestamp("2010-11-07 00:00:00", tz="US/Pacific"),
pre_dst,
pst_dst,
]
expected = DatetimeIndex(expect_data, freq="H")
result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific")
tm.assert_index_equal(result, expected)
def test_construct_with_different_start_end_string_format(self):
# GH 12064
result = date_range(
"2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H"
)
expected = DatetimeIndex(
[
Timestamp("2013-01-01 00:00:00+09:00"),
Timestamp("2013-01-01 01:00:00+09:00"),
Timestamp("2013-01-01 02:00:00+09:00"),
],
freq="H",
)
tm.assert_index_equal(result, expected)
def test_error_with_zero_monthends(self):
msg = r"Offset <0 \* MonthEnds> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0))
def test_range_bug(self):
# GH #770
offset = DateOffset(months=3)
result = date_range("2011-1-1", "2012-1-31", freq=offset)
start = datetime(2011, 1, 1)
expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset)
tm.assert_index_equal(result, expected)
def test_range_tz_pytz(self):
# see gh-2906
tz = timezone("US/Eastern")
start = tz.localize(datetime(2011, 1, 1))
end = tz.localize(datetime(2011, 1, 3))
dr = date_range(start=start, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize(
"start, end",
[
[
Timestamp(datetime(2014, 3, 6), tz="US/Eastern"),
Timestamp(datetime(2014, 3, 12), tz="US/Eastern"),
],
[
Timestamp(datetime(2013, 11, 1), tz="US/Eastern"),
Timestamp(datetime(2013, 11, 6), tz="US/Eastern"),
],
],
)
def test_range_tz_dst_straddle_pytz(self, start, end):
dr = date_range(start, end, freq="D")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(start, end, freq="D", tz="US/Eastern")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(
start.replace(tzinfo=None),
end.replace(tzinfo=None),
freq="D",
tz="US/Eastern",
)
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
def test_range_tz_dateutil(self):
# see gh-2906
# Use maybe_get_tz to fix filename in tz under dateutil.
from pandas._libs.tslibs.timezones import maybe_get_tz
tz = lambda x: maybe_get_tz("dateutil/" + x)
start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern"))
end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern"))
dr = date_range(start=start, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"])
def test_range_closed(self, freq):
begin = datetime(2011, 1, 1)
end = datetime(2014, 1, 1)
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
def test_range_closed_with_tz_aware_start_end(self):
# GH12409, GH12684
begin = Timestamp("2011/1/1", tz="US/Eastern")
end = Timestamp("2014/1/1", tz="US/Eastern")
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
begin = Timestamp("2011/1/1")
end = Timestamp("2014/1/1")
begintz = Timestamp("2011/1/1", tz="US/Eastern")
endtz = Timestamp("2014/1/1", tz="US/Eastern")
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq, tz="US/Eastern")
left = date_range(begin, end, closed="left", freq=freq, tz="US/Eastern")
right = date_range(begin, end, closed="right", freq=freq, tz="US/Eastern")
expected_left = left
expected_right = right
if endtz == closed[-1]:
expected_left = closed[:-1]
if begintz == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
@pytest.mark.parametrize("closed", ["right", "left", None])
def test_range_closed_boundary(self, closed):
# GH#11804
right_boundary = date_range(
"2015-09-12", "2015-12-01", freq="QS-MAR", closed=closed
)
left_boundary = date_range(
"2015-09-01", "2015-09-12", freq="QS-MAR", closed=closed
)
both_boundary = date_range(
"2015-09-01", "2015-12-01", freq="QS-MAR", closed=closed
)
expected_right = expected_left = expected_both = both_boundary
if closed == "right":
expected_left = both_boundary[1:]
if closed == "left":
expected_right = both_boundary[:-1]
if closed is None:
expected_right = both_boundary[1:]
expected_left = both_boundary[:-1]
tm.assert_index_equal(right_boundary, expected_right)
tm.assert_index_equal(left_boundary, expected_left)
tm.assert_index_equal(both_boundary, expected_both)
def test_years_only(self):
# GH 6961
dr = date_range("2014", "2015", freq="M")
assert dr[0] == datetime(2014, 1, 31)
assert dr[-1] == datetime(2014, 12, 31)
def test_freq_divides_end_in_nanos(self):
# GH 10885
result_1 = date_range("2005-01-12 10:00", "2005-01-12 16:00", freq="345min")
result_2 = date_range("2005-01-13 10:00", "2005-01-13 16:00", freq="345min")
expected_1 = DatetimeIndex(
["2005-01-12 10:00:00", "2005-01-12 15:45:00"],
dtype="datetime64[ns]",
freq="345T",
tz=None,
)
expected_2 = DatetimeIndex(
["2005-01-13 10:00:00", "2005-01-13 15:45:00"],
dtype="datetime64[ns]",
freq="345T",
tz=None,
)
tm.assert_index_equal(result_1, expected_1)
tm.assert_index_equal(result_2, expected_2)
def test_cached_range_bug(self):
rng = date_range("2010-09-01 05:00:00", periods=50, freq=DateOffset(hours=6))
assert len(rng) == 50
assert rng[0] == datetime(2010, 9, 1, 5)
    def test_timezone_comparison_bug(self):
# smoke test
start = Timestamp("20130220 10:00", tz="US/Eastern")
result = date_range(start, periods=2, tz="US/Eastern")
assert len(result) == 2
    def test_timezone_comparison_assert(self):
start = Timestamp("20130220 10:00", tz="US/Eastern")
msg = "Inferred time zone not equal to passed time zone"
with pytest.raises(AssertionError, match=msg):
date_range(start, periods=2, tz="Europe/Berlin")
def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture):
# GH 23270
tz = tz_aware_fixture
result = date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz)
expected = date_range(end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz)[
::-1
]
tm.assert_index_equal(result, expected)
class TestDateRangeTZ:
"""Tests for date_range with timezones"""
def test_hongkong_tz_convert(self):
# GH#1673 smoke test
dr = date_range("2012-01-01", "2012-01-10", freq="D", tz="Hongkong")
# it works!
dr.hour
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_span_dst_transition(self, tzstr):
# GH#1778
# Standard -> Daylight Savings Time
dr = date_range("03/06/2012 00:00", periods=200, freq="W-FRI", tz="US/Eastern")
assert (dr.hour == 0).all()
dr = date_range("2012-11-02", periods=10, tz=tzstr)
result = dr.hour
expected = pd.Index([0] * 10)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_timezone_str_argument(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
result = date_range("1/1/2000", periods=10, tz=tzstr)
expected = date_range("1/1/2000", periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_date_range_with_fixedoffset_noname(self):
from pandas.tests.indexes.datetimes.test_timezones import fixed_off_no_name
off = fixed_off_no_name
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
idx = pd.Index([start, end])
assert off == idx.tz
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_with_tz(self, tzstr):
stamp = Timestamp("3/11/2012 05:00", tz=tzstr)
assert stamp.hour == 5
rng = date_range("3/11/2012 04:00", periods=10, freq="H", tz=tzstr)
assert stamp == rng[1]
class TestGenRangeGeneration:
def test_generate(self):
rng1 = list(generate_range(START, END, offset=BDay()))
rng2 = list(generate_range(START, END, offset="B"))
assert rng1 == rng2
def test_generate_cday(self):
rng1 = list(generate_range(START, END, offset=CDay()))
rng2 = list(generate_range(START, END, offset="C"))
assert rng1 == rng2
def test_1(self):
rng = list(generate_range(start=datetime(2009, 3, 25), periods=2))
expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)]
assert rng == expected
def test_2(self):
rng = list(generate_range(start=datetime(2008, 1, 1), end=datetime(2008, 1, 3)))
expected = [datetime(2008, 1, 1), datetime(2008, 1, 2), datetime(2008, 1, 3)]
assert rng == expected
def test_3(self):
rng = list(generate_range(start=datetime(2008, 1, 5), end=datetime(2008, 1, 6)))
expected = []
assert rng == expected
def test_precision_finer_than_offset(self):
# GH#9907
result1 = date_range(
start="2015-04-15 00:00:03", end="2016-04-22 00:00:00", freq="Q"
)
result2 = date_range(
start="2015-04-15 00:00:03", end="2015-06-22 00:00:04", freq="W"
)
expected1_list = [
"2015-06-30 00:00:03",
"2015-09-30 00:00:03",
"2015-12-31 00:00:03",
"2016-03-31 00:00:03",
]
expected2_list = [
"2015-04-19 00:00:03",
"2015-04-26 00:00:03",
"2015-05-03 00:00:03",
"2015-05-10 00:00:03",
"2015-05-17 00:00:03",
"2015-05-24 00:00:03",
"2015-05-31 00:00:03",
"2015-06-07 00:00:03",
"2015-06-14 00:00:03",
"2015-06-21 00:00:03",
]
expected1 = DatetimeIndex(
expected1_list, dtype="datetime64[ns]", freq="Q-DEC", tz=None
)
expected2 = DatetimeIndex(
expected2_list, dtype="datetime64[ns]", freq="W-SUN", tz=None
)
tm.assert_index_equal(result1, expected1)
tm.assert_index_equal(result2, expected2)
dt1, dt2 = "2017-01-01", "2017-01-01"
tz1, tz2 = "US/Eastern", "Europe/London"
@pytest.mark.parametrize(
"start,end",
[
(Timestamp(dt1, tz=tz1), Timestamp(dt2)),
(Timestamp(dt1), Timestamp(dt2, tz=tz2)),
(Timestamp(dt1, tz=tz1), Timestamp(dt2, tz=tz2)),
(Timestamp(dt1, tz=tz2), Timestamp(dt2, tz=tz1)),
],
)
def test_mismatching_tz_raises_err(self, start, end):
# issue 18488
msg = "Start and end cannot both be tz-aware with different timezones"
with pytest.raises(TypeError, match=msg):
date_range(start, end)
with pytest.raises(TypeError, match=msg):
date_range(start, end, freq=BDay())
class TestBusinessDateRange:
def test_constructor(self):
bdate_range(START, END, freq=BDay())
bdate_range(START, periods=20, freq=BDay())
bdate_range(end=START, periods=20, freq=BDay())
msg = "periods must be a number, got B"
with pytest.raises(TypeError, match=msg):
date_range("2011-1-1", "2012-1-1", "B")
with pytest.raises(TypeError, match=msg):
bdate_range("2011-1-1", "2012-1-1", "B")
msg = "freq must be specified for bdate_range; use date_range instead"
with pytest.raises(TypeError, match=msg):
bdate_range(START, END, periods=10, freq=None)
def test_misc(self):
end = datetime(2009, 5, 13)
dr = bdate_range(end=end, periods=20)
firstDate = end - 19 * BDay()
assert len(dr) == 20
assert dr[0] == firstDate
assert dr[-1] == end
def test_date_parse_failure(self):
badly_formed_date = "2007/100/1"
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
Timestamp(badly_formed_date)
with pytest.raises(ValueError, match=msg):
bdate_range(start=badly_formed_date, periods=10)
with pytest.raises(ValueError, match=msg):
bdate_range(end=badly_formed_date, periods=10)
with pytest.raises(ValueError, match=msg):
bdate_range(badly_formed_date, badly_formed_date)
def test_daterange_bug_456(self):
# GH #456
rng1 = bdate_range("12/5/2011", "12/5/2011")
rng2 = bdate_range("12/2/2011", "12/5/2011")
assert rng2._data.freq == BDay()
result = rng1.union(rng2)
assert isinstance(result, DatetimeIndex)
@pytest.mark.parametrize("closed", ["left", "right"])
def test_bdays_and_open_boundaries(self, closed):
# GH 6673
start = "2018-07-21" # Saturday
end = "2018-07-29" # Sunday
result = date_range(start, end, freq="B", closed=closed)
bday_start = "2018-07-23" # Monday
bday_end = "2018-07-27" # Friday
expected = date_range(bday_start, bday_end, freq="D")
tm.assert_index_equal(result, expected)
# Note: we do _not_ expect the freqs to match here
def test_bday_near_overflow(self):
# GH#24252 avoid doing unnecessary addition that _would_ overflow
start = Timestamp.max.floor("D").to_pydatetime()
rng = date_range(start, end=None, periods=1, freq="B")
expected = DatetimeIndex([start], freq="B")
tm.assert_index_equal(rng, expected)
def test_bday_overflow_error(self):
# GH#24252 check that we get OutOfBoundsDatetime and not OverflowError
msg = "Out of bounds nanosecond timestamp"
start = Timestamp.max.floor("D").to_pydatetime()
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start, periods=2, freq="B")
class TestCustomDateRange:
def test_constructor(self):
bdate_range(START, END, freq=CDay())
bdate_range(START, periods=20, freq= | CDay() | pandas._libs.tslibs.offsets.CDay |
import os
import sys
from enum import Enum
from pathlib import Path
import tkinter as tk
from tkinter import filedialog
import csv
import pandas as pd
import warnings
file_dir = os.path.dirname(__file__)
sys.path.append(file_dir)
root = tk.Tk()
root.withdraw()
def get_root_folder():
path = Path(os.getcwd())
return str(path)
def process_classic_results():
directory = filedialog.askdirectory()
all_results = []
for file in os.listdir(directory):
results = [[0 for i in range(4)] for i in range(2)]
print(os.getcwd())
try:
filename = os.fsdecode(file)
results[0][0] = '{}(in)'.format(filename)
results[1][0] = '{}(out)'.format(filename)
with open(directory + '/' +filename+ '/'+'log.txt', 'r', encoding='utf8') as log:
for index, line in enumerate(log):
if index == 5:
print(line)
results[0][3] = line.split()[5]
if index == 12:
print(line)
results[1][3] = line.split()[5]
if index == 15:
results[0][1] = line.split()[7]
results[0][2] = line.split()[11]
if index == 20:
results[1][1] = line.split()[7]
results[1][2] = line.split()[11]
all_results.extend(results)
except Exception as e:
warnings.warn(e)
continue
frame = | pd.DataFrame(all_results) | pandas.DataFrame |
import warnings
import numpy as np
import pandas as pd
from pandas.api.types import (
is_categorical_dtype,
is_datetime64tz_dtype,
is_interval_dtype,
is_period_dtype,
is_scalar,
is_sparse,
union_categoricals,
)
from ..utils import is_arraylike, typename
from ._compat import PANDAS_GT_100
from .core import DataFrame, Index, Scalar, Series, _Frame
from .dispatch import (
categorical_dtype_dispatch,
concat,
concat_dispatch,
get_parallel_type,
group_split_dispatch,
hash_object_dispatch,
is_categorical_dtype_dispatch,
make_meta,
make_meta_obj,
meta_nonempty,
tolist_dispatch,
union_categoricals_dispatch,
)
from .extensions import make_array_nonempty, make_scalar
from .utils import (
_empty_series,
_nonempty_scalar,
_scalar_from_dtype,
is_categorical_dtype,
is_float_na_dtype,
is_integer_na_dtype,
)
##########
# Pandas #
##########
@make_scalar.register(np.dtype)
def _(dtype):
return _scalar_from_dtype(dtype)
@make_scalar.register(pd.Timestamp)
@make_scalar.register(pd.Timedelta)
@make_scalar.register(pd.Period)
@make_scalar.register(pd.Interval)
def _(x):
return x
@make_meta.register((pd.Series, pd.DataFrame))
def make_meta_pandas(x, index=None):
return x.iloc[:0]
@make_meta.register(pd.Index)
def make_meta_index(x, index=None):
return x[0:0]
meta_object_types = (pd.Series, pd.DataFrame, pd.Index, pd.MultiIndex)
try:
import scipy.sparse as sp
meta_object_types += (sp.spmatrix,)
except ImportError:
pass
@make_meta_obj.register(meta_object_types)
def make_meta_object(x, index=None):
"""Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')]) # doctest: +SKIP
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8')) # doctest: +SKIP
Series([], Name: a, dtype: float64)
>>> make_meta('i8') # doctest: +SKIP
1
"""
if is_arraylike(x) and x.shape:
return x[:0]
if index is not None:
index = make_meta(index)
if isinstance(x, dict):
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index
)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError(
"Expected iterable of tuples of (name, dtype), got {0}".format(x)
)
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x],
index=index,
)
elif not hasattr(x, "dtype") and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
        # because it is implicitly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except Exception:
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
@meta_nonempty.register(object)
def meta_nonempty_object(x):
"""Create a nonempty pandas object from the given metadata.
Returns a pandas DataFrame, Series, or Index that contains two rows
of fake data.
"""
if is_scalar(x):
return _nonempty_scalar(x)
else:
raise TypeError(
"Expected Pandas-like Index, Series, DataFrame, or scalar, "
"got {0}".format(typename(type(x)))
)
@meta_nonempty.register(pd.DataFrame)
def meta_nonempty_dataframe(x):
idx = meta_nonempty(x.index)
dt_s_dict = dict()
data = dict()
for i, c in enumerate(x.columns):
series = x.iloc[:, i]
dt = series.dtype
if dt not in dt_s_dict:
dt_s_dict[dt] = _nonempty_series(x.iloc[:, i], idx=idx)
data[i] = dt_s_dict[dt]
res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns)))
res.columns = x.columns
if PANDAS_GT_100:
res.attrs = x.attrs
return res
_numeric_index_types = (pd.Int64Index, pd.Float64Index, pd.UInt64Index)
@meta_nonempty.register(pd.Index)
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in _numeric_index_types:
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(["a", "b"], name=idx.name)
elif typ is pd.DatetimeIndex:
start = "1970-01-01"
# Need a non-monotonic decreasing index to avoid issues with
# partial string indexing see https://github.com/dask/dask/issues/2389
# and https://github.com/pandas-dev/pandas/issues/16515
# This doesn't mean `_meta_nonempty` should ever rely on
# `self.monotonic_increasing` or `self.monotonic_decreasing`
try:
return pd.date_range(
start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
except ValueError: # older pandas versions
data = [start, "1970-01-02"] if idx.freq is None else None
return pd.DatetimeIndex(
data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
elif typ is pd.PeriodIndex:
return pd.period_range(
start="1970-01-01", periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, "D")
try:
return pd.timedelta_range(
start=start, periods=2, freq=idx.freq, name=idx.name
)
except ValueError: # older pandas versions
start = np.timedelta64(1, "D")
data = [start, start + 1] if idx.freq is None else None
return pd.TimedeltaIndex(
data, start=start, periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.CategoricalIndex:
if len(idx.categories) == 0:
data = pd.Categorical(_nonempty_index(idx.categories), ordered=idx.ordered)
else:
data = pd.Categorical.from_codes(
[-1, 0], categories=idx.categories, ordered=idx.ordered
)
return pd.CategoricalIndex(data, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(l) for l in idx.levels]
codes = [[0, 0] for i in idx.levels]
try:
return pd.MultiIndex(levels=levels, codes=codes, names=idx.names)
except TypeError: # older pandas versions
return pd.MultiIndex(levels=levels, labels=codes, names=idx.names)
raise TypeError(
"Don't know how to handle index of type {0}".format(typename(type(idx)))
)
@meta_nonempty.register(pd.Series)
def _nonempty_series(s, idx=None):
# TODO: Use register dtypes with make_array_nonempty
if idx is None:
idx = _nonempty_index(s.index)
dtype = s.dtype
if len(s) > 0:
# use value from meta if provided
data = [s.iloc[0]] * 2
elif is_datetime64tz_dtype(dtype):
entry = pd.Timestamp("1970-01-01", tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
if len(s.cat.categories):
data = [s.cat.categories[0]] * 2
cats = s.cat.categories
else:
data = _nonempty_index(s.cat.categories)
cats = s.cat.categories[:0]
data = | pd.Categorical(data, categories=cats, ordered=s.cat.ordered) | pandas.Categorical |
import pymongo
import logging
import numpy as np
import pandas as pd
from scipy.stats import entropy
from config import Configuration
from utils.bot_utils import is_bot
from tasks.collectors.edit_type import CollectEditTypes
from utils.date_utils import parse_timestamp
from tasks.collectors.revision import CollectRevisions
from tasks.features.base import FeatureTask
from tasks.calculators.page import CalculatePageFirstEditDate, CalculatePageLastEditDate
config = Configuration()
logging.basicConfig(filename='sme.log',
filemode='w',
level=logging.DEBUG,
format='%(levelname)s:%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
class EditPeriodsFeature(FeatureTask):
@staticmethod
def calculate_quarter(row):
first_edit_date = parse_timestamp(row['first_edit_date'])
last_edit_date = parse_timestamp(row['last_edit_date'])
timestamp = parse_timestamp(row['timestamp'])
edit_delta = last_edit_date.toordinal() - first_edit_date.toordinal()
ordinal_period_1 = first_edit_date.toordinal() + (edit_delta / 4)
ordinal_period_2 = first_edit_date.toordinal() + (edit_delta / 2)
ordinal_period_3 = last_edit_date.toordinal() - (edit_delta / 4)
ordinal_timestamp = timestamp.toordinal()
if ordinal_timestamp < ordinal_period_1:
return 'q1'
elif ordinal_timestamp < ordinal_period_2:
return 'q2'
elif ordinal_timestamp < ordinal_period_3:
return 'q3'
else:
return 'q4'
@staticmethod
def init_user_data(page_id, user_name):
return {'page_id': page_id,
'user_name': user_name,
'edit_period_q1': 0,
'edit_period_q2': 0,
'edit_period_q3': 0,
'edit_period_q4': 0}
@staticmethod
def get_user_row(user_data):
total_edits = user_data['edit_period_q1'] + \
user_data['edit_period_q2'] + \
user_data['edit_period_q3'] + \
user_data['edit_period_q4']
return [user_data['page_id'],
user_data['user_name'],
user_data['edit_period_q1']/float(total_edits),
user_data['edit_period_q2']/float(total_edits),
user_data['edit_period_q3']/float(total_edits),
user_data['edit_period_q4']/float(total_edits)]
def cache_name(self):
return 'edit_periods'
def on_requires(self):
return [CalculatePageFirstEditDate(data_dir=self.data_dir),
CalculatePageLastEditDate(data_dir=self.data_dir),
CollectRevisions(data_dir=self.data_dir)]
def on_process(self, data_frames):
data = []
columns = ['page_id',
'user_name',
'edit_period_q1',
'edit_period_q2',
'edit_period_q3',
'edit_period_q4']
fe_df = data_frames[0]
le_df = data_frames[1]
revs_df = data_frames[2]
if isinstance(revs_df, pd.DataFrame) and isinstance(fe_df, pd.DataFrame) and isinstance(le_df, pd.DataFrame):
for page_id, page_df in revs_df.groupby(by='page_id'):
first_edit_date = parse_timestamp(fe_df[fe_df['page_id'] == page_id].iloc[0]['first_edit_date'])
last_edit_date = parse_timestamp(le_df[le_df['page_id'] == page_id].iloc[0]['last_edit_date'])
edit_delta = last_edit_date.toordinal() - first_edit_date.toordinal()
ordinal_period_1 = first_edit_date.toordinal() + (edit_delta / 4)
ordinal_period_2 = first_edit_date.toordinal() + (edit_delta / 2)
ordinal_period_3 = last_edit_date.toordinal() - (edit_delta / 4)
if isinstance(page_df, pd.DataFrame):
for user_name, user_df in page_df.groupby(by='user_name'):
user_data = self.init_user_data(page_id=page_id, user_name=user_name)
if isinstance(user_df, pd.DataFrame):
for index, row in user_df.iterrows():
timestamp = parse_timestamp(row['timestamp'])
ordinal_timestamp = timestamp.toordinal()
if ordinal_timestamp < ordinal_period_1:
user_data['edit_period_q1'] = user_data['edit_period_q1'] + 1
elif ordinal_timestamp < ordinal_period_2:
user_data['edit_period_q2'] = user_data['edit_period_q2'] + 1
elif ordinal_timestamp < ordinal_period_3:
user_data['edit_period_q3'] = user_data['edit_period_q3'] + 1
else:
user_data['edit_period_q4'] = user_data['edit_period_q4'] + 1
data.append(self.get_user_row(user_data))
return pd.DataFrame(data=data, columns=columns)
class EditFrequencyFeature(FeatureTask):
@staticmethod
def calculate_edit_frequency(group, norm_factor):
if isinstance(group, pd.DataFrame):
timestamps = [parse_timestamp(timestamp) for timestamp in group['timestamp']]
intervals = []
index = 0
for timestamp in timestamps:
interval = timestamp - timestamp if (index - 1 < 0) else timestamp - timestamps[index - 1]
norm_interval = (float(interval.total_seconds()) / norm_factor) if norm_factor > 0 else 0
intervals.append(norm_interval)
index += 1
return np.mean(intervals), np.median(intervals)
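    # Scale note (illustrative arithmetic, not part of the original feature code): the
    # norm_factor used in on_process() below is the number of seconds between
    # 2001-01-01 and 2050-01-01 (≈1.55e9 s), so two edits made one day apart
    # contribute a normalized interval of about 86400 / 1.55e9 ≈ 5.6e-5.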
def cache_name(self):
return 'edit_frequency'
def on_requires(self):
return [CollectRevisions(data_dir=self.data_dir)]
def on_process(self, data_frames):
data = []
columns = ['page_id',
'user_name',
'mean_edit_frequency',
'median_edit_frequency']
revs_df = data_frames[0]
if isinstance(revs_df, pd.DataFrame):
norm_factor = (parse_timestamp('2050-01-01') - parse_timestamp('2001-01-01')).total_seconds()
for (page_id, user_name), group in revs_df.groupby(by=['page_id', 'user_name']):
logging.debug('Page ID: {}\tUser Name: {}'.format(page_id, user_name))
mean_edit_interval, median_edit_interval = self.calculate_edit_frequency(group, norm_factor)
data.append([page_id, user_name, mean_edit_interval, median_edit_interval])
return pd.DataFrame(data=data, columns=columns)
class EditSizeFeature(FeatureTask):
def cache_name(self):
return 'edit_size'
def on_requires(self):
return [CollectRevisions(data_dir=self.data_dir)]
def on_process(self, data_frames):
normalization_scale = 1000000
data = []
columns = ['page_id',
'user_name',
'mean_edit_size',
'median_edit_size']
revs_df = data_frames[0]
if isinstance(revs_df, pd.DataFrame):
for (page_id, user_name), group in revs_df.groupby(by=['page_id', 'user_name']):
logging.debug('Page ID: {}\tUser Name: {}'.format(page_id, user_name))
data.append([page_id,
user_name,
np.mean(group['size'])/normalization_scale,
np.median(group['size'])/normalization_scale])
return pd.DataFrame(data=data, columns=columns)
class EditTypesFeature(FeatureTask):
def cache_name(self):
return 'edit_types'
def on_requires(self):
return [CollectEditTypes(data_dir=self.data_dir)]
def on_process(self, data_frames):
edit_type_columns = ['edit_type_a',
'edit_type_b',
'edit_type_c',
'edit_type_d',
'edit_type_e',
'edit_type_f',
'edit_type_g',
'edit_type_h',
'edit_type_i',
'edit_type_j',
'edit_type_k',
'edit_type_l',
'edit_type_m']
df = data_frames[0]
counter = 0
data = []
for (page_id, user_name), group in df.groupby(by=['page_id', 'user_name']):
row = [page_id, user_name] + [np.sum(group[et_col]) / len(group) for et_col in edit_type_columns]
data.append(row)
if counter % 50000 == 0 and counter > 0:
print(counter)
counter += 1
return pd.DataFrame(data=data, columns=['page_id', 'user_name'] + edit_type_columns)
class PageEditsEntropyFeature(FeatureTask):
def cache_name(self):
return 'page_edit_entropy'
def on_requires(self):
return [CollectRevisions(data_dir=self.data_dir)]
@staticmethod
def aggregate(collection, user_name):
agg_result = collection.aggregate([
{
'$match': {'user_name': user_name, 'page_ns': 0}},
{
'$group': {'_id': "$page_id", 'count': {'$sum': 1}}
}
], allowDiskUse=True)
if agg_result is not None:
counts = []
for dic in agg_result:
counts.append(dic['count'])
return entropy(counts)
else:
return None
def on_process(self, data_frames):
host = config.get('MONGO', 'host')
port = config.get_int('MONGO', 'port')
database = config.get('MONGO', 'database')
collection = config.get('MONGO', 'collection')
revs_df = data_frames[0]
data = []
columns = ['user_name', 'page_edit_dist']
if isinstance(revs_df, pd.DataFrame):
user_names = revs_df['user_name'].unique()
with pymongo.MongoClient(host=host, port=port) as client:
db = client.get_database(database)
collection = db.get_collection(collection)
for user_name in user_names:
if is_bot(user_name):
continue
page_edit_dist = self.aggregate(collection=collection, user_name=user_name)
if page_edit_dist is None:
continue
data.append([user_name, page_edit_dist])
logging.debug('Username: {}\tTotal edited pages: {}'.format(user_name, page_edit_dist))
df = pd.DataFrame(data=data, columns=columns)
data = []
cols = ['page_id', 'user_name', 'page_edit_dist']
df = revs_df.merge(df, how='left', on='user_name')[cols]
for (page_id, user_name), group in df.groupby(by=['page_id', 'user_name']):
data.append([page_id, user_name, group.iloc[0]['page_edit_dist']])
return | pd.DataFrame(data=data, columns=cols) | pandas.DataFrame |
#!/usr/bin/env python3
# Process cleaned data set into separate Q-n-A pairs, with each Q-n-A pair as one row in a CSV file
import pandas as pd
def qna_pairs(row):
'''
For argument row of pandas dataframe, parse column 'FAQ' into heading and
question-and-answer pairs, storing in columns 'heading' and 'qna'
respectively, and returning modified row. Concurrent questions are assumed
to be in the same entry together.
'''
heading = True
h = q = a = ''
qna = []
# Cycle through list of strings in FAQ column
for item in row.FAQ:
# Check for heading and store separately, if exists. If not, store first question.
if heading:
if '?' not in item:
h += ' ' + item
else:
heading = False
q = item.strip()
a = ''
# Check for subsequent question and, if exists, append previous qna pair before storing.
elif '?' in item:
qna.append([q.strip(), a.strip()])
q = item
a = ''
# Accumulate answer strings
else:
a += ' ' + item
# Treat heading as an answer to the question of 'Topic' column text
if h:
qna = [[row.Topic + '?', h.strip()]] + qna
if q:
qna.append([q.strip(), a.strip()])
row['heading'] = h.strip()
row['qna'] = qna
return row
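# Illustrative example of the parse (hypothetical row, not taken from the real FAQ data):
# for row.Topic == 'Parking' and row.FAQ == ['General parking rules', 'Where can I park?', 'In lot B.']
# the loop above yields heading == 'General parking rules' and
# qna == [['Parking?', 'General parking rules'], ['Where can I park?', 'In lot B.']].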
if __name__ == '__main__':
# Load cleaned data, split FAQ column on double new lines, and apply Q-n-A separation function
faq = pd.read_csv('../data/interim/faq-hand-cleaned.csv')
faq.FAQ = faq.FAQ.apply(lambda x: x.split('\n\n'))
faq = faq.apply(qna_pairs, axis=1)
# Re-stack Q-n-A pairs into separate 'question' and 'answer' columns and drop unneeded columns
stack = faq.apply(lambda x: | pd.Series(x['qna']) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: MingZ
# @Date created: 21 Sep 2017
# @Date last modified: 21 Sep 2017
# Python Version: 2.7
# historical data from Google/Yahoo Finance
# http://www.google.com/finance/historical?q=JNUG&startdate=20170101&enddate=20170707&output=csv
# start = datetime.datetime(2017,1,1)
# end = datetime.datetime(2017,7,7)
# f = web.DataReader('JNUG','yahoo',start,end)
from Tkinter import *
import tkFileDialog as fd
import datetime
import pandas as pd
import sys
import os
import pandas_datareader.data as web
class stockdownload:
def __init__(self):
today = datetime.date.today()
preday = today-datetime.timedelta(days=2)
root = Tk()
root.title("STOCK DOWNLOAD")
root.geometry('%dx%d+%d+%d' % (800,800,(root.winfo_screenwidth()/2-400),(root.winfo_screenheight()/2-400)))
# root.geometry("800x500")
# root.columnconfigure(0, weight=1)
frame = Frame(root)
frame.grid_rowconfigure(0, weight=1)
frame.grid_columnconfigure(0, weight=1)
        # Enter the start date
l_start = Label(frame,text="start:").grid(row=0)
e_start = Entry(frame)
e_start.grid(row=0,column=1)
e_start.insert(0,preday)
        # Enter the end date
l_end = Label(frame,text="end:").grid(row=1)
e_end = Entry(frame)
e_end.grid(row=1,column=1)
e_end.insert(0,today)
        # Options (symbol list)
scrollbar = Scrollbar(frame)
scrollbar.grid(row=7,column=2, sticky=N+S+W)
lb_symbol = Listbox(frame,yscrollcommand=scrollbar.set,height=30,selectmode=SINGLE)
def __src_path(relative_path):
parent_path = os.getcwd()
return os.path.join(parent_path,relative_path)
file_path =__src_path("Symbol.csv")
file = pd.read_csv(file_path,index_col='Symbol')
for index in file.index:
lb_symbol.insert(END, index)
lb_symbol.grid(row=7,column=1,sticky=N+S+E+W)
scrollbar.config(command=lb_symbol.yview)
        # Download location
def __browse():
filename = fd.askdirectory()
folder_path.set(filename)
folder_path = StringVar()
l_dl = Label(frame,text="download to..").grid(row=2)
e_dl = Entry(frame,textvariable=folder_path)
e_dl.grid(row=2,column=1)
b_dl = Button(frame,text="browse",command=__browse).grid(row=2,column=2)
b_action = Button(frame,text="Download",
command=lambda:self.__download(lb_select.get(0,END),e_start.get(),e_end.get(),e_dl.get())).grid(row=3,column=1)
Label(frame, text="").grid(row=4,column=2)
        # Select-all button
def __bSelect():
lb_select.delete(0,END)
temp = lb_symbol.get(0,END)
for item in temp:
lb_select.insert(END,item)
def __bClear():
lb_select.delete(0,END)
b_select = Button(frame,text="select all",command=__bSelect)
b_clear = Button(frame,text="clear",command=__bClear)
b_clear.grid(row=5,column=3)
b_select.grid(row=5,column=1)
        # Find button
def __eFind(Event):
try:
symbolTemp = e_find.get().upper()
index = lb_symbol.get(0,END).index(symbolTemp)
lb_symbol.see(index)
if symbolTemp not in lb_select.get(0,END):
lb_select.insert(0,symbolTemp)
except ValueError:
self.__popup("ValueError","Symbol no exist")
l_find = Label(frame,text="find&select:").grid(row=6)
e_find = Entry(frame)
l_select = Label(frame,text="selected:").grid(row=6,column=3)
def __delSelect(Event):
w = Event.widget
try:
index = int(w.curselection()[0])
lb_select.delete(index)
except:
pass
def __addSelect(Event):
w = Event.widget
lb_order = list(lb_select.get(0,END))
index = int(w.curselection()[0])
value = w.get(index)
if value not in lb_order:
lb_order.append(value)
lb_order.sort()
lb_select.delete(0,END)
for item in lb_order:
lb_select.insert(END,item)
s_select = Scrollbar(frame)
s_select.grid(row=7,column=4, sticky=N+S+W)
lb_select = Listbox(frame,yscrollcommand=s_select.set)
lb_select.grid(row=7,column=3,sticky=N+S+E+W)
s_select.config(command=lb_select.yview)
lb_select.bind('<<ListboxSelect>>',__delSelect)
lb_symbol.bind('<<ListboxSelect>>',__addSelect)
e_find.grid(row=6,column=1)
e_find.insert(1,"A")
e_find.bind('<Return>',__eFind)
def __browse_list():
symbol_file_name = fd.askopenfilename(filetypes = (("csv files","*.csv"),("all files","*.*")))
symbol_file = pd.read_csv(symbol_file_name)
try:
for item in symbol_file['Symbol']:
if item in lb_symbol.get(0,END) and item not in lb_select.get(0,END):
lb_select.insert(END,item)
self.__popup("Success","Success")
except:
self.__popup("Error","Header should contain 'Symbol'")
folder_path_symbol = StringVar()
l_load = Label(frame, text='load symbol list:').grid(row=8,column=3)
b_load = Button(frame, text='browse',command=__browse_list)
b_load.grid(row=9,column=3)
frame.pack()
root.mainloop()
def __popup(self,title,text):
popupwindow = Toplevel()
popupwindow.attributes("-topmost",1)
popupwindow.title(title)
popupwindow.geometry("200x60+400+250")
label = Label(popupwindow, text=text).pack()
button = Button(popupwindow, text="ok", command=popupwindow.destroy).pack()
def __download(self, symbols, start, end, path):
if not os.path.exists(path):
self.__popup("FolderError","file path no exist")
else:
try:
start = datetime.datetime(int(start[0:4]),int(start[5:7]),int(start[8:]))
end = datetime.datetime(int(end[0:4]),int(end[5:7]),int(end[8:]))
# print start,end
except:
self.__popup("FormatError","date format is wrong")
else:
f_error = | pd.DataFrame() | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kaggle Titanic Data - Raw Data Analysis
# ## Import required library and load data
import numpy as np
import pandas as pd
import pandas_profiling as pdp
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# Input data: train.csv, test.csv
# (Data column)
# Sibsp: number of siblings/spouses aboard the Titanic
# Parch: number of parents/children aboard the Titanic
# embarked: Port of embarkation
# C = Cherbourg
# Q = Queenstown
# S = Southampton
# Pclass: ticket class
# 1 = upper
# 2 = middle
# 3 = low
data_train = pd.read_csv('data/train.csv')
data_test = pd.read_csv('data/test.csv')
data_train.dtypes
data_test.dtypes
# ## Exploring raw data
data_train.head()
data_test.head()
data_train.describe()
data_test.describe()
pdp.ProfileReport(data_train)
# Function to obtain information of null data (missing data)
def null_info(df):
num_null_val = df.isnull().sum()
p_null_val = 100*num_null_val/len(df)
null_info = pd.concat([num_null_val, p_null_val], axis=1)
null_info = null_info.rename(columns = {0: 'Counts of null', 1:'%'})
return null_info
null_info(data_train)
null_info(data_test)
# ## Data Cleaning
# Since ~80% of the Cabin cells are NaN, Cabin is discarded in training.
# ### Replace sex with 0 (male) or 1 (female)
# + code_folding=[]
# data_train = data_train.replace("male", 0).replace("female", 1)
# data_test = data_test.replace("male", 0).replace("female", 1)
data_train['Sex'] = data_train['Sex'].map({"male":0, "female":1})
data_test['Sex'] = data_test['Sex'].map({"male":0, "female":1})
data_train
# -
# ### Fill null data in Age with median value of age
data_train["Age"] = data_train["Age"].fillna(data_train["Age"].median())
data_test["Age"] = data_test["Age"].fillna(data_test["Age"].median())
# ### Fill null data in "Embarked"
# S is the most freqent embarked port and hence the missing cells are filled with S.
data_train["Embarked"] = data_train["Embarked"].fillna("S")
data_test["Embarked"] = data_test["Embarked"].fillna("S")
# Replace strings in "Embarked" with 0 (S), 1 (C), 2 (Q)
data_train["Embarked"] = data_train["Embarked"].map({"S":0, "C":1, "Q":2})
data_test["Embarked"] = data_test["Embarked"].map({"S":0, "C":1, "Q":2})
data_train
null_info(data_train)
data_train.dtypes
data_test["Fare"] = data_test["Fare"].fillna(data_test["Fare"].median())
null_info(data_test)
pdp.ProfileReport(data_train)
# survival rate
data_train['Survived'].mean()
# Pclass
data_train['Survived'].groupby(data_train['Pclass']).mean()
sns.countplot(data_train['Pclass'], hue=data_train['Survived'])
# + code_folding=[]
columns = ['Sex', 'Pclass', 'SibSp', 'Parch', 'Embarked']
fig, axes = plt.subplots(len(columns), 1, figsize=(8, 20))
plt.subplots_adjust(hspace=0.3)
for column, ax in zip(columns, axes):
sns.countplot(x='Survived', hue=column, data=data_train, ax=ax)
ax.legend(loc='upper right')
ax.set_title(column)
# -
data_train['bin_age'] = pd.cut(data_train['Age'],10)
pd.crosstab(data_train['bin_age'], data_train['Survived']).plot.bar(stacked=True)
data_train['bin_age'] = | pd.cut(data_train['Age'],10) | pandas.cut |
"""
Tests for Series cumulative operations.
See also
--------
tests.frame.test_cumulative
"""
from itertools import product
import numpy as np
import pytest
import pandas as pd
from pandas import _is_numpy_dev
import pandas._testing as tm
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(series).values, func(np.array(series)), check_dtype=check_dtype,
)
# with missing values
ts = series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
class TestSeriesCumulativeOps:
def test_cumsum(self, datetime_series):
_check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
@pytest.mark.xfail(
_is_numpy_dev,
reason="https://github.com/pandas-dev/pandas/issues/31992",
strict=False,
)
def test_cummin(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummin().values,
np.minimum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(
_is_numpy_dev,
reason="https://github.com/pandas-dev/pandas/issues/31992",
strict=False,
)
def test_cummax(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummax().values,
np.maximum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummin_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-1", "2000-1-1", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummax_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-2", "NaT", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "1 min"])
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "1 min", "1 min", "1 min"])
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "2 min", "NaT", "3 min"])
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "2 min", "2 min", "3 min"])
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummethods_bool(self):
# GH#6270
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
args = product((a, b, c, d), methods)
for s, method in args:
expected = pd.Series(methods[method](s.values))
result = getattr(s, method)()
tm.assert_series_equal(result, expected)
e = pd.Series([False, True, np.nan, False])
cse = | pd.Series([0, 1, np.nan, 1], dtype=object) | pandas.Series |
# coding: utf-8
# In[1]:
# Load dependencies
from scipy.stats import gmean
import pandas as pd
import numpy as np
import sys
sys.path.insert(0, '../../statistics_helper')
from CI_helper import *
from fraction_helper import *
pd.options.display.float_format = '{:,.1f}'.format
# # Estimating the biomass of soil microbes
#
# In order to estimate the total biomass of soil microbes, we rely on two recent studies - [Xu et al.](http://dx.doi.org/10.1111/geb.12029) and [Serna-Chavez et al.](http://dx.doi.org/10.1111/geb.12070)
#
# We use the final estimates in each of the studies as two independent estimates of the biomass of soil microbes. Xu et al. estimate a biomass of ≈23 Gt C of soil microbes, whereas Serna-Chavez et al. estimate ≈15 Gt C.
# In[2]:
# Define the values for the estimates of the biomass of soil microbes from Xu et al. and Serna-Chavez et al.
xu = 23.2e15
serna_chavez = 14.6e15
# As our best estimate for the biomass of soil microbes, we use the geometric mean of the values from Xu et al. and Serna-Chavez et al.
# In[3]:
# Our best estimate is the geometric mean of values from Xu et al. and Serna-Chavez et al.
best_estimate = gmean([xu,serna_chavez])
print('Our best estimate for the biomass of soil microbes is ≈%.0f Gt C' % (best_estimate/1e15))
# ## Cells in deeper layers
# The estimates reported in Xu et al. and Serna-Chavez et al. are for microbial biomass in the top 1 meter of soil. To take into account microbial biomass in depths lower than 1 meter, we try to estimate the fraction of microbial biomass in the top 1 meter out of the total biomass of soil microbes.
#
# Xu et al. extrapolate the microbial biomass across the soil profile based on empirical equations for the distribution of root biomass along soil depth from [Jackson et al.](http://dx.doi.org/10.1007/BF00333714). The empirical equations are biome-specific, and follow the general form: $$Y = 1-\beta^d$$ where Y is the cumulative fraction of roots, d is depth in centimeters, and $\beta$ is a coefficient fitted for each biome. This means that $\beta^d$ represents the fraction of roots present in layers deeper than d centimeters.
#
# We use the fitted $\beta$ coefficients from Jackson et al., along with estimates for the total microbial biomass in the top meter of soils in each biome from Xu et al., to estimate the amount of biomass present in soil layers deeper than 1 meter.
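# As an illustrative example (the β value here is assumed for the example, not taken from the data file): with $\beta = 0.97$, the fraction of roots - and, by the extrapolation above, of microbial biomass - below 100 cm is $0.97^{100} ≈ 0.048$, i.e. roughly 5%.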
# In[4]:
# Load data on the microbial biomass from each biome and the coefficients for the depth distribution of roots
# in each biome
data = pd.read_excel('soil_microbial_biomass_data.xlsx',skiprows=1)
# Calculate the fraction of biomass deeper than 100 centimeters for each biome
biome_deeper_frac = (data['beta']**100)
# Calculate the relative fraction of total microbial biomass that is present in layers deeper than 1 meter
total_deeper_relative_fraction = (data['Total microbial biomass 100 cm (g C)']*biome_deeper_frac).sum()/xu
print('The fraction of microbial biomass in layers deeper than 1 meter based on Xu et al. is ' + '{:,.0f}%'.format(total_deeper_relative_fraction*100))
# As an additional source for estimating the fraction of biomass of microbes in soil layers deeper than 1 meter, we use the concentration of bacterial cells present in layers deeper than 1 meter reported in [Whitman et al.](https://www.ncbi.nlm.nih.gov/pubmed/9618454). Whitman et al. estimate that in forests there are ≈$4×10^7$ cells per gram in the top 1 meter and ≈$10^6$ cells per gram in depths of 1-8 meters. For other soils, Whitman et al. estimate ≈$2×10^9$ cells per gram in the top 1 meter and ≈$10^8$ cells per gram in depths of 1-8 meters. Assuming cells in deeper layers are similar in size to cells in the top 1 meter, this is equivalent to:
# In[5]:
# Concentration of cells in top 1 meter of forest soils
forest_upper = 4e7
# Top 1 meter is 1 meter in depth
forest_upper_depth = 1
# Concentration of cells in depths of 1-8 meters of forest soils
forest_lower = 1e6
# The deeper layer of soils is 1-8 meters - 7 meters in depth
forest_lower_depth = 7
# Concentration of cells in top 1 meter of other soils
other_upper = 2e9
# Top 1 meter is 1 meter in depth
other_upper_depth = 1
# Concentration of cells in depths of 1-8 meters of other soils
other_lower = 1e8
# The deeper layer of soils is 1-8 meters - 7 meters in depth
other_lower_depth = 7
#Calculate the fraction of cells present in deeper layers of soil in forests and other soils
forest_lower_frac = forest_lower*forest_lower_depth/(forest_lower*forest_lower_depth + forest_upper*forest_upper_depth)
other_lower_frac = other_lower*other_lower_depth/(other_lower*other_lower_depth + other_upper*other_upper_depth)
whitman_mean_frac = frac_mean(np.array([forest_lower_frac,other_lower_frac]))
print('The fraction of cells found in soil layers deeper than 1 meter is ' + '{:,.0f}%'.format(forest_lower_frac*100) + ' in forests and ' '{:,.0f}%'.format(other_lower_frac*100) + ' in other soils.')
print('The average fraction of cells found in deeper layers is ' + '{:,.0f}%'.format(100*whitman_mean_frac))
# As our estimate for the fraction of biomass present in layers deeper than 1 meter, we take the geometric mean of the fractions estimated by Xu et al. and by Whitman et al.
# In[6]:
# Calculate the geometric mean of the estimates by Xu et al. and Whitman et al.
mean_deep_frac = frac_mean(np.array([total_deeper_relative_fraction,whitman_mean_frac]))
print('Our best estimate for the fraction of biomass present in layers deeper than 1 meter is ' + '{:,.0f}%'.format(100*mean_deep_frac))
# Correct out best estimate to account for the biomass of microbes in soil layers deeper than 1 meter
best_estimate_corrected = best_estimate*(1+mean_deep_frac)
print('Our best estimate for the biomass of soil microbes, including contributions from microbes present in layers deeper than 1 meter is %.0f Gt C' % (best_estimate_corrected/1e15))
# # Uncertainty analysis
# To calculate the uncertainty associated with the estimate of the total biomass of soil microbes, we first collect all available uncertainties and then take the largest value as our best projection for the uncertainty.
#
# ## Total biomass of microbes in the top 1 meter
#
# ### Intra-study uncertainty
# As noted above, our estimate is based on two studies which report the total biomass of soil microbes - [Xu et al.](http://dx.doi.org/10.1111/geb.12029) and [Serna-Chavez et al.](http://dx.doi.org/10.1111/geb.12070). Xu et al. does not report uncertainties associated with the total estimate of microbial biomass. However, Xu et al. report 95% confidence intervals for the average microbial biomass densities in each biome. We use these ranges as a measure of the intra-study uncertainty in Xu et al. The highest uncertainty across biomes is ≈1.5-fold.
# In[7]:
# We calculate the upper and lower multiplicative 95% confidence interval of the average microbial
# biomass density for each biome
xu_upper_CI = data['upper 95% confidence interval of Cmic']/data['Cmic (0-30 cm) [mmol C kg^-1]']
xu_lower_CI = data['Cmic (0-30 cm) [mmol C kg^-1]']/data['lower 95% confidence interval of Cmic']
# Our estimate for the final uncertainty is the average of the upper and lower confidence intervals.
data['95% confidence interval'] = ( | pd.concat([xu_upper_CI,xu_lower_CI],axis=1) | pandas.concat |
import pandas as pd
from sodapy import Socrata
import datetime
import definitions
# global variables for main data:
hhs_data, test_data, nyt_data_us, nyt_data_state, max_hosp_date = [],[],[],[],[]
"""
get_data()
Fetches data from API, filters, cleans, and combines with provisional.
After running, global variables are filled for use in subsequent functions
"""
def get_data():
global nyt_data_us
global nyt_data_state
global test_data
global hhs_data
global max_hosp_date
nyt_data_us = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us.csv")
nyt_data_state = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us-states.csv")
client = Socrata("healthdata.gov", None)
results = client.get("g62h-syeh", limit=2000000)
test_results = client.get("j8mb-icvb", limit=2000000)
print("LOG: Fetched all raw data")
# Filter data to get columns of interest
hhs_data = pd.DataFrame.from_records(results)[['state', 'date', 'inpatient_beds_used_covid']]
hhs_data.inpatient_beds_used_covid = hhs_data.inpatient_beds_used_covid.fillna(0)
hhs_data = hhs_data.astype({'inpatient_beds_used_covid': 'int32'})
test_data = pd.DataFrame.from_records(test_results)[['state', 'date', 'overall_outcome', 'new_results_reported']]
test_data.new_results_reported = test_data.new_results_reported.fillna(0)
test_data = test_data.astype({'new_results_reported': 'int32'})
print("LOG: Filtered Data")
# For provisional data, gets days since most recent update of HHS time series
max_date = hhs_data.date.max()
max_hosp_date = max_date
provisional = client.get("4cnb-m4rz", limit=2000000, where=f"update_date > '{max_date}'")
hhs_provisional = pd.DataFrame.from_records(provisional)[['update_date', 'archive_link']]
hhs_provisional.update_date = hhs_provisional.update_date.apply(lambda x: x[:10])
hhs_provisional.update_date = pd.to_datetime(hhs_provisional.update_date)
# Gets last archive of every day
group = hhs_provisional.groupby(['update_date'])
hhs_provisional = group.last()
# Add provisional data to HHS data
frames = []
for a in hhs_provisional.iterrows():
date = a[0]
url = a[1].item()['url']
df = pd.read_csv(url)[['state', 'inpatient_beds_used_covid']]
df['date']=date
if date > | pd.Timestamp(max_date) | pandas.Timestamp |
import numpy as np
import pandas as pd
import os
import trace_analysis
import sys
import scipy
import scipy.stats
def compute_kolmogorov_smirnov_2_samp(packets_node, window_size, experiment):
# Perform a Kolmogorov Smirnov Test on each node of the network
ks_2_samp = None
for node_id in packets_node:
true_mu = packets_node[node_id]['rtt']
min_index = 0
max_index = window_size-1
# Compute the t-test for each window
while max_index < 200:
window_packets = packets_node[node_id].loc[(packets_node[node_id]['seq'] >= min_index) & (packets_node[node_id]['seq'] <= max_index)]['rtt']
onesample_result = scipy.stats.ks_2samp(window_packets, true_mu)
if ks_2_samp is None:
ks_2_samp = pd.DataFrame({'node_id': node_id,
'experiment': experiment,
'ks-test statistic': onesample_result[0],
'p-value': onesample_result[1],
'window': [str(min_index+1) + '-' + str(max_index+1)]})
else:
ks_2_samp = pd.concat([ks_2_samp, pd.DataFrame({'node_id': node_id,
'experiment': experiment,
'ks-test statistic': onesample_result[0],
'p-value': onesample_result[1],
'window': [str(min_index+1) + '-' + str(max_index+1)]})])
min_index = max_index + 1
max_index += window_size
return ks_2_samp
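# Example (hypothetical data): a minimal sketch of how compute_kolmogorov_smirnov_2_samp
# can be called. It assumes packets_node maps a node id to a DataFrame with 'seq' and
# 'rtt' columns (as used above); the window size and experiment name are illustrative only.
def _example_ks_2_samp():
    rng = np.random.default_rng(0)
    packets_node = {'node-1': pd.DataFrame({'seq': np.arange(200),
                                            'rtt': rng.normal(50, 5, 200)})}
    return compute_kolmogorov_smirnov_2_samp(packets_node, window_size=50, experiment='demo')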
def compute_one_sample_t_test(packets_node, window_size, experiment):
# Perform a 1 Sample T-Test on each node of the network
t_test = None
for node_id in packets_node:
true_mu = packets_node[node_id]['rtt'].mean()
min_index = 0
max_index = window_size-1
# Compute the t-test for each window
while max_index < 200:
window_packets = packets_node[node_id].loc[(packets_node[node_id]['seq'] >= min_index) & (packets_node[node_id]['seq'] <= max_index)]['rtt']
onesample_result = scipy.stats.ttest_1samp(window_packets, true_mu)
if t_test is None:
t_test = pd.DataFrame({'node_id': node_id,
'experiment': experiment,
't-test statistic': onesample_result[0],
'p-value': onesample_result[1],
'window': [str(min_index+1) + '-' + str(max_index+1)]})
else:
t_test = pd.concat([t_test, pd.DataFrame({'node_id': node_id,
'experiment': experiment,
't-test statistic': onesample_result[0],
'p-value': onesample_result[1],
'window': [str(min_index+1) + '-' + str(max_index+1)]})])
min_index = max_index + 1
max_index += window_size
return t_test
def compute_labeled_statistics_by_network(stats, feature, n_nodes):
# Input: stats a dataframe containing the statistics of the network
# feature a feature to extract
# n_nodes the number of nodes in the network
#Output: extract feature for each node of the network
data = stats[['experiment',str(feature),'label']].sort_values(by=['experiment']).reset_index(drop=True)
network = None
experiment = None
label = None
nodes = []
for index in data.index:
# Write the experiment to a dataframe
if experiment != data.at[index,'experiment'] and experiment != None:
features = {'experiment': [experiment], 'label': [label]}
for node in range(1, n_nodes+1):
if node <= len(nodes):
features[node] = [nodes[node-1]]
else:
features[node] = [np.float32(sys.maxsize)]
# Create a new dataframe
if network is None:
network = pd.DataFrame(features)
else:
network = pd.concat([network, pd.DataFrame(features)])
nodes = []
experiment = data.at[index,'experiment']
label = data.at[index,'label']
# First iteration
elif experiment == None:
nodes = []
experiment = data.at[index,'experiment']
label = data.at[index,'label']
nodes.append(data.at[index, feature])
# Write the last experiment
experiment = data["experiment"].iloc[-1]
label = data["label"].iloc[-1]
features = {'experiment': [experiment], 'label': [label]}
for node in range(1, n_nodes+1):
if node <= len(nodes):
features[node] = [nodes[node-1]]
else:
features[node] = [np.float32(sys.maxsize)]
# Create a new dataframe
if network is None:
network = pd.DataFrame(features)
else:
network = pd.concat([network, pd.DataFrame(features)])
network = network.reset_index(drop=True)
return network
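# Example (illustrative values): compute_labeled_statistics_by_network reshapes a per-node
# statistics table into one row per experiment with one column per node; the feature name,
# labels and node count below are assumptions made only for this sketch.
def _example_stats_by_network():
    stats = pd.DataFrame({'experiment': [1, 1, 2, 2],
                          'mean': [50.0, 61.5, 48.2, 59.9],
                          'label': [0, 0, 1, 1]})
    return compute_labeled_statistics_by_network(stats, feature='mean', n_nodes=2)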
def compute_window_labeled_statistics_by_network(win_stats, feature, n_nodes, window_size, n_packets=200):
# Input: stats a dataframe containing the statistics of the network
# feature a feature to extract
# n_nodes the number of nodes in the network
# window_size the size of the window
#Output: extract feature for each node of the network
data = win_stats[['experiment','node_id',str(feature),'label']].sort_values(by=['experiment','node_id']).reset_index(drop=True)
network = None
experiment = None
label = None
nodes = {}
for index in data.index:
# Write the experiment to a dataframe
if experiment != data.at[index,'experiment'] and experiment != None:
features = {'experiment': [experiment for i in range(1,int(n_packets/window_size)+1)], 'label': [label for i in range(1,int(n_packets/window_size)+1)]}
# For each node in the network
for node in range(1, n_nodes+1):
# For each node_id
for node_id in nodes:
if node_id in nodes:
features[node] = nodes[node_id]
# If some window is lost we need to add infinite values
if len(features[node]) < int(n_packets/window_size):
while len(features[node]) < int(n_packets/window_size):
features[node].append(np.float32(sys.maxsize))
# Create a new dataframe
if network is None:
network = pd.DataFrame(features)
else:
network = pd.concat([network, pd.DataFrame(features)])
nodes = {}
experiment = data.at[index,'experiment']
label = data.at[index,'label']
# First iteration
elif experiment == None:
nodes = {}
experiment = data.at[index,'experiment']
label = data.at[index,'label']
if data.at[index,'node_id'] not in nodes:
nodes[data.at[index,'node_id']] = [data.at[index, feature]]
else:
nodes[data.at[index,'node_id']].append(data.at[index, feature])
# Write the last experiment
features = {'experiment': [experiment for i in range(1,int(n_packets/window_size)+1)], 'label': [label for i in range(1,int(n_packets/window_size)+1)]}
# For each node in the network
for node in range(1, n_nodes+1):
# For each node_id
for node_id in nodes:
if node_id in nodes:
features[node] = nodes[node_id]
# If some window is lost we need to add infinite values
if len(features[node]) < int(n_packets/window_size):
while len(features[node]) < int(n_packets/window_size):
features[node].append(np.float32(sys.maxsize))
# Create a new dataframe
if network is None:
network = pd.DataFrame(features)
else:
network = pd.concat([network, pd.DataFrame(features)])
network = network.reset_index(drop=True)
return network
def compute_window_labeled_statistics(nodes, packets_node, label, experiment, window_size):
# Input: a Dataframe nodes = node_id, rank + packets_node = {node_id: node_id, seq, hop, rtt},
# label that indicate the class of the experiment, the experiment_id and window_size
# Output: compute a dataframe containing node_id, count, mean, var, std, hop, min, max, loss, label for each window
win_stats = None
outliers = trace_analysis.compute_outliers_by_node(packets_node)
for node in packets_node:
count = packets_node[node]['rtt'].groupby(packets_node[node]['rtt'].index // window_size * window_size).count()
mean = packets_node[node]['rtt'].groupby(packets_node[node]['rtt'].index // window_size * window_size).mean()
var = packets_node[node]['rtt'].groupby(packets_node[node]['rtt'].index // window_size * window_size).var()
std = packets_node[node]['rtt'].groupby(packets_node[node]['rtt'].index // window_size * window_size).std()
hop = int(nodes[nodes['node_id'] == node]['rank'])
min_val = packets_node[node]['rtt'].groupby(packets_node[node]['rtt'].index // window_size * window_size).min()
max_val = packets_node[node]['rtt'].groupby(packets_node[node]['rtt'].index // window_size * window_size).max()
n_outliers = outliers[node]['rtt'].groupby(outliers[node]['rtt'].index // window_size * window_size).count()
loss = count.copy().apply(lambda x: 1 - float(x)/window_size)
for index in count.index:
if win_stats is None:
win_stats = pd.DataFrame({'node_id': [node],
'experiment': [experiment],
'count': [count.loc[index]],
'mean': [mean.loc[index]],
'var': [var.loc[index]],
'std': [std.loc[index]],
'hop': [hop],
'min': [min_val.loc[index]],
'max': [max_val.loc[index]],
'loss': [loss.loc[index]],
'outliers': [n_outliers.get(index, 0)],
'label': [label]})
else:
win_stats = pd.concat([win_stats, pd.DataFrame({'node_id': [node],
'experiment': [experiment],
'count': [count.loc[index]],
'mean': [mean.loc[index]],
'var': [var.loc[index]],
'std': [std.loc[index]],
'hop': [hop],
'min': [min_val.loc[index]],
'max': [max_val.loc[index]],
'loss': [loss.loc[index]],
'outliers': [n_outliers.get(index, 0)],
'label': [label]})])
# Drop duplicates
if win_stats is not None:
win_stats = win_stats.dropna()
return win_stats
def compute_labeled_statistics(nodes, packets_node, label, experiment):
# Input: a Dataframe nodes = node_id, rank + packets_node = {node_id: node_id, seq, hop, rtt}
# label that indicate the class of the experiment and the experiment_id
# Output: compute a dataframe containing node_id, count, mean, var, std, hop, min, max, loss, label
stats = None
outliers = trace_analysis.compute_outliers_by_node(packets_node)
for node in packets_node:
count = packets_node[node]['rtt'].count()
mean = packets_node[node]['rtt'].mean()
var = packets_node[node]['rtt'].var()
std = packets_node[node]['rtt'].std()
hop = int(nodes[nodes['node_id'] == node]['rank'])
min_val = packets_node[node]['rtt'].min()
max_val = packets_node[node]['rtt'].max()
n_outliers = outliers[node]['rtt'].count()
loss = 1 - float(count)/200
if stats is None:
stats = pd.DataFrame({'node_id': [node],
'experiment': [experiment],
'count': [count],
'mean': [mean],
'var': [var],
'std': [std],
'hop': [hop],
'min': [min_val],
'max': [max_val],
'loss': [loss],
'outliers': [n_outliers],
'label': [label]})
else:
stats = pd.concat([stats, pd.DataFrame({'node_id': [node],
'experiment': [experiment],
'count': [count],
'mean': [mean],
'var': [var],
'std': [std],
'hop': [hop],
'min': [min_val],
'max': [max_val],
'loss': [loss],
'outliers': [n_outliers],
'label': [label]})])
return stats
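# Example (hypothetical data): compute_labeled_statistics expects the nodes table
# (node_id, rank) plus the per-node packet traces, and it relies on
# trace_analysis.compute_outliers_by_node; the values below only illustrate the shapes.
def _example_labeled_statistics():
    nodes = pd.DataFrame({'node_id': ['node-1'], 'rank': [1]})
    packets_node = {'node-1': pd.DataFrame({'seq': np.arange(200),
                                            'rtt': np.random.normal(50, 5, 200)})}
    return compute_labeled_statistics(nodes, packets_node, label=0, experiment=1)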
def tumbling_statistics_per_node(path, tracefile, window_size=10):
# Compute a dictionary containing all the statistics from each node of the dataset
# Read the rank of each node
nodes = pd.read_csv(path + 'addr-' + tracefile + '.cap',
sep=';|seq=| hop|time = |ms',
na_filter=True,
usecols=[1,3,5],
header=None,
skiprows=799,
names=['node_id','seq','rtt'],
engine='python').dropna().drop_duplicates()
nodes = nodes.sort_values(by=['node_id','seq'], ascending=True, na_position='first')
nodes = nodes[nodes['rtt'] >= 1] # Removes values with RTT < 1ms
d_nodes = {} # <node_id, DataFrame containing seq and rtt columns>
for n in nodes.index:
if nodes['node_id'][n] in d_nodes:
            d_nodes[nodes['node_id'][n]] = pd.concat([d_nodes[nodes['node_id'][n]], pd.DataFrame({'seq': [int(nodes['seq'][n])], nodes['node_id'][n]: [nodes['rtt'][n]]})])
else:
d_nodes[nodes['node_id'][n]] = pd.DataFrame({'seq': [int(nodes['seq'][n])], nodes['node_id'][n]:[nodes['rtt'][n]]})
# Generate a dataframe containing all nodes
nodes = pd.DataFrame([seq for seq in range(1,1001)], columns=['seq']).set_index('seq')
for node in d_nodes.keys():
nodes = nodes.join(d_nodes[node].set_index('seq'))
nodes = nodes[~nodes.index.duplicated(keep='first')]
# Calculate all the statistics
statistics = {} # <node_id, statistics of the node>
for node in nodes:
stats = nodes[node].groupby(nodes[node].index // window_size).count().to_frame()
stats = stats.rename(index=str, columns={node: "packet_loss"})
stats["packet_loss"] = | pd.to_numeric(stats["packet_loss"], downcast='float') | pandas.to_numeric |
# Load dependencies
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from matplotlib import *
import matplotlib.pyplot as plt
from matplotlib.cm import register_cmap
from scipy import stats
from sklearn.decomposition import PCA
import seaborn
class Wrangle:
def __init__(self, df):
self.df = df
def format(self):
df = self.df
df.drop(df[df.sq_feet == 0].index, inplace=True)
df.drop(df[df.price == 0].index, inplace=True)
df.dropna(inplace=True)
# Remove outliers
df = df[df.sq_feet < df.sq_feet.quantile(0.80)]
# Manual one hot encodng of utilities included column
df = df.assign(heat=0, electricity=0, water=0, internet=0, cable=0)
for index, row in df.iterrows():
if "Heat" in row["utilities_included"]:
df.at[index, "heat"] = 1
if "Electricity" in row["utilities_included"]:
df.at[index, "electricity"] = 1
if "Water" in row["utilities_included"]:
df.at[index, "water"] = 1
if "Internet" in row["utilities_included"]:
df.at[index, "internet"] = 1
if "Cable" in row["utilities_included"]:
df.at[index, "cable"] = 1
# Conditionally replace quadrant names
df.loc[df["quadrant"] == None, "quadrant"] = "Unspecified"
df.loc[
(df["quadrant"] == "Inner-City||SW") | (df["quadrant"] == "SW||Inner-City"),
"quadrant",
] = "SW-Central"
df.loc[
(df["quadrant"] == "Inner-City||NW") | (df["quadrant"] == "NW||Inner-City"),
"quadrant",
] = "NW-Central"
df.loc[
(df["quadrant"] == "Inner-City||SE") | (df["quadrant"] == "SE||Inner-City"),
"quadrant",
] = "SE-Central"
df.loc[
(df["quadrant"] == "Inner-City||NE") | (df["quadrant"] == "NE||Inner-City"),
"quadrant",
] = "NE-Central"
# One hot encoding of quadrants
df["quadrant"] = pd.Categorical(df["quadrant"])
dfDummies = pd.get_dummies(df["quadrant"], prefix="Quadrant")
df = pd.concat([df, dfDummies], axis=1)
# One hot encoding of type
df["type"] = pd.Categorical(df["type"])
dfDummies = pd.get_dummies(df["type"], prefix="type")
df = pd.concat([df, dfDummies], axis=1)
# One hot encoding of community
df["community"] = pd.Categorical(df["community"])
dfDummies = pd.get_dummies(df["community"], prefix="community")
df = pd.concat([df, dfDummies], axis=1)
# Clean the den column
df.loc[df["den"] == "Yes", "den"] = 1
df.loc[(df["den"] == "No") | (df["den"] == None), "den"] = 0
# One hot encoding for den
df["den"] = | pd.Categorical(df["den"]) | pandas.Categorical |
# Copyright (c) 2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
"""
Functions to find trajectory sector intersection data.
"""
import numpy as np
import pandas as pd
from via_sphere import global_Point3d
from .AirspaceVolume import AirspaceVolume
from .gis_database_interface import find_horizontal_user_airspace_intersections, \
get_user_sector_name, get_user_sector_altitude_range, NotFoundException
from .airspace_intersections import find_3D_airspace_intersections
from pru.logger import logger
log = logger(__name__)
def find_trajectory_user_airspace_intersections(smooth_traj):
"""
Find airspace user airspace intersection positions from a smoothed trajectory.
Parameters
----------
smooth_traj: SmoothedTrajectory
A SmoothedTrajectory containing the flight id, smoothed horizontal path,
time profile and altitude profile.
Returns
-------
intersection_positions: a pandas DataFrame
The trajectory user airspace intersection positions.
Empty if no intersections found.
"""
lats = []
lons = []
volume_ids = []
min_altitude = smooth_traj.altp.altitudes.min()
max_altitude = smooth_traj.altp.altitudes.max()
lats, lons, volume_ids = find_horizontal_user_airspace_intersections(smooth_traj.flight_id,
smooth_traj.path.lats,
smooth_traj.path.lons,
min_altitude, max_altitude)
if len(lats):
# A dict to hold the intersected volumes
volumes = {}
try:
for volume_id in set(volume_ids):
volume_name = get_user_sector_name(volume_id)
bottom_alt, top_alt = get_user_sector_altitude_range(volume_id)
volumes.setdefault(volume_id, AirspaceVolume(volume_name,
bottom_alt, top_alt))
except NotFoundException:
log.exception('user airspace id: %s not found for flight id: %s',
volume_id, smooth_traj.flight_id)
return | pd.DataFrame() | pandas.DataFrame |
"""
SARIMAX parameters class.
Author: <NAME>
License: BSD-3
"""
import numpy as np
import pandas as pd
from numpy.polynomial import Polynomial
from statsmodels.tsa.statespace.tools import is_invertible
from statsmodels.tsa.arima.tools import validate_basic
class SARIMAXParams(object):
"""
SARIMAX parameters.
Parameters
----------
spec : SARIMAXSpecification
Specification of the SARIMAX model.
Attributes
----------
spec : SARIMAXSpecification
Specification of the SARIMAX model.
exog_names : list of str
Names associated with exogenous parameters.
ar_names : list of str
Names associated with (non-seasonal) autoregressive parameters.
ma_names : list of str
Names associated with (non-seasonal) moving average parameters.
seasonal_ar_names : list of str
Names associated with seasonal autoregressive parameters.
seasonal_ma_names : list of str
Names associated with seasonal moving average parameters.
param_names :list of str
Names of all model parameters.
k_exog_params : int
Number of parameters associated with exogenous variables.
k_ar_params : int
Number of parameters associated with (non-seasonal) autoregressive
lags.
k_ma_params : int
Number of parameters associated with (non-seasonal) moving average
lags.
k_seasonal_ar_params : int
Number of parameters associated with seasonal autoregressive lags.
k_seasonal_ma_params : int
Number of parameters associated with seasonal moving average lags.
k_params : int
Total number of model parameters.
"""
def __init__(self, spec):
self.spec = spec
# Local copies of relevant attributes
self.exog_names = spec.exog_names
self.ar_names = spec.ar_names
self.ma_names = spec.ma_names
self.seasonal_ar_names = spec.seasonal_ar_names
self.seasonal_ma_names = spec.seasonal_ma_names
self.param_names = spec.param_names
self.k_exog_params = spec.k_exog_params
self.k_ar_params = spec.k_ar_params
self.k_ma_params = spec.k_ma_params
self.k_seasonal_ar_params = spec.k_seasonal_ar_params
self.k_seasonal_ma_params = spec.k_seasonal_ma_params
self.k_params = spec.k_params
# Cache for holding parameter values
self._params_split = spec.split_params(
np.zeros(self.k_params) * np.nan, allow_infnan=True)
self._params = None
@property
def exog_params(self):
"""(array) Parameters associated with exogenous variables."""
return self._params_split['exog_params']
@exog_params.setter
def exog_params(self, value):
if np.isscalar(value):
value = [value] * self.k_exog_params
self._params_split['exog_params'] = validate_basic(
value, self.k_exog_params, title='exogenous coefficients')
self._params = None
@property
def ar_params(self):
"""(array) Autoregressive (non-seasonal) parameters."""
return self._params_split['ar_params']
@ar_params.setter
def ar_params(self, value):
if np.isscalar(value):
value = [value] * self.k_ar_params
self._params_split['ar_params'] = validate_basic(
value, self.k_ar_params, title='AR coefficients')
self._params = None
@property
def ar_poly(self):
"""(Polynomial) Autoregressive (non-seasonal) lag polynomial."""
coef = np.zeros(self.spec.max_ar_order + 1)
coef[0] = 1
ix = self.spec.ar_lags
coef[ix] = -self._params_split['ar_params']
return Polynomial(coef)
@ar_poly.setter
def ar_poly(self, value):
# Convert from the polynomial to the parameters, and set that way
if isinstance(value, Polynomial):
value = value.coef
value = validate_basic(value, self.spec.max_ar_order + 1,
title='AR polynomial')
if value[0] != 1:
raise ValueError('AR polynomial constant must be equal to 1.')
ar_params = []
for i in range(1, self.spec.max_ar_order + 1):
if i in self.spec.ar_lags:
ar_params.append(-value[i])
elif value[i] != 0:
raise ValueError('AR polynomial includes non-zero values'
' for lags that are excluded in the'
' specification.')
self.ar_params = ar_params
@property
def ma_params(self):
"""(array) Moving average (non-seasonal) parameters."""
return self._params_split['ma_params']
@ma_params.setter
def ma_params(self, value):
if np.isscalar(value):
value = [value] * self.k_ma_params
self._params_split['ma_params'] = validate_basic(
value, self.k_ma_params, title='MA coefficients')
self._params = None
@property
def ma_poly(self):
"""(Polynomial) Moving average (non-seasonal) lag polynomial."""
coef = np.zeros(self.spec.max_ma_order + 1)
coef[0] = 1
ix = self.spec.ma_lags
coef[ix] = self._params_split['ma_params']
return Polynomial(coef)
@ma_poly.setter
def ma_poly(self, value):
# Convert from the polynomial to the parameters, and set that way
if isinstance(value, Polynomial):
value = value.coef
value = validate_basic(value, self.spec.max_ma_order + 1,
title='MA polynomial')
if value[0] != 1:
raise ValueError('MA polynomial constant must be equal to 1.')
ma_params = []
for i in range(1, self.spec.max_ma_order + 1):
if i in self.spec.ma_lags:
ma_params.append(value[i])
elif value[i] != 0:
raise ValueError('MA polynomial includes non-zero values'
' for lags that are excluded in the'
' specification.')
self.ma_params = ma_params
@property
def seasonal_ar_params(self):
"""(array) Seasonal autoregressive parameters."""
return self._params_split['seasonal_ar_params']
@seasonal_ar_params.setter
def seasonal_ar_params(self, value):
if np.isscalar(value):
value = [value] * self.k_seasonal_ar_params
self._params_split['seasonal_ar_params'] = validate_basic(
value, self.k_seasonal_ar_params, title='seasonal AR coefficients')
self._params = None
@property
def seasonal_ar_poly(self):
"""(Polynomial) Seasonal autoregressive lag polynomial."""
# Need to expand the polynomial according to the season
s = self.spec.seasonal_periods
coef = [1]
if s > 0:
expanded = np.zeros(self.spec.max_seasonal_ar_order)
ix = np.array(self.spec.seasonal_ar_lags, dtype=int) - 1
expanded[ix] = -self._params_split['seasonal_ar_params']
coef = np.r_[1, np.pad(np.reshape(expanded, (-1, 1)),
[(0, 0), (s - 1, 0)], 'constant').flatten()]
return Polynomial(coef)
@seasonal_ar_poly.setter
def seasonal_ar_poly(self, value):
s = self.spec.seasonal_periods
# Note: assume that we are given coefficients from the full polynomial
# Convert from the polynomial to the parameters, and set that way
if isinstance(value, Polynomial):
value = value.coef
value = validate_basic(value, 1 + s * self.spec.max_seasonal_ar_order,
title='seasonal AR polynomial')
if value[0] != 1:
raise ValueError('Polynomial constant must be equal to 1.')
seasonal_ar_params = []
for i in range(1, self.spec.max_seasonal_ar_order + 1):
if i in self.spec.seasonal_ar_lags:
seasonal_ar_params.append(-value[s * i])
elif value[s * i] != 0:
raise ValueError('AR polynomial includes non-zero values'
' for lags that are excluded in the'
' specification.')
self.seasonal_ar_params = seasonal_ar_params
@property
def seasonal_ma_params(self):
"""(array) Seasonal moving average parameters."""
return self._params_split['seasonal_ma_params']
@seasonal_ma_params.setter
def seasonal_ma_params(self, value):
if np.isscalar(value):
value = [value] * self.k_seasonal_ma_params
self._params_split['seasonal_ma_params'] = validate_basic(
value, self.k_seasonal_ma_params, title='seasonal MA coefficients')
self._params = None
@property
def seasonal_ma_poly(self):
"""(Polynomial) Seasonal moving average lag polynomial."""
# Need to expand the polynomial according to the season
s = self.spec.seasonal_periods
coef = np.array([1])
if s > 0:
expanded = np.zeros(self.spec.max_seasonal_ma_order)
ix = np.array(self.spec.seasonal_ma_lags, dtype=int) - 1
expanded[ix] = self._params_split['seasonal_ma_params']
coef = np.r_[1, np.pad(np.reshape(expanded, (-1, 1)),
[(0, 0), (s - 1, 0)], 'constant').flatten()]
return Polynomial(coef)
@seasonal_ma_poly.setter
def seasonal_ma_poly(self, value):
s = self.spec.seasonal_periods
# Note: assume that we are given coefficients from the full polynomial
# Convert from the polynomial to the parameters, and set that way
if isinstance(value, Polynomial):
value = value.coef
value = validate_basic(value, 1 + s * self.spec.max_seasonal_ma_order,
title='seasonal MA polynomial',)
if value[0] != 1:
raise ValueError('Polynomial constant must be equal to 1.')
seasonal_ma_params = []
for i in range(1, self.spec.max_seasonal_ma_order + 1):
if i in self.spec.seasonal_ma_lags:
seasonal_ma_params.append(value[s * i])
elif value[s * i] != 0:
raise ValueError('MA polynomial includes non-zero values'
' for lags that are excluded in the'
' specification.')
self.seasonal_ma_params = seasonal_ma_params
@property
def sigma2(self):
"""(float) Innovation variance."""
return self._params_split['sigma2']
@sigma2.setter
def sigma2(self, params):
length = int(not self.spec.concentrate_scale)
self._params_split['sigma2'] = validate_basic(
params, length, title='sigma2').item()
self._params = None
@property
def reduced_ar_poly(self):
"""(Polynomial) Reduced form autoregressive lag polynomial."""
return self.ar_poly * self.seasonal_ar_poly
@property
def reduced_ma_poly(self):
"""(Polynomial) Reduced form moving average lag polynomial."""
return self.ma_poly * self.seasonal_ma_poly
@property
def params(self):
"""(array) Complete parameter vector."""
if self._params is None:
self._params = self.spec.join_params(**self._params_split)
return self._params.copy()
@params.setter
def params(self, value):
self._params_split = self.spec.split_params(value)
self._params = None
@property
def is_complete(self):
"""(bool) Are current parameter values all filled in (i.e. not NaN)."""
return not np.any(np.isnan(self.params))
@property
def is_valid(self):
"""(bool) Are current parameter values valid (e.g. variance > 0)."""
valid = True
try:
self.spec.validate_params(self.params)
except ValueError:
valid = False
return valid
@property
def is_stationary(self):
"""(bool) Is the reduced autoregressive lag poylnomial stationary."""
validate_basic(self.ar_params, self.k_ar_params,
title='AR coefficients')
validate_basic(self.seasonal_ar_params, self.k_seasonal_ar_params,
title='seasonal AR coefficients')
ar_stationary = True
seasonal_ar_stationary = True
if self.k_ar_params > 0:
ar_stationary = is_invertible(self.ar_poly.coef)
if self.k_seasonal_ar_params > 0:
seasonal_ar_stationary = is_invertible(self.seasonal_ar_poly.coef)
return ar_stationary and seasonal_ar_stationary
@property
def is_invertible(self):
"""(bool) Is the reduced moving average lag poylnomial invertible."""
# Short-circuit if there is no MA component
validate_basic(self.ma_params, self.k_ma_params,
title='MA coefficients')
validate_basic(self.seasonal_ma_params, self.k_seasonal_ma_params,
title='seasonal MA coefficients')
ma_stationary = True
seasonal_ma_stationary = True
if self.k_ma_params > 0:
ma_stationary = is_invertible(self.ma_poly.coef)
if self.k_seasonal_ma_params > 0:
seasonal_ma_stationary = is_invertible(self.seasonal_ma_poly.coef)
return ma_stationary and seasonal_ma_stationary
def to_dict(self):
"""
Return the parameters split by type into a dictionary.
Returns
-------
split_params : dict
Dictionary with keys 'exog_params', 'ar_params', 'ma_params',
'seasonal_ar_params', 'seasonal_ma_params', and (unless
`concentrate_scale=True`) 'sigma2'. Values are the parameters
associated with the key, based on the `params` argument.
"""
return self._params_split.copy()
def to_pandas(self):
"""
Return the parameters as a Pandas series.
Returns
-------
series : pd.Series
Pandas series with index set to the parameter names.
"""
return | pd.Series(self.params, index=self.param_names) | pandas.Series |
"""
Generates choropleth charts that are displayed in a web browser.
Takes data from simulation and displays a single language distribution across a
global map. Uses plotly's gapminder dataset as a base for world data.
For more information on choropleth charts see https://en.wikipedia.org/wiki/Choropleth_map
ldp.visualization.choropleth
./visualization/choropleth.py
author: <NAME>
created: 7-22-2019
update: 7-22-2019
"""
import plotly.express as px
import pandas as pd
def show_choropleth(sim_dataframe: pd.DataFrame, language: str) -> None:
"""
Shows a choropleth chart of the language distribution from sim_dataframe.
Args:
sim_dataframe (pandas.DataFrame): A DataFrame containing the output from
the ldp simulation.
language (str): The name of a language distribution to display. Must be
a column header in sim_dataframe.
Raises:
ValueError: if language is not a column header in sim_dataframe.
"""
if language not in sim_dataframe.columns:
raise ValueError(f"ValueError: Invalid language '{language}'.")
# merge plotly.gapminder dataset with our data on iso_alpha
df_map = sim_dataframe.rename(columns={'regions':'iso_alpha'}, inplace=False)
gapminder = px.data.gapminder().query("year==2007")
df_all = | pd.merge(gapminder, df_map, on="iso_alpha") | pandas.merge |
# pylint: disable=C0103,E0401
"""
Template for SNAP Dash apps.
"""
import copy, math, os
import dash
import luts
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.express as px
from dash.dependencies import Input, Output
from gui import layout, path_prefix
from plotly.subplots import make_subplots
# Read data blobs and other items used from env
data = pd.read_pickle("data/roses.pickle")
calms = pd.read_pickle("data/calms.pickle")
exceedance = pd.read_pickle("data/crosswind_exceedance.pickle")
# monthly_means = pd.read_csv("monthly_averages.csv")
# future_rose = pd.read_csv("future_roses.csv")
# percentiles = pd.read_csv("percentiles.csv", index_col=0)
# We set the requests_pathname_prefix to enable
# custom URLs.
# https://community.plot.ly/t/dash-error-loading-layout/8139/6
app = dash.Dash(__name__, requests_pathname_prefix=path_prefix)
# AWS Elastic Beanstalk looks for application by default,
# if this variable (application) isn't set you will get a WSGI error.
application = app.server
gtag_id = os.environ["GTAG_ID"]
app.index_string = f"""
<!DOCTYPE html>
<html>
<head>
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-3978613-12"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){{dataLayer.push(arguments);}}
gtag('js', new Date());
gtag('config', '{gtag_id}');
</script>
{{%metas%}}
<title>{{%title%}}</title>
<meta charset="utf-8"/>
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- Schema.org markup for Google+ -->
<meta itemprop="name" content="Alaska Community Wind Tool">
<meta itemprop="description" content="Explore historical wind data for Alaska communities">
<meta itemprop="image" content="http://windtool.accap.uaf.edu/assets/wind-rose.png">
<!-- Twitter Card data -->
<meta name="twitter:card" content="summary_large_image">
<meta name="twitter:site" content="@SNAPandACCAP">
<meta name="twitter:title" content="Alaska Community Wind Tool">
<meta name="twitter:description" content="Explore historical wind data for Alaska communities">
<meta name="twitter:creator" content="@SNAPandACCAP">
<!-- Twitter summary card with large image must be at least 280x150px -->
<meta name="twitter:image:src" content="http://windtool.accap.uaf.edu/assets/wind-rose.png">
<!-- Open Graph data -->
<meta property="og:title" content="Alaska Community Wind Tool" />
<meta property="og:type" content="website" />
<meta property="og:url" content="http://windtool.accap.uaf.edu" />
<meta property="og:image" content="http://windtool.accap.uaf.edu/assets/wind-rose.png" />
<meta property="og:description" content="Explore historical wind data for Alaska communities" />
<meta property="og:site_name" content="Alaska Community Wind Tool" />
<link rel="alternate" hreflang="en" href="http://windtool.accap.uaf.edu" />
<link rel="canonical" href="http://windtool.accap.uaf.edu"/>
{{%favicon%}}
{{%css%}}
</head>
<body>
{{%app_entry%}}
<footer>
{{%config%}}
{{%scripts%}}
{{%renderer%}}
</footer>
</body>
</html>
"""
app.title = "WRCC Alaska Winds"
app.layout = layout
@app.callback(Output("communities-dropdown", "value"), [Input("map", "clickData")])
def update_place_dropdown(selected_on_map):
""" If user clicks on the map, update the drop down. """
# Look up ID by name -- kind of backwards, but
# it's because we can't bundle much data into
# map click handles.
# TODO look at customdata property here
if selected_on_map is not None:
c = luts.communities[
luts.communities["place"] == selected_on_map["points"][0]["text"]
]
return c.index.tolist()[0]
# Return a default
return "PAFA"
@app.callback(Output("map", "figure"), [Input("communities-dropdown", "value")])
def update_selected_community_on_map(community):
""" Draw a second trace on the map with one community highlighted. """
return {
"data": [
luts.map_communities_trace,
go.Scattermapbox(
lat=[luts.communities.loc[community]["latitude"]],
lon=[luts.communities.loc[community]["longitude"]],
mode="markers",
marker={"size": 20, "color": "rgb(207, 38, 47)"},
line={"color": "rgb(0, 0, 0)", "width": 2},
text=luts.communities.loc[community]["place"],
hoverinfo="text",
),
],
"layout": luts.map_layout,
}
def get_rose_calm_sxs_annotations(titles, calm):
"""
Return a list of correctly-positioned %calm indicators
for the monthly wind rose charts.
Take the already-generated list of titles and use
that pixel geometry to position the %calm info.
"""
calm_annotations = copy.deepcopy(titles)
k = 0
for anno in calm_annotations:
anno["y"] = anno["y"] - 0.556
# anno["y"] = anno["y"] - 0.01
anno["font"] = {"color": "#000", "size": 10}
calm_text = str(int(round(calm.iloc[k]["percent"] * 100))) + "%"
if calm.iloc[k]["percent"] > 0.2:
# If there's enough room, add the "calm" text fragment
calm_text += " calm"
anno["text"] = calm_text
k += 1
return calm_annotations
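# Example (illustrative shapes): the titles passed in are the annotation dicts that
# make_subplots generates for subplot titles (paper coordinates), and calm is assumed to
# be a DataFrame with one 'percent' row per subplot, in the same order.
def _example_calm_annotations():
    titles = [{'x': 0.225, 'y': 1.0, 'text': 'Jan', 'xref': 'paper', 'yref': 'paper'}]
    calm = pd.DataFrame({'percent': [0.15]})
    return get_rose_calm_sxs_annotations(titles, calm)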
def get_rose_traces(d, traces, showlegend=False):
"""
Get all traces for a wind rose, given the data chunk.
Month is used to tie the subplot to the formatting
chunks in the multiple-subplot graph.
"""
# Directly mutate the `traces` array.
for sr, sr_info in luts.speed_ranges.items():
dcr = d.loc[(d["speed_range"] == sr)]
props = dict(
r=dcr["frequency"].tolist(),
theta=pd.to_numeric(dcr["direction_class"]) * 10,
name=sr + " mph",
hovertemplate="%{r} %{fullData.name} winds from %{theta}<extra></extra>",
marker_color=sr_info["color"],
showlegend=showlegend,
legendgroup="legend",
)
traces.append(go.Barpolar(props))
# Compute the maximum extent of any particular
# petal on the wind rose.
max_petal = d.groupby(["direction_class"]).sum().max()
return max_petal
@app.callback(Output("exceedance_plot", "figure"), [Input("communities-dropdown", "value")])
def update_exceedance_plot(community):
"""Plot line chart of allowable crosswind threshold exceedance"""
df = exceedance.loc[exceedance["sid"] == community]
title = "Test Allowable crosswind component exceedance"
fig = px.line(df, x="direction", y="exceedance", color="threshold", title=title)
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)', "yaxis.gridcolor": "black"})
return fig
@app.callback(Output("rose", "figure"), [Input("communities-dropdown", "value")])
def update_rose(community):
""" Generate cumulative wind rose for selected community """
traces = []
# Subset for community & 0=year
# d = data.loc[(data["sid"] == community) & (data["month"] == 0)]
# month not used in these data, for now
d = data.loc[data["sid"] == community]
get_rose_traces(d, traces, True)
# Compute % calm, use this to modify the hole size
c = calms[calms["sid"] == community]
# c_mean = c.mean()
# c_mean = int(round(c_mean["percent"]))
calm = int(round(c["percent"].values[0]))
c_name = luts.communities.loc[community]["place"]
rose_layout = {
"title": dict(
text="Annual Wind Speed/Direction Distribution, 1980-2014, " + c_name,
font=dict(size=18),
),
"height": 700,
"font": dict(family="Open Sans", size=10),
"margin": {"l": 0, "r": 0, "b": 20, "t": 75},
"legend": {"orientation": "h", "x": 0, "y": 1},
"annotations": [
{
"x": 0.5,
"y": 0.5,
"showarrow": False,
"text": str(calm) + r"% calm",
"xref": "paper",
"yref": "paper",
}
],
"polar": {
"legend": {"orientation": "h"},
"angularaxis": {
"rotation": 90,
"direction": "clockwise",
"tickmode": "array",
"tickvals": [0, 45, 90, 135, 180, 225, 270, 315],
"ticks": "", # hide tick marks
"ticktext": ["N", "NE", "E", "SE", "S", "SW", "W", "NW"],
"tickfont": {"color": "#444"},
"showline": False, # no boundary circles
"color": "#888", # set most colors to #888
"gridcolor": "#efefef",
},
"radialaxis": {
"color": "#888",
"gridcolor": "#efefef",
"ticksuffix": "%",
"showticksuffix": "last",
"tickcolor": "rgba(0, 0, 0, 0)",
"tick0": 0,
"dtick": 3,
"ticklen": 10,
"showline": False, # hide the dark axis line
"tickfont": {"color": "#444"},
},
"hole": calm / 100,
},
}
return {"layout": rose_layout, "data": traces}
@app.callback(
Output("rose_sxs", "figure"), [Input("communities-dropdown", "value")]
)
def update_rose_sxs(community):
"""
Create side-by-side (sxs) plot of wind roses from different decades
"""
# t = top margin in % of figure.
subplot_spec = dict(type="polar", t=0.02)
fig = make_subplots(
rows=1,
cols=2,
horizontal_spacing=0.03,
#vertical_spacing=0.04,
specs=[[subplot_spec, subplot_spec]],
subplot_titles=["1980-1999", "2010-2019"],
)
max_axes = pd.DataFrame()
month = 1
c_data = data.loc[data["sid"] == community]
data_list = [c_data.loc[c_data["decade"] == decade] for decade in ["1990-1999", "2010-2019"]]
max_axes = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 10 00:05:49 2021
@author: <NAME>
"""
import requests
import json
import time
from datetime import date, timedelta
import itertools
from ftfy import fix_encoding
import unidecode
import pandas as pd
class admetricks_api:
"""
A class to generate requests to the Admetricks REST API and get a report.
The operation of the methods is subject to the correct input of the variables and
to the admetricks user you are using having access to the admetricks REST API.
To learn more about the API documentation, go to https://dev.admetricks.com/#introduccion
...
Attributes
----------
username : str
user used to login in Admetricks
password : str
        password used to login in Admetricks
Methods
-------
reports_generator(country = None, ad_type = None, device = None, since_date = None):
Returns a dataframe with a full report with the information of the API
screenshots_data(country = None, site = None, since_date = None, until_date = None):
Returns a dataframe with the raw information of captured screenshots
"""
dictionary_countrys = {1:'chile',
2:'colombia',
3:'argentina',
4:'brasil',
5:'españa',
6:'peru',
7:'mexico',
8:'honduras',
9:'puerto rico',
10:'panama',
11:'uruguay',
12:'costa rica',
13:'guatemala',
14:'ecuador',
15:'venezuela',
16:'nicaragua',
17:'salvador',
18:'republica dominicana',
19:'paraguay'}
device = {1:'desktop', 2:'mobile'}
ad_type = {1:'display', 2:'video', 3:'text'}
current_date = date.today().isoformat()
days_before = (date.today()-timedelta(days=30)).isoformat()
combinations = list(itertools.product(list(device.values()),list(ad_type.values())))
def __init__(self, username = None, password = None):
"""
You provide the necessary data to authenticate within Admetricks.
Parameters
----------
username : str
username used to login in Admetricks
password : str
password used to login in Admetricks
"""
self.username = username
self.password = password
url = """https://clientela.admetricks.com/o/token/?username={username}&password={password}&client_id=IW8M80h7qgCaSz4hPm3gr3wJP89NiJTPyhkwPurT&client_secret=KnBW84uyHlxwlNrKOXyym6Ro1IT6IlYdhScdop63hHddCzJIxUwDG7VItNgEONb1U2ebEH6fBmkYgX9LrZD4uqFJlYscHYn9MLxOm2qVccNE2WGEuePpKA7t3jQ2CvMu&grant_type=password"""
response = requests.post(url.format(username = self.username, password = self.password))
res = json.loads(response.text)
self.token = res.get('access_token')
print('Your active token is {}'.format(self.token))
print(response)
def reports_generator(self, country = None, ad_type = None, device = None, since_date = None):
"""
A function that returns a dataframe with a full report with the information of the API.
Parameters
----------
country : str
name of your country.
ad_type : str
Type of ad you want to study. The options are: [all, display, video, text]
device : str
Type of device you want to study. The options are: [all, desktop, mobile]
since_date : str
From what date do you want to export data.
Returns
-------
DataFrame
"""
if isinstance(country, type(None)):
            raise ValueError('Define your country')
if isinstance(ad_type, type(None)):
ad_type = 'all'
if isinstance(device, type(None)):
device = 'all'
if isinstance(since_date, type(None)):
since_date = str(self.days_before)
country = country.lower()
country = unidecode.unidecode(country)
my_dataframe = pd.DataFrame()
header = {
'Authorization': 'Bearer '+ self.token,
'content-type': 'application/json'}
country_value = list(self.dictionary_countrys.keys())[list(self.dictionary_countrys.values()).index(country)]
if ad_type == 'all':
if device == 'all':
for devices, ad_types in self.combinations:
device_value = list(self.device.keys())[list(self.device.values()).index(devices)]
ad_type_value = list(self.ad_type.keys())[list(self.ad_type.values()).index(ad_types)]
params = (('day', since_date), ('country', str(country_value)), ('device', str(device_value)), ('ad_type', str(ad_type_value)),)
requested = requests.post(url = 'https://clientela.admetricks.com/market-report/data/v3/', headers = header, params = params)
data = json.loads(requested.text)
my_dataframe = pd.concat([my_dataframe, pd.DataFrame.from_dict(data['data'])])
time.sleep(0.5)
else:
device_value = list(self.device.keys())[list(self.device.values()).index(device)]
for value, names in self.ad_type.items():
params = (('day', since_date), ('country', str(country_value)), ('device', str(device_value)), ('ad_type', str(value)),)
requested = requests.post(url = 'https://clientela.admetricks.com/market-report/data/v3/', headers = header, params = params)
data = json.loads(requested.text)
my_dataframe = pd.concat([my_dataframe, pd.DataFrame.from_dict(data['data'])])
time.sleep(0.5)
else:
if device == 'all':
ad_type_value = list(self.ad_type.keys())[list(self.ad_type.values()).index(ad_type)]
for value, names in self.device.items():
params = (('day', since_date), ('country', str(country_value)), ('device', str(value)), ('ad_type', str(ad_type_value)),)
requested = requests.post(url = 'https://clientela.admetricks.com/market-report/data/v3/', headers = header, params = params)
data = json.loads(requested.text)
my_dataframe = pd.concat([my_dataframe, pd.DataFrame.from_dict(data['data'])])
time.sleep(0.5)
else:
device_value = list(self.device.keys())[list(self.device.values()).index(device)]
ad_type_value = list(self.ad_type.keys())[list(self.ad_type.values()).index(ad_type)]
params = (('day', since_date), ('country', str(country_value)), ('device', str(device_value)), ('ad_type', str(ad_type_value)),)
requested = requests.post(url = 'https://clientela.admetricks.com/market-report/data/v3/', headers = header, params = params)
data = json.loads(requested.text)
my_dataframe = pd.concat([my_dataframe, | pd.DataFrame.from_dict(data['data']) | pandas.DataFrame.from_dict |
import tkinter as tk
import os
import sys
import pandas as pd
import numpy as np
from PIL import Image, ImageTk
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import requests
from bs4 import BeautifulSoup
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
from my_functions import img_google
pd.set_option('mode.chained_assignment', None)
# TODO data browser
class timeline(tk.Tk):
def __init__(self, filename=os.path.join(os.path.dirname(sys.argv[0]), 'some_dates.csv')):
tk.Tk.__init__(self)
self.title("timeline")
self.fr_time, self.fr_cat, self.fr_scales, self.fr_img = self.config_frames()
self.colors = pd.Series(['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black'])
self.bind('<Return>', self.redraw)
# define scale values
self.scales_info = pd.DataFrame(columns=['var_name', 'xlabel', 'title', 'default_min', 'default_max', 'tot_min', 'tot_max', 'cat_order'])
self.scales_info.loc[len(self.scales_info)] = ['mya', 'million years ago', 'History of Life', -4600, 0, -4600, 0, ['supereon', 'eon', 'era', 'period', 'epoch', 'event']]
self.scales_info.loc[len(self.scales_info)] = ['year', 'year', 'Modern History', 1500, 2000, -3500, 2019, ['epoch', 'person', 'art', 'event', 'invention']]
self.scales_info.loc[len(self.scales_info)] = ['pre', 'kilo years ago', 'Prehistoric Time', -1000, 0, -3600, 0, ['epoch', 'event', 'invention', 'art', 'person']]
# selected scale
self.scale_type = tk.StringVar()
self.scale_type.set('pre')
self.myscale = self.get_myscale()
self.year_from_var = tk.IntVar()
self.year_to_var = tk.IntVar()
self.year_from_var.set(self.myscale['default_min'])
self.year_to_var.set(self.myscale['default_max'])
# load total data file
self.filename = filename
self.df_tot = self.load_data()
# make radiobuttons
for i, scale in self.scales_info.iterrows():
tk.Radiobutton(self.fr_scales, text=scale.title, variable=self.scale_type, value=scale.var_name, command=self.new_scale).pack()
# OK button
tk.Button(self.fr_time, text="OK", command=self.redraw).grid(row=2, column=0, sticky='NSEW')
# prepare img
self.label_value, self.img_label = self.init_img()
# initialize attributes
self.df_orig = self.get_df_orig()
self.df_cat = self.get_df_cat()
self.df = self.get_df()
self.new_scale()
def config_frames(self):
# frame configuration
self.geometry("%dx%d+0+0" % (self.winfo_screenwidth(), self.winfo_screenheight()))
tk.Grid.rowconfigure(self, 0, weight=1)
tk.Grid.rowconfigure(self, 1, weight=2)
tk.Grid.columnconfigure(self, 0, weight=1)
tk.Grid.columnconfigure(self, 1, weight=1)
tk.Grid.columnconfigure(self, 2, weight=1)
tk.Grid.columnconfigure(self, 3, weight=1)
fr_time = tk.Frame(self)
fr_time.grid(row=0, column=0, sticky="nsew")
fr_cat = tk.Frame(self)
fr_cat.grid(row=0, column=1, sticky="nsew")
fr_scales = tk.Frame(self)
fr_scales.grid(row=0, column=2, sticky="nsew")
fr_img = tk.Frame(self)
fr_img.grid(row=0, column=3, sticky="nsew")
tk.Grid.rowconfigure(fr_img, 0, weight=1)
tk.Grid.rowconfigure(fr_img, 1, weight=4)
return fr_time, fr_cat, fr_scales, fr_img
def get_myscale(self):
scale_type = self.scale_type.get()
        myscale = self.scales_info[self.scales_info.var_name == scale_type].to_dict('records')[0]
return myscale
def draw_slider(self):
# TODO: why does this line have to be repeated?
self.year_from_var = tk.IntVar()
self.year_to_var = tk.IntVar()
self.year_from_var.set(self.myscale['default_min'])
self.year_to_var.set(self.myscale['default_max'])
#
        scale_length = int(self.winfo_screenwidth() / 5)
# min scale
slid_min = tk.Scale(self.fr_time, length=scale_length, sliderlength=10, label='Time span:',
from_=self.myscale['tot_min'], to=self.myscale['tot_max'], orient=tk.HORIZONTAL, variable=self.year_from_var)
slid_min.grid(row=0, column=0, sticky='NSEW', padx=4)
# max scale
slid_max = tk.Scale(self.fr_time, length=scale_length, sliderlength=10, tickinterval=1000, resolution=1,
from_=self.myscale['tot_min'], to=self.myscale['tot_max'], orient=tk.HORIZONTAL, variable=self.year_to_var)
slid_max.grid(row=1, column=0, sticky='NSEW', padx=4)
def get_df_cat(self):
# create dataframe to store information about categories
df_cat = pd.DataFrame({'category': self.df_orig['category'].unique()})
df_cat['category'] = | pd.Categorical(df_cat['category'], self.myscale['cat_order']) | pandas.Categorical |
# all domains
# merge/split common boundary x = max(3bin,0.1 TAD Length)
# region < args.remote
# less complex
# zoom
# to filter the strength first
import pandas as pd
import numpy as np
#from tqdm import tqdm
import argparse
import os
# import warnings
# warnings.filterwarnings('ignore')
# the arguments from command line
parser = argparse.ArgumentParser(description='python scriptname <-d> <-t> [options]')
parser.add_argument('-d','--diff', type=str, default = None,help="path/ the text of diffdoamin's outcome")
parser.add_argument('-t','--tad',type=str, default=None,help='path/ the other tadlist')
parser.add_argument('-o','--out',type=str,default=None,help='the output path')
parser.add_argument('-l','--limit',type=int,default=40000,help='the range(length of bases) to judge the common boundary')
parser.add_argument('-k','--kpercent',type=int,default=10,help='the common boundareis are within max(l*bin,k% TAD length)')
parser.add_argument('-r','--remote',type=int,default=1000000,help='the limitation of the biggest region')
parser.add_argument('-s1','--skip1',type=int,default=25,help='to skip the first s1 rows in "--diff" file; if you input 25, the first 25 rows [0,24] will be skipped.')
parser.add_argument('-s2','--skip2',type=int,default=None,help='to skip the first s2 rows in the other tadlist file')
parser.add_argument('--sep1',type=str,default='\t',help="the separator of the diffdomain's outcome (like ',')")
parser.add_argument('--sep2',type=str,default='\t',help="the separator of the other tadlist")
args = parser.parse_args()
# load the files
data = pd.read_table(args.diff,skiprows=args.skip1,sep=args.sep1)
tad = pd.read_table(args.tad,skiprows=args.skip2,sep=args.sep2,header=None)
#preprocessing
cols = data.columns
data.rename(columns={cols[0]:'chr',cols[1]:'start',cols[2]:'end'},inplace=True)
data_diff = data.loc[data['adj_pvalue']<0.05,['chr','start','end']]
data_diff['significant'] = 1
data_diff.reset_index(inplace=True,drop=True)
tad = tad.iloc[:,0:3]
tad.columns = ['chr','start','end']
tad.sort_values(by=['chr','start','end'],inplace=True)
tad.reset_index(inplace=True,drop = True)
tad['range'] = list(map(lambda a,b:(a,b) , tad.start,tad.end))
# preparation
chrs = list(map(str,list(range(1,23))))+['X']
colnames = ['chr','start','end','range','type','origin','subtype','significant']
tad_ = data_main = loss = single = merge = split = multi = pd.DataFrame(columns=colnames)
tad_ = pd.concat([tad_,tad],axis=0)
tad = tad_
data_main = pd.concat([data_main,data.iloc[:,0:3]],axis=0)
data_main['significant'] = 0
data_main = pd.concat([data_main,data_diff],axis=0)
data_main.drop_duplicates(subset=['chr','start','end'],keep='last',inplace=True)
data_main['range'] = list(map(lambda a,b:(a,b) , data_main.start,data_main.end))
data_main['origin'] = 'diffdomain'
data_main.sort_values(by=['chr','start','end'],inplace=True)
data_main.reset_index(inplace=True,drop=True)
def identical(boundary1,boundary2):
# to judge the "common boundary"
if int(boundary1) <= int(boundary2)+limit and int(boundary1) >= int(boundary2)-limit:
return True
else:
return False
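# Example (sketch): `identical` reads the module-level `limit` that the loops below set;
# here it is assigned explicitly just to illustrate the comparison.
def _example_identical():
    global limit
    limit = 40000                        # 40 kb tolerance, matching the --limit default
    return identical(1000000, 1020000)   # True: the boundaries differ by 20 kb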
def cross(main,vise):
    # main is the protagonist tad
# to find the tads related to main in vise
note=pd.DataFrame(columns=colnames)
for i in range(vise.shape[0]):
if (int(main['end'])-limit > int(vise.loc[i,'start']) and int(main['start'])+limit < int(vise.loc[i,'end']) ):
note=pd.concat([note,pd.DataFrame(vise.loc[i,:].values.reshape(1,-1),columns=colnames)],axis=0)
return note
def n_of_region(outcome):
# to count the number of regions in the dataframe
n_region = 0
if len(outcome) != 0 :
n_region = 1
for i in range(2,len(outcome)):
if outcome['origin'].values[i]=='diffdomain' and outcome['origin'].values[i-1]=='the other tadlist':
n_region = n_region+1
return n_region
def n_diffdomain(outcome):
n_diff = outcome.loc[outcome['origin']=='diffdomain',:].shape[0]
return n_diff
# the 4th virsion+ bin
# try:
for c in chrs:
temp = data_main.loc[data_main['chr']==c,:].copy()
tadlist = tad.loc[tad['chr']==c,:].copy()
tadlist['origin'] = 'the other tadlist'
temp.reset_index(inplace=True,drop=True)
tadlist.reset_index(inplace=True,drop=True)
temp = temp[colnames]
tadlist = tadlist[colnames]
temp['start'] = temp['start'].astype(int)
temp['end'] = temp['end'].astype(int)
tadlist['start'] = tadlist['start'].astype(int)
tadlist['end'] = tadlist['end'].astype(int)
# filter the strength-change diffdomains and other non-significantly differentail tads with common boudaries in vise tadlist
tad_index = []
cross_index = []
for i in range(temp.shape[0]):
# the i th TADs in the result of DiffDomain
# to filter the TADs with common boundaries in different conditions
# initialize the variables
note_tad = note_cross = pd.DataFrame(columns=colnames)
# set the "limit" for judging the common boundaries
limit = max(args.limit,args.kpercent*0.01*(temp['end'][i]-temp['start'][i]))
note_tad = pd.concat([note_tad,pd.DataFrame(temp.loc[i,:].values.reshape(1,-1),columns=colnames)],axis=0)
for k in range(tadlist.shape[0]):
if (identical(temp.loc[i,'start'],tadlist.loc[k,'start'])) and (identical(temp.loc[i,'end'],tadlist.loc[k,'end'])) :
note_cross = pd.concat([note_cross,pd.DataFrame(tadlist.loc[k,:].values.reshape(1,-1),columns=colnames)],
axis=0,ignore_index = True)
cross_index.append(k)
tad_index.append(i)
n_cross = note_cross.shape[0]
if n_cross !=0 :
# in case that there are TADs in temp having common boundaries but not in tadlist
for j in range(i+1,temp.shape[0]):
# to find the TADs (in the result of DiffDomain) located on the same boundaries with the i th TADs
if (identical(temp.loc[i,'start'],temp.loc[j,'start'])) and (identical(temp.loc[i,'end'],temp.loc[j,'end'])):
note_tad = pd.concat([note_tad,pd.DataFrame(temp.loc[j,:].values.reshape(1,-1),columns=colnames)],
axis=0,ignore_index = True)
tad_index.append(i)
tad_index.append(j)
note_tad.drop_duplicates(subset=['chr','start','end'],inplace=True)
note_cross.drop_duplicates(subset=['chr','start','end'],inplace=True)
n_tad = note_tad.shape[0]
if n_tad ==1 and n_cross ==1 :
note_tad['type'] = 'single'
note_tad['subtype'] = 'strength'
single = pd.concat([single,note_tad,note_cross],axis=0,
ignore_index = True)
elif n_tad == 1 and n_cross >=2 :
note_tad['type'] = 'split'
split = pd.concat([split,note_tad,note_cross],axis=0,
ignore_index = True)
elif n_tad >= 2 and n_cross ==1 :
note_tad['type'] = 'merge'
merge = pd.concat([merge,note_tad,note_cross],axis=0,
ignore_index = True)
elif n_tad >= 2 and n_cross >= 2 :
if n_tad == n_cross :
note_tad['type'] = 'single'
note_tad['subtype'] = 'strength'
single = pd.concat([single,note_tad,note_cross],axis=0,
ignore_index = True)
else:
note_tad['type'] = 'complex'
multi = pd.concat([multi,note_tad,note_cross],axis=0,
ignore_index = True)
temp.drop(tad_index,inplace=True)
temp.reset_index(drop=True,inplace=True)
tadlist.drop(cross_index,inplace = True)
tadlist.reset_index(drop=True,inplace=True)
# temp_sig = temp.loc[temp['significant']==1,:].copy()
# temp_sig.reset_index(drop = True,inplace=True)
for i in range(temp.shape[0]):
# to adjust the longest distance between "common boundaries"
# to find the related TADs without common boundaries in different conditions
limit = max(args.limit,(temp['end'][i]-temp['start'][i])*args.kpercent*0.01)
note_cross = pd.DataFrame(columns=colnames)
note_tad = pd.DataFrame(columns=colnames)
# to find the tads in tadlist related to the significantly differential tad
note_cross = pd.concat([note_cross,cross(temp.iloc[i,:],tadlist)],axis=0,
ignore_index = True)
note_tad = pd.concat([note_tad,pd.DataFrame(temp.iloc[i,:].values.reshape(1,-1),columns=colnames)],
axis=0,ignore_index = True)
n_cross = note_cross.shape[0]
if n_cross == 0:
# the significantly differential tad grew out of nothing
note_tad['type'] = 'loss'
loss = pd.concat([loss,note_tad],axis=0
,ignore_index = True)
elif n_cross >=1:
flag = 1
note_tad['start'] = note_tad['start'].astype(int)
note_tad['end'] = note_tad['end'].astype(int)
note_cross['start'] = note_cross['start'].astype(int)
note_cross['end'] = note_cross['end'].astype(int)
while (flag == 1) and (max(note_tad['end'])-min(note_tad['start']) <= int(args.remote)):
for p in range(note_cross.shape[0]):
# to find TADs related to the tads found in vise tadlist
note_tad = pd.concat([note_tad,cross(note_cross.iloc[p,:],temp)],axis=0,ignore_index = True)
for q in range(note_tad.shape[0]):
# to find TADs in the tadlist related to the TADs in the result of DiffDomain
note_cross = pd.concat([note_cross,cross(note_tad.iloc[q,:],tadlist)],axis=0,ignore_index = True)
first_tad = note_tad.loc[note_tad.start == min(note_tad.start),:]
last_tad = note_tad.loc[note_tad.end == max(note_tad.end),:]
first_cross = note_cross.loc[note_cross.start == min(note_cross.start),:]
last_cross = note_cross.loc[note_cross.end == max(note_cross.end),:]
thres1 = pd.concat([cross(first_tad.iloc[0,:],tadlist),cross(last_tad.iloc[0,:],tadlist)],axis=0)
thres2 = pd.concat([cross(first_cross.iloc[0,:],temp),cross(last_cross.iloc[0,:],temp)],axis=0)
if (thres1['range'].isin(note_cross['range'])).all() and thres2['range'].isin(note_tad['range']).all():
flag = 2
note_tad.drop_duplicates(subset=['chr','start','end'],inplace=True)
note_cross.drop_duplicates(subset=['chr','start','end'],inplace=True)
note_tad.reset_index(inplace=True,drop=True)
note_cross.reset_index(inplace=True,drop=True)
n_tad = note_tad.shape[0]
n_cross = note_cross.shape[0]
if n_tad == 1 and n_cross == 1:
note_tad['type'] = 'single'
note_tad['subtype'] = 'zoom'
single = | pd.concat([single,note_tad,note_cross],axis=0,ignore_index = True) | pandas.concat |
# pip install git+https://github.com/alberanid/imdbpy
# pip install imdbpy
from imdb import IMDb, IMDbDataAccessError
import pandas as pd
import time
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
import ast
from collections import defaultdict
import multiprocessing
dct_no_entries = defaultdict(int)
import numpy as np
import itertools
import os
from collections import Counter
from time import sleep
import math
import json
import janitor
import utils.plot_utils as utils
# manager = multiprocessing.Manager()
# shared_list = manager.list()
def benchmark_string_comparison():
import time
dct_foo = {'alpha':3,
'beta':1,
'gamma':2}
a = id('<NAME>')
b= id('<NAME>')
avg_a = avg_b= avg_c = avg_d =avg_e= avg_f=0
for i in range(0,10000):
        start = time.time()
        com = dct_foo['alpha']==dct_foo['beta']
        avg_a += time.time() - start
        start = time.time()
        com = 3==1
        avg_b += time.time() - start
        start = time.time()
        com = 'alpha' == 'beta'
        avg_c += time.time() - start
        start = time.time()
        com = '<NAME>' == '<NAME>'
        avg_d += time.time() - start
        start = time.time()
        com = id('<NAME>') == id('<NAME>')
        avg_e += time.time() - start
        start = time.time()
        com = a == b
        avg_f += time.time() - start
print(i)
print(id('foo'))
avg_a = (avg_a / i) *1000
avg_b = (avg_b/i) * 1000
avg_c = (avg_c/i) * 1000
avg_d = (avg_d/i) * 1000
avg_e = (avg_e / i) * 1000
print(' Avg_a:{} \n Avg_b:{} \n Avg_c:{} \n Avg_d:{} \n Avg_e:{} \n Avg_f:{}'.format(avg_a,avg_b,avg_c,avg_d, avg_e, avg_f ))
# benchmark_string_comparison()
#%%
import pandas as pd
# df = df_meta.value_counts()
# print(df.head())
def save_dict_as_json(dct, name):
with open('../data/generated/' + name, 'w') as file:
json.dump(dct, file, indent=4, sort_keys=True)
def load_json_as_dict(name):
with open('../data/generated/' + name, 'r') as file:
id2names = json.loads(file)
return id2names
def load_dataset(small_dataset):
if (small_dataset):
print("Load small dataset")
#%%
df_movies = pd.read_csv("../data/movielens/small/links.csv")
else:
print("Load large dataset")
df_movies = pd.read_csv("../data/movielens/large/links.csv")
return df_movies
def fetch_example():
# create an instance of the IMDb class
ia = IMDb()
# get a movie
movie = ia.get_movie('0133093')
print(ia.get_movie_keywords('0133093'))
print('movie \n{}'.format(movie))
# print the names of the directors of the movie
print('Directors:')
for director in movie['directors']:
print(director['name'])
# print the genres of the movie
print('Genres:')
for genre in movie['genres']:
print(genre)
# search for a person name
people = ia.search_person('<NAME>')
for person in people:
print(person.personID, person['name'])
def beautify_names(dct_data, key):
# clean actors:
# start_time = time.time()
ls_names = []
try:
for actor in dct_data[key]:
if(bool(actor)):
ls_names.append(actor['name'])
except KeyError:
dct_no_entries[key]+=1
# print()No entries for key:
# print("--- %s seconds ---" % (time.time() - start_time))
# total_time_one +=time.time() - start_time
return ls_names
def remove_keys(dict, keys):
if(keys == None):
keys = ['certificates', 'cover url', 'thanks',
'special effects companies', 'transportation department',
'make up department', 'special effects', 'stunts', 'costume departmen',
'location management', 'editorial department', 'casting directors', 'art directors',
'production managers', 'art department', 'sound department',
'visual effects', 'camera department', 'costume designers'
'casting department', 'miscellaneous', 'akas', 'production companies', 'distributors',
'other companies', 'synopsis', 'cinematographers', 'production designers',
'custom designers', 'Opening Weekend United Kingdom', 'Opening Weekend United States']
for key in keys:
dict.pop(key, None)
return dict
def fetch_movie(id, imdb):
# TODO Actually it should be checked whether this is single process or not, bc the IMDB Peer error occurs only w/ multiprocessing
movie = imdb.get_movie(id)
# TODO Optional: select metadata
dct_data = movie.data
# to be cleaned:
keys_to_beautify = ['cast', 'directors', 'writers', 'producers', 'composers', 'editors',
'animation department', 'casting department', 'music department', 'set decorators',
'script department', 'assistant directors', 'writer', 'director', 'costume designers']
for key in keys_to_beautify:
dct_data[key] = beautify_names(dct_data, key)
# unwrap box office:
try:
dct_data.update(dct_data['box office'])
del dct_data['box office']
except KeyError:
pass
# print('Unwrap: key error for movieId:{} '.format(movie.movieID))# dct_data['title']
dct_data = remove_keys(dct_data, None)
return dct_data
def fetch_by_imdb_ids(ls_tpl_ids):
imdb = IMDb()
ls_metadata =[]
import random
# cnt_connection_reset=0
try:
# Example:
# (103,1) => entire movie + metadata is missing
# (103,0) => only metadata is missing
for tpl_id_missing in tqdm(ls_tpl_ids, total = len(ls_tpl_ids)): # loop through ls_ids
dct_data={}
id=tpl_id_missing[0]
is_movie_missing = tpl_id_missing[1]
tt_id = imdb_id_2_full_Id(id)
sleep_t = random.randint(2,7)
sleep(sleep_t) # Time in seconds
# if(crawl_from_scratch[0][0]):
dct_data['imdbId'] = id
if(is_movie_missing):
dct_data = fetch_movie(id, imdb)
#Fetch stars of the movie with bs4
ls_stars = fetch_stars(tt_id)
dct_data['stars'] =ls_stars
#add dict to the list of all metadata
ls_metadata.append(dct_data)
except Exception:
print('Exception for id:{}'.format(id))
# cnt_connection_reset+=1
return ls_metadata, dct_no_entries
#extracts baed on column_name a nested list of the attribute, e.g. cast and creates
# a second list with the respective ids that are looked up in actor2id.
# you can extract more columns by adding them to column_name
def ids2names(df_movies, actor2id, column_name):
dct_ls_ids = defaultdict(list)
dct_ls_columns = defaultdict(list)
for idx, row in tqdm(df_movies.iterrows(), total=df_movies.shape[0]):
for column in column_name: # column_name: ['cast','stars']
if (type(row[column]) == list):
ls_names = row[column]
else:
ls_names = ast.literal_eval(
row[column]) # literal_eval casts the list which is encoded as a string to a list
# ls_names = row[column]
dct_ls_columns[column] = ls_names
# dct_ls_columns[column]= dct_ls_columns[column].append(ls_names)
# if(type(row['cast'])==list):
# casts = row['cast']
# else:
# casts = ast.literal_eval(row['cast']) #literal_eval casts the list which is encoded as a string to a list
# if(type(row['stars'])==list):
# stars = row['stars']
# else:
# stars = ast.literal_eval(row['stars'])
for key, ls_names in dct_ls_columns.items():
dct_ls_ids[key].append([actor2id[name] for name in dct_ls_columns[key]])
# ls_ls_cast_ids.append([actor2id[name] for name in casts])
# ls_ls_stars_ids.append([actor2id[name] for name in stars])
return dct_ls_columns, dct_ls_ids
def names2ids(df, column_name):
print('--- Transform names to ids and add an extra column for it ---')
# df_movies = pd.read_csv("../data/movielens/small/df_movies.csv")
df_movies = df
actor2id = defaultdict(lambda: 1+len(actor2id))
# [ls_casts[0].append(ls) for ls in ls_casts]
ls_names = []
#Add all names to one single list
print('... Collect names:')
for idx, row in tqdm(df_movies.iterrows(), total=df_movies.shape[0]):
for column in column_name:
if(type(row[column])==list):
ls_names.extend(row[column])
else:
ls_names.extend(ast.literal_eval(row[column])) #literal_eval casts the list which is encoded as a string to a list
# ls_elem = ls_elem.replace("[",'').replace("'",'').split(sep=',')
c = Counter(ls_names)
dct_bar = dict(c)
for elem in list(ls_names):
actor2id[elem] #Smart because, lambda has everytime a new element was added, a new default value
# actor2id[elem] = actor2id[elem] + 1 #count the occurence of an actor
# if (actor2id[elem] == 0): #assign an unique id to an actor/name
# actor2id[elem] = len(actor2id)
print(actor2id)
id2actor = {value: key for key, value in actor2id.items()}
save_dict_as_json(actor2id, 'names2ids.json')
save_dict_as_json(id2actor, 'ids2names.json')
# print(id2actor[2])
print("... Assign Ids to names:")
dct_ls_columns, dct_ls_ids = ids2names(df_movies,actor2id,column_name)
# lists look like this:
# dct_ls_columns = {'cast':['wesley snipes','brad pitt'...]
# dct_ls_ids ={'cast':[22,33,...]}
for key, ls_names in dct_ls_columns.items():
df_movies[key+"_id"] = dct_ls_ids[key]
return df_movies
def fetch_stars(id):
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}
ls_stars = []
try:
url = "https://www.imdb.com/title/{}/?ref_=rvi_tt".format(id)
req = requests.get(url, headers)
soup = BeautifulSoup(req.content, 'html.parser')
h4_stars = soup.find("h4", text='Stars:')
div_tag = h4_stars.parent
next_a_tag = div_tag.findNext('a')
while (next_a_tag.name != 'span'):
if (next_a_tag.name == 'a'):
ls_stars.append(str(next_a_tag.contents[0]))#str() casts from NavigabelString to string
next_a_tag = next_a_tag.next_sibling
# class 'bs4.element.Tag'>
# next_a_tag.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling['class'][0] == 'ghost'
# print(ls_stars)
except AttributeError:
print('AttributeError (most likely no stars are available), movieId:{}'.format(id))
finally:
return ls_stars
# TODO Unfinished: This is not done yet
def enhance_by_stars(df):
pass
# tqdm.pandas(desc="my bar!")
# df['stars'] = df['movieID'].apply(lambda id: fetch_stars(id))
# return df
def print_exception_statistic(dct_no_entries, len_crawled_ids):
print('[--------- Exception Statistics ---------]')
# print('No. of ConnectionResetError: {}'.format(cnt_reset))
print('Joined No-Keys-Exception for the following keys:')
for key, value in dct_no_entries.items():
print("\tKey: {}, count:{}, relative: {}".format(key, value, value /len_crawled_ids))
print('[----------------------------------------]')
def worker(ids):#ids, crawl_from_scratch
# ids = args[0]
# ls_missing_imdb_ids = args[1]
# global shared_list https://stackoverflow.com/questions/40630428/share-a-list-between-different-processes-in-python
metadata, dct_no_entries = fetch_by_imdb_ids(ids)
# shared_list.extend(metadata)
# print('worker done')
return metadata, dct_no_entries
def crawl_metadata(ls_imdb_ids,multi_processing, no_processes, develop_size):
print('Fetching metadata of {} movies'.format(len(ls_imdb_ids)))
if(develop_size>0):
ls_imdb_ids = ls_imdb_ids[:develop_size]
if (multi_processing):
print('Start multiprocessing...')
start_time = time.time() # measure time
no_processes = no_processes
ls_ls_metadata = []
ls_dct_exceptions = []
cnt_reset = 0
len_dataset = len(ls_imdb_ids)
ls_splitted = np.array_split(np.array(ls_imdb_ids), no_processes)
# ls_missing_imdb_ids = np.array_split(np.array(ls_missing_imdb_ids), no_processes)
pool = multiprocessing.Pool(processes=no_processes)
# m = multiprocessing.Manager()
# q = m.Queue()
# Pool.map returns list of pairs: https://stackoverflow.com/questions/39303117/valueerror-too-many-values-to-unpack-multiprocessing-pool
for ls_metadata, dct_no_entries in pool.map(worker,ls_splitted): # ls_ls_metadata=pool.map(worker, ls_splitted):
# append both objects to a separate list
ls_ls_metadata.append(ls_metadata)
ls_dct_exceptions.append(dct_no_entries)
print("--- %s seconds ---" % (time.time() - start_time))
merged_res = itertools.chain(*ls_ls_metadata) # unpack the list to merge n lists
ls_metadata = list(merged_res)
df_exceptions = pd.DataFrame(ls_dct_exceptions).sum() # sum over all rows
print_exception_statistic(df_exceptions.to_dict(),len(ls_imdb_ids))
print("--- %s seconds ---" % (time.time() - start_time))
else:
start_time = time.time()
ls_metadata, dct_no_entries = fetch_by_imdb_ids(ls_imdb_ids)
print_exception_statistic(dct_no_entries)
print("--- %s seconds ---" % (time.time() - start_time))
df_meta = pd.DataFrame(ls_metadata)
print('Shape of crawled dataset:{}'.format(df_meta.shape[0]))
return df_meta
# tmp_list = []
# for element in df_meta[column]:
#
# ls_ls_casted = [eval(str_elem) for str_elem in df_meta[element].values]
# itertools.chain(*ls_ls_casted)
# if(type(element)==str):
# tmp_list.extend(eval(element))
# # tmp_list.value
# dct_rel_freq[element] =
# df_meta[column] = tmp_list
# df = df_meta['cast'][0].value_counts()
# print(df)
def imdb_id_2_full_Id(imdb_id):
# str_imdb_id = row['imdbId'].astype(str)
# if(len(str_imdb_id) >6):
if (imdb_id >= 1000000):
prefix = 'tt'
elif(imdb_id >= 100000):
prefix = 'tt0'
elif(imdb_id >= 10000):
prefix = 'tt00'
else:
prefix = 'tt000'
return prefix + str(imdb_id)
def join_kaggle_with_links():
df_movies_large = pd.read_csv('../data/kaggle/df_imdb_kaggle.csv')
df_links = load_dataset(small_dataset=True)
# df_links['imdb_title_id'] = 'tt0'+df_links['imdbId'].astype(str) if
for idx in range(df_links.shape[0]): # iterrows does not preserve dtypes
full_id = imdb_id_2_full_Id(df_links.loc[idx, 'imdbId'])
df_links.loc[idx, 'imdb_title_id'] = full_id
df_links_joined_one = df_links.set_index('imdb_title_id').join(df_movies_large.set_index('imdb_title_id'),
on='imdb_title_id', how='left')
df_links_joined_one.to_csv('../data/generated/df_joined_partly.csv', index_label='imdb_title_id')
def main():
# join_kaggle_with_links()
df_links_joined_one = | pd.read_csv('../data/generated/df_joined_partly.csv') | pandas.read_csv |
"""
Note, this contains both the older V1 processing code
as well as the V2 code. The V1 code isn't tested to
work for a full processing cycle, and may need
some adjustments.
"""
# pylint: disable=all
import pandas as pd
import dask.dataframe as dd
import os
from datetime import datetime
from luts import speed_ranges
directory = "./data/station"
cols = ["sid", "direction", "speed", "month"]
def preprocess_stations():
"""
This producess two (large) files which combine
all the individual station files into one tidy table.
stations.csv is ready to be processed into the wind roses.
Values with direction=0 or speed=0 are dropped to avoid
north bias.
mean_stations.csv includes direction=0 and speed=0.
For both, any rows with N/A values are dropped.
"""
if preprocess:
print("*** Preprocessing station data for wind roses & averages... ***")
print("Looking for station CSV files in ", directory)
data = pd.DataFrame(columns=cols)
mean_data = pd.DataFrame(columns=cols)
for filename in os.listdir(directory):
d = pd.read_csv(os.path.join(directory, filename))
# Throw away columns we won't use, and null values
d = d.drop(columns=["sped", "t_actual"])
d = d.dropna()
# Copy for slightly different treatment of
# station data for averages
m = d.copy(deep=True)
# Toss rows where direction is 0, because
# this represents unclear direction. Otherwise,
# the data has a "north bias." Also drop
# values where the speed is 0 (calm)
# for the wind roses.
d = d[d["drct"] != 0]
d = d[d["sped_adj"] != 0]
# Pull month out of t_round column.
d = d.assign(month=pd.to_numeric(d["t_round"].str.slice(5, 7)))
m = m.assign(month=pd.to_numeric(m["t_round"].str.slice(5, 7)))
m = m.assign(year=pd.to_numeric(m["t_round"].str.slice(0, 4)))
d = d.drop(columns=["t_round"])
m = m.drop(columns=["t_round"])
# Rename remaining columns
d.columns = cols
m.columns = ["sid", "direction", "speed", "month", "year"]
data = data.append(d)
mean_data = mean_data.append(m)
data.to_csv("stations.csv")
mean_data.to_csv("mean_stations.csv")
# Needs Dask DF not Pandas.
def averages_by_month(mean_data):
"""
Compute averages for each month by year.
"""
print("*** Precomputing monthly averages by year... ***")
d = mean_data.groupby(["sid", "year", "month"]).mean().compute()
# Drop indices and get table in good shape for writing
d = d.reset_index()
d = d.drop(["direction"], axis=1)
# Weird code -- drop the prior index, which is unnamed
d = d.loc[:, ~d.columns.str.contains("^Unnamed")]
d = d.astype({"year": "int16", "month": "int16"})
d = d.assign(speed=round(d["speed"], 1))
d.to_csv("monthly_averages.csv")
# Requires Dask DF, not Pandas
def process_calm(mean_data):
"""
For each station/year/month, generate a count
of # of calm measurements.
"""
print("*** Generating calm counts... ***")
# Create temporary structure which holds
# total wind counts and counts where calm to compute
# % of calm measurements.
t = mean_data.groupby(["sid", "month"]).size().reset_index().compute()
calms = t
# Only keep rows where speed == 0
d = mean_data[(mean_data["speed"] == 0)]
d = d.groupby(["sid", "month"]).size().reset_index().compute()
calms = calms.assign(calm=d[[0]])
calms.columns = ["sid", "month", "total", "calm"]
calms = calms.assign(percent=round(calms["calm"] / calms["total"], 3) * 100)
calms.to_csv("calms.csv")
def chunk_to_rose(sgroup, station_name=None):
"""
Builds data suitable for Plotly's wind roses from
a subset of data.
Given a subset of data, group by direction and speed.
Return accumulator of whatever the results of the
incoming chunk are.
"""
# Bin into 36 categories.
bins = list(range(5, 356, 10))
bin_names = list(range(1, 36))
# Accumulator dataframe.
proc_cols = ["sid", "direction_class", "speed_range", "count"]
accumulator = pd.DataFrame(columns=proc_cols)
# Assign directions to bins.
# We'll use the exceptional 'NaN' class to represent
# 355º - 5º, which would otherwise be annoying.
# Assign 0 to that direction class.
ds = pd.cut(sgroup["direction"], bins, labels=bin_names)
sgroup = sgroup.assign(direction_class=ds.cat.add_categories("0").fillna("0"))
# First compute yearly data.
# For each direction class...
directions = sgroup.groupby(["direction_class"])
for direction, d_group in directions:
# For each wind speed range bucket...
for bucket, bucket_info in speed_ranges.items():
d = d_group.loc[
(
sgroup["speed"].between(
bucket_info["range"][0], bucket_info["range"][1], inclusive=True
)
== True
)
]
count = len(d.index)
full_count = len(sgroup.index)
frequency = 0
if full_count > 0:
frequency = round(((count / full_count) * 100), 2)
accumulator = accumulator.append(
{
"sid": station_name,
"direction_class": direction,
"speed_range": bucket,
"count": count,
"frequency": frequency,
},
ignore_index=True,
)
return accumulator
def process_roses(data):
"""
For each station we need one trace for each direction.
Each direction has a data series containing the frequency
of winds within a certain range.
Columns:
sid - stationid
direction_class - number between 0 and 35. 0 represents
directions between 360-005º (north), and so forth by 10 degree
intervals.
speed_range - text fragment from luts.py for the speed class
month - 0 for year, 1-12 for month
"""
print("*** Preprocessing wind rose frequency counts... ***")
proc_cols = ["sid", "direction_class", "speed_range", "count", "month"]
rose_data = pd.DataFrame(columns=proc_cols)
groups = data.groupby(["sid"])
for station_name, station in groups:
# Yearly data.
t = chunk_to_rose(station)
t = t.assign(month=0) # year
rose_data = rose_data.append(t)
# Monthly data.
station_grouped_by_month = station.groupby(station["month"])
# TODO -- can this be rewritten to avoid looping
# over the groupby? If so, it'd be much much faster.
for month, station_by_month in station_grouped_by_month:
acc = pd.DataFrame(columns=proc_cols)
t = chunk_to_rose(station_by_month, acc)
t = t.assign(month=month)
rose_data = rose_data.append(t)
rose_data.to_csv("roses.csv")
def process_future_roses():
"""
Process wind roses for future data.
We create the data with decadal groups this way for display:
0 = ERA, 1980-2009
1 = CCSM4/CM3, 2025-2054
2 = CCSM4/CM3, 2070-2099
"""
places = pd.read_csv("./places.csv")
cols = [
"sid",
"gcm",
"decadal_group",
"direction_class",
"speed_range",
"count",
"frequency",
]
future_roses = pd.DataFrame(columns=cols)
# Define date ranges to be consistent
era_end = 2009
start_mid_century = 2025
end_mid_century = 2054
start_late_century = 2070
end_late_century = 2099
for index, place in places.iterrows():
print("[future roses] starting " + place["sid"])
# Read and prep for ERA/CCSM4.
df = pd.read_csv("./data/wrf_adj/CCSM4_" + place["sid"] + ".csv")
df.columns = ["gcm", "sid", "ts", "speed", "direction"]
df["ts"] = pd.to_datetime(df["ts"])
df["year"] = pd.DatetimeIndex(df["ts"]).year
df = df.set_index(["gcm", "year"])
df = df.reset_index()
dk = df.loc[(df.gcm == "ERA") & (df.year <= era_end)]
t = chunk_to_rose(dk, place["sid"])
t["gcm"] = "ERA"
t["decadal_group"] = 0
future_roses = future_roses.append(t)
# For both CCSM4 and CM3, we need two buckets --
# 2031 - 2050, and 2080-2099.
dk = df.loc[(df.gcm == "CCSM4") & (df.year >= start_mid_century) & (df.year <= end_mid_century)]
t = chunk_to_rose(dk, place["sid"])
t["gcm"] = "CCSM4"
t["decadal_group"] = 1
future_roses = future_roses.append(t)
dk = df.loc[(df.gcm == "CCSM4") & (df.year >= start_late_century) & (df.year <= end_late_century)]
dk = dk.reset_index() # for performance.
t = chunk_to_rose(dk, place["sid"])
t["gcm"] = "CCSM4"
t["decadal_group"] = 2
future_roses = future_roses.append(t)
# Read & prep CM3
df = pd.read_csv("./data/wrf_adj/CM3_" + place["sid"] + ".csv")
df.columns = ["gcm", "sid", "ts", "speed", "direction"]
df["ts"] = pd.to_datetime(df["ts"])
df["year"] = pd.DatetimeIndex(df["ts"]).year
df = df.set_index(["gcm", "year"])
df = df.reset_index()
dk = df.loc[(df.gcm == "CM3") & (df.year >= start_mid_century) & (df.year <= end_mid_century)]
dk = dk.reset_index() # for performance.
t = chunk_to_rose(dk, place["sid"])
t["gcm"] = "CM3"
t["decadal_group"] = 1
future_roses = future_roses.append(t)
dk = df.loc[(df.gcm == "CM3") & (df.year >= start_late_century) & (df.year <= end_late_century)]
dk = dk.reset_index() # for performance.
t = chunk_to_rose(dk, place["sid"])
t["gcm"] = "CM3"
t["decadal_group"] = 2
future_roses = future_roses.append(t)
future_roses.to_csv("future_roses.csv")
def process_threshold_percentiles():
dt = | pd.read_csv("WRF_hwe_perc.csv") | pandas.read_csv |
from sales_analysis.data_pipeline import BASEPATH
from sales_analysis.data_pipeline._pipeline import SalesPipeline
import pytest
import os
import pandas as pd
# --------------------------------------------------------------------------
# Fixtures
@pytest.fixture
def pipeline():
FILEPATH = os.path.join(BASEPATH, "data")
DATA_FILES = [f for f in os.listdir(FILEPATH) if f.endswith('.csv')]
DATA = {f : pd.read_csv(os.path.join(FILEPATH, f)) for f in DATA_FILES}
return SalesPipeline(**DATA)
# --------------------------------------------------------------------------
# Data
data = {'customers': {pd.Timestamp('2019-08-01 00:00:00'): 9,
pd.Timestamp('2019-08-02 00:00:00'): 10,
pd.Timestamp('2019-08-03 00:00:00'): 10,
pd.Timestamp('2019-08-04 00:00:00'): 10,
pd.Timestamp('2019-08-05 00:00:00'): 9,
pd.Timestamp('2019-08-06 00:00:00'): 9,
pd.Timestamp('2019-08-07 00:00:00'): 10,
pd.Timestamp('2019-08-08 00:00:00'): 8,
pd.Timestamp('2019-08-09 00:00:00'): 5,
pd.Timestamp('2019-08-10 00:00:00'): 5,
pd.Timestamp('2019-08-11 00:00:00'): 10,
pd.Timestamp('2019-08-12 00:00:00'): 10,
pd.Timestamp('2019-08-13 00:00:00'): 6,
pd.Timestamp('2019-08-14 00:00:00'): 7,
pd.Timestamp('2019-08-15 00:00:00'): 10,
pd.Timestamp('2019-08-16 00:00:00'): 8,
pd.Timestamp('2019-08-17 00:00:00'): 7,
pd.Timestamp('2019-08-18 00:00:00'): 9,
pd.Timestamp('2019-08-19 00:00:00'): 5,
pd.Timestamp('2019-08-20 00:00:00'): 5},
'total_discount_amount': {pd.Timestamp('2019-08-01 00:00:00'): 15152814.736907512,
pd.Timestamp('2019-08-02 00:00:00'): 20061245.64408109,
pd.Timestamp('2019-08-03 00:00:00'): 26441693.751396574,
pd.Timestamp('2019-08-04 00:00:00'): 25783015.567048658,
pd.Timestamp('2019-08-05 00:00:00'): 16649773.993076814,
pd.Timestamp('2019-08-06 00:00:00'): 24744027.428384878,
pd.Timestamp('2019-08-07 00:00:00'): 21641181.771564845,
pd.Timestamp('2019-08-08 00:00:00'): 27012160.85245146,
pd.Timestamp('2019-08-09 00:00:00'): 13806814.237002019,
pd.Timestamp('2019-08-10 00:00:00'): 9722459.599448118,
pd.Timestamp('2019-08-11 00:00:00'): 20450260.26194652,
pd.Timestamp('2019-08-12 00:00:00'): 22125711.151501,
pd.Timestamp('2019-08-13 00:00:00'): 11444206.200090334,
| pd.Timestamp('2019-08-14 00:00:00') | pandas.Timestamp |
# -*- coding: utf-8 -*-
################ imports ###################
import pandas as pd
import numpy as np
import itertools
# import matplotlib.pyplot as plt
# %matplotlib inline
import welly
from welly import Well
import lasio
import glob
from sklearn import neighbors
import pickle
import math
import dask
import dask.dataframe as dd
from dask.distributed import Client
import random
from xgboost.sklearn import XGBClassifier
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
import multiprocessing
from main import getJobLibPickleResults
#### Adding this bit to silence an error that was causing the notebook to have a dead kernal
#### This is an unsafe solution but couldn't get any of the "right solutions" to work!
#### Ended up using this = https://www.kaggle.com/c/bosch-production-line-performance/discussion/25082
#### Other solutions = https://github.com/dmlc/xgboost/issues/1715 but the solution here didn't seem to work for me?
import os
# os.environ['KMP_DUPLICATE_LIB_OK']='True'
###### Set environment variable to get around weird conda clang error that causes notebook kernal to die. ########
###### Error was: OMP: Error #15: Initializing libomp.dylib, but found libiomp5.dylib already initialized.
###### OMP: Hint This means that multiple copies of the OpenMP runtime have been linked into the program. That is dangerous, since it can degrade performance or cause incorrect results. The best thing to do is to ensure that only a single OpenMP runtime is linked into the process, e.g. by avoiding static linking of the OpenMP runtime in any library. As an unsafe, unsupported, undocumented workaround you can set the environment variable KMP_DUPLICATE_LIB_OK=TRUE to allow the program to continue to execute, but that may cause crashes or silently produce incorrect results. For more information, please see http://openmp.llvm.org/
###### Abort trap: 6
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
################## Class Prediction Results for training dataframe for X #############
def loadMLinstanceAndModel(output_data_inst):
model = getJobLibPickleResults(
output_data_inst, output_data_inst.path_trainclasses, "trainclasses_model.pkl"
)
ML1 = getJobLibPickleResults(
output_data_inst,
output_data_inst.path_trainclasses,
"trainclasses_ML1_instance.pkl",
)
return model, ML1
class class_accuracy:
"""
This class holds several functions for calculating accuracy of the class-identification model
It takes in as the initiation argument, an instance of the ML_obj_class, which contains all the
necessary data already processed with features created and ready to do for the machine-learning task.
It initiates on creation a variety of class instance attributes that mirror those created in the ML_obj_class class.
There are 5 functions. The help function will print some explanitory text.
The rest proceed to predict a dataframe from a trained model, reformat some of the input data so
it can be combined, calculate accuracy, and a final function that runs the last three if you don't want to
run them individually.
The last two functions will return an accuracy nubmer as a percentage of class rows or instances the model predicted corrected.
"""
def __init__(self, ML):
# self.knn_dir = ML.knn_dir
# self.load_dir = ML.load_dir
# self.features_dir = ML.features_dir
self.machine_learning_dir = ML.machine_learning_dir
self.h5_to_load = ML.h5_to_load
self.train_X = ML.train_X
self.train_y = ML.train_y
self.test_X = ML.test_X
self.test_y = ML.test_y
self.train_index = ML.train_index
self.test_index = ML.test_index
self.preSplitpreBal = ML.preSplitpreBal
self.result_df_from_prediction = None
def help(self):
print(
" eventually there will some sort of help printed here to explain this function more and how it is envisioned you wil run it. In other words, step 1, step 2, etc."
)
def predict_from_model(self, model, df_X_toPredict):
"""
The predict_from_model function takes as argument a model that is already trained on training data, in the demo case a
scikit-learn XGBoost model and the dataframe of the columns to predict. From this, it fills in
the self.result_df_from_prediction attribute and returns nothing.
"""
self.result_df_from_prediction = model.predict(df_X_toPredict)
def first_Reformat(self, train_y, TopTarget_Pick_pred):
train_y_indexValues = train_y.index.values
df_result_train = pd.DataFrame(
self.result_df_from_prediction,
index=train_y_indexValues,
columns=[TopTarget_Pick_pred],
)
df_results_train_ = pd.concat([train_y, df_result_train], axis=1)
return df_results_train_
def accuracy_calc(self, train_y, TopTarget_Pick_pred, class_DistFrPick_TopTarget):
df_results_train_ = self.first_Reformat(train_y, TopTarget_Pick_pred)
accuracy = accuracy_score(
df_results_train_[class_DistFrPick_TopTarget],
df_results_train_[TopTarget_Pick_pred],
)
return accuracy
def run_all(
self,
model,
df_X_toPredict,
train_y,
TopTarget_Pick_pred,
class_DistFrPick_TopTarget,
):
self.predict_from_model(model, df_X_toPredict)
return self.accuracy_calc(
train_y, TopTarget_Pick_pred, class_DistFrPick_TopTarget
)
#### Example of use of function above:
##### Creating a class_accuracy instance with the already established ML1 variable for an isntance of the ML_obj_class
# ac = class_accuracy(ML1)
################## Class Prediction Results for training dataframe for X #############
##### Creating a class_accuracy instance with the already established ML1 variable for an isntance of the ML_obj_class
# ac = class_accuracy(ML1)
################## First with training data #############
#### Running the accuracy calculation using the model trained on training data against training data.
#### Testing how well the model predicts the class of each point, with class being categorized distance from actual pick.
# accuracy = ac.run_all(model,ac.train_X,ac.train_y,'TopTarget_Pick_pred','class_DistFrPick_TopTarget')
# print("accuracy of training dataset",accuracy)
################## Then with test data ###############
#### Running the accuracy calculation using the model trained on training data against TEST data.
#### Testing how well the model predicts the class of each point, with class being categorized distance from actual pick.
# accuracy = ac.run_all(model,ac.test_X,ac.test_y,'TopTarget_Pick_pred','class_DistFrPick_TopTarget')
# print("accuracy of test dataset",accuracy)
####################################### THIS IS TEST FOR ACCURACY OVER ALL ROWS, WHICH WE REALLY DON"T CARE ABOUT ##########
############ WE CARE ABOUT THE PICK ############################
# New class for functions that take in point by point distance class prediction and use rolling window and other methods to pick which point should be the top in question
# Make a few different columns classifiers that get the rolling mean of pick classifiers within different windows.
# This will help compare a class prediction of 95 in one part of the well to a class prediction of 95 in a nother part of the well. The assumption being the right prediction will have not just one 100 or 95 prediction but several in close proximity where as the false predictions are more likely to be by themselves:
# Median
# Rolling mean 6
# Rolling mean 12
# Rolling Mean 20
# Sums of rolling all means
######## In the future, it would be nice to calculate error bars as well!!!!! ##########
##################### The next part will attempt to go from classifiers of #####################
##################### (at, near, or far away from the pick in each well) to a single depth prediction for the pick in each well ######################
##################### Class for calculating accuracy of single pick prediction in each well vs. #####################
###################### known pick based on rolling average & median ranking of depths with distance class #####################
##################### predictions of being close to pick. #####################
class InputDistClassPrediction_to_BestDepthForTop:
"""
Explain theyself
"""
def __init__(self, output_data_inst):
self.result_df_dist_class_prediction = None
self.concat_modelResults_w_indexValues = None
self.df_results_trainOrtest_wIndex = None
self.model = None
self.MLobj = None
self.result_df_dist_class_prediction = None
def help(self):
print(
" eventually there will some sort of help printed here to explain this function more and how it is envisioned you wil run it. In other words, step 1, step 2, etc."
)
def load_MLobj(self, MLobj):
self.MLobj = MLobj
print("loaded model into object instance")
def predict_from_model(self, model, df_X_toPredict):
"""
The predict_from_model function takes as argument a model that is already trained on training data, in the demo case a
scikit-learn XGBoost model and the dataframe of the columns to predict. From this, it fills in
the self.result_df_from_prediction attribute and returns nothing.
"""
self.result_df_dist_class_prediction = model.predict(df_X_toPredict)
if type(self.result_df_dist_class_prediction) == None:
print(
"this function didn't work, self.distClassDF_wRollingCols_training is not populated with anything but None"
)
else:
print(
"ran predict_from_model() which runs inside self.result_df_dist_class_prediction = model.predict(df_X_toPredict) access the results by appending .result_df_dist_class_prediction to the class instance"
)
return self.result_df_dist_class_prediction
def load_dist_class_pred_df(self, dist_class_pred_df):
"""
explain theyself
"""
# if self.result_df_dist_class_prediction == None:
self.result_df_dist_class_prediction = dist_class_pred_df
# else:
# print("trying to replace earlier result_df_dist_class_prediction")
def concat_modelResultsNDArray_w_indexValues(
self, distClassModel_resultsNDArry, train_or_test, col_name_prediction
):
#### self,self.result_df_dist_class_prediction,"test",vs.pick_class_str
if train_or_test == "train":
y_indexValues = self.MLobj.train_y.index.values
train_or_test_y = self.MLobj.train_y
else:
y_indexValues = self.MLobj.test_y.index.values
train_or_test_y = self.MLobj.test_y
print(type(distClassModel_resultsNDArry))
print(type(y_indexValues))
if len(distClassModel_resultsNDArry) != len(y_indexValues):
print(
"Two input arguments length does not match. This invalidates an assumption of this function"
)
print(
"length of distClassModel_resultsNDArry is ",
len(distClassModel_resultsNDArry),
" and length of y_indexValues",
len(y_indexValues),
)
else:
# y_indexValues = train_or_test_y.index.values
# df_result = pd.DataFrame(result_test, index=test_y_indexValues, columns=['TopTarget_Pick_pred'])
df_result = pd.DataFrame(
distClassModel_resultsNDArry,
index=y_indexValues,
columns=[col_name_prediction],
)
df_results_test_ = | pd.concat([train_or_test_y, df_result], axis=1) | pandas.concat |
import os
import pandas as pd
from sta_core.handler.db_handler import DataBaseHandler
from sta_core.handler.shelve_handler import ShelveHandler
from sta_api.module.load_helper import global_dict
from sta_api.module.load_helper import tester
from sta_api.module.load_helper import db_exists
from flask import Blueprint, redirect, request
from flask import jsonify
from markupsafe import escape
route_data = Blueprint('data', __name__,)
all_route_data = "/api/v0"
def data_f():
k = {"a": [1,2,3,4,5,6,7,8],
"b": [1,2,3,4,5,6,7,8]}
df = pd.DataFrame(k)
return df
@route_data.route(f'{all_route_data}/data')
def data():
df = data_f()
print(df.head(3))
df_json = df.to_json()
print(df_json)
print(tester())
print( db_exists() )
return df_json
#return "Welcome to strava-data "
@route_data.route(f'{all_route_data}/data/users/all',
methods=["GET"])
def all_users():
dbh = DataBaseHandler(db_type=global_dict["db-type"])
dbh.set_db_path(db_path=global_dict["db-path"])
dbh.set_db_name(db_name=global_dict["db-name"])
all_users = dbh.get_all_users(by="user_username")
del dbh
return jsonify(all_users)
@route_data.route(f'{all_route_data}/data/user/<username>',
methods=["GET"])
def get_user(username):
user_name = escape(username)
dbh = DataBaseHandler(db_type=global_dict["db-type"])
dbh.set_db_path(db_path=global_dict["db-path"])
dbh.set_db_name(db_name=global_dict["db-name"])
all_users = dbh.get_all_users(by="user_username")
if user_name in all_users:
user_entry = dbh.search_user(user=user_name, by="username")
else:
user_entry = []
del dbh
return jsonify(user_entry)
@route_data.route(f'{all_route_data}/data/branches',
methods=['GET'])
def get_branches():
user_name = request.args.get('username')
dbh = DataBaseHandler(db_type=global_dict["db-type"])
dbh.set_db_path(db_path=global_dict["db-path"])
dbh.set_db_name(db_name=global_dict["db-name"])
all_users = dbh.get_all_users(by="user_username")
user_entry = dbh.search_user(user=user_name, by="username")
user_hash = user_entry[0].get("user_hash")
user_tracks = dbh.read_branch(key="user_hash", attribute=user_hash)
df = pd.DataFrame(user_tracks)
df["start_time"] = pd.to_datetime(df["start_time"], unit="ms")
df["end_time"] = pd.to_datetime(df["end_time"], unit="ms")
df["created_at"] = pd.to_datetime(df["created_at"], unit="ms")
df["updated_at"] = | pd.to_datetime(df["updated_at"], unit="ms") | pandas.to_datetime |
"""Eto SDK Fluent API for managing datasets"""
import os
import uuid
from itertools import islice
from typing import Optional, Union
import pandas as pd
from rikai.io import _normalize_uri
from rikai.parquet.dataset import Dataset as RikaiDataset
from eto.config import Config
from eto.fluent.client import get_api
from eto.fluent.jobs import ingest_rikai
from eto.util import add_method, get_dataset_ref_parts
def list_datasets(project="default") -> pd.DataFrame:
"""Lists existing datasets (dataset_id, uri, and other metadata)
Parameters
----------
project: str, default 'default'
List all datasets in a particular project.
If omitted just lists datasets in 'default'
"""
datasets = get_api("datasets").list_datasets(project)["datasets"]
return pd.DataFrame(datasets)
def get_dataset(dataset_name: str) -> pd.Series:
"""Retrieve metadata for a given dataset
Parameters
----------
dataset_name: str
Qualified name <project.dataset>.
If no project is specified, assume it's the 'default' project
"""
project_id, dataset_id = get_dataset_ref_parts(dataset_name)
project_id = project_id or "default"
return get_api("datasets").get_dataset(project_id, dataset_id)
def read_eto(
dataset_name: str, columns: Union[str, list[str]] = None, limit: int = None
) -> pd.DataFrame:
"""Read an Eto dataset as a pandas dataframe
Parameters
----------
dataset_name: str
The name of the dataset to be read
columns: str or list of str, default None
Which columns to read in. All columns by default.
limit: Optional[int]
The max rows to retrieve. If omitted or <=0 then all rows are retrieved
"""
uri = _normalize_uri(get_dataset(dataset_name).uri)
if isinstance(columns, str):
columns = [columns]
dataset = RikaiDataset(uri, columns)
if limit is None or limit <= 0:
return pd.DataFrame(dataset)
else:
rows = islice(dataset, limit)
return | pd.DataFrame(rows) | pandas.DataFrame |
"""
Outil de lecture des fichiers IPE
"""
import logging
import zipfile
from pathlib import Path
from typing import IO
from typing import List
from typing import Optional
from typing import Union
import pandas as pd
import tqdm
from .. import pathtools as pth
from .. import misc
logger = logging.getLogger(__name__)
def _read_single_ipe_file(filelike: IO[bytes], cols: Optional[List[str]] = None,
nrows: Optional[int] = None
) -> pd.DataFrame:
all_encodings = ['UTF-8', 'Windows-1252', 'Latin-1']
df = pd.DataFrame()
for encoding in all_encodings:
try:
df = pd.read_csv(filelike,
sep=';',
decimal=',',
encoding=encoding,
usecols=lambda c: c in cols,
dtype=str,
nrows=nrows) # type: ignore
if df.shape[1] > 0:
break
logger.debug('Not enough columns with encoding %s. Trying with some other.', encoding)
except UnicodeDecodeError:
logger.debug('Encoding %s did not work. Trying anew', encoding)
filelike.seek(0)
return df
def _type_df(df: pd.DataFrame, numeric_cols: List[str] = None):
if numeric_cols is not None:
for col in numeric_cols:
df.loc[:, col] = pd.to_numeric(df[col], errors='coerce')
def parse_ipe(ipe_zip_path: Union[str, Path],
columns: List[str],
numeric_cols: List[str] = None,
cols_are_optional: bool = True,
_test_nrows: int = None
) -> pd.DataFrame:
"""
Lis tous les fichiers IPE dans l'archive pointée et extrait les colonnes spécifiées, en les convertissant
éventuellement en nombre.
Args:
ipe_zip_path: Chemin vers l'archive
columns: Colonnes à extraire
numeric_cols: Colonnes numériques dans les colonnes à extraire
cols_are_optional: Ne plante pas si la colonne demandée n'existe pas dans l'IPE
_test_nrows:
Returns:
Un DF avec les colonnes demandées.
"""
with zipfile.ZipFile(ipe_zip_path) as z:
file_issues = []
dfs = []
for name in misc.make_iterator(z.namelist(), low_bound=1, desc='Reading IPE'):
extension = name[-3:]
if extension == 'csv':
with z.open(name, 'r') as f:
df = _read_single_ipe_file(f, cols=columns, nrows=_test_nrows)
has_all_cols = df.shape[1] == len(columns)
if not has_all_cols and not cols_are_optional:
file_issues.append(name)
if has_all_cols or cols_are_optional:
dfs.append(df)
if len(file_issues) > 0:
logger.debug('Done reading. Had %s issues. Could not read files : %s', len(file_issues), file_issues)
df_full = | pd.concat(dfs) | pandas.concat |
import MetaTrader5 as mt5
from datetime import datetime
import pandas as pd
import pytz
# display data on the MetaTrader 5 package
print("MetaTrader5 package author: ", mt5.__author__)
print("MetaTrader5 package version: ", mt5.__version__)
print("Connecting.....")
# establish MetaTrader 5 connection to a specified trading account
if not mt5.initialize():
print("initialize() failed, error code =", mt5.last_error())
quit()
else:
print("Connection Successful")
timezone = pytz.timezone("Etc/UTC") # set time zone to UTC
FirstCurrency = "AUDUSD"
SecondCurrency = "GBPUSD"
Timeframe = mt5.TIMEFRAME_M5 # data frequency/internval (eg. minutes, hourly, daily...etc)
Startdate = datetime(2022, 1, 7,
tzinfo=timezone) # create 'datetime' object in UTC time zone to avoid the implementation of a local time zone offset
AmountOfCandlesPerMonth = 5760
# 5M = 5760
# 15M = 1920
# 30M = 960
NumberOfMonths = 2
TimePeriod = AmountOfCandlesPerMonth * NumberOfMonths # amount of data sets of your specified timeframe
print("Retrieving Data From MT5 Platform......")
# get data starting from specified dates in UTC time zone
Firstrates = mt5.copy_rates_from(FirstCurrency, Timeframe, Startdate, TimePeriod)
Secondrates = mt5.copy_rates_from(SecondCurrency, Timeframe, Startdate, TimePeriod)
mt5.shutdown() # shut down connection to the MetaTrader 5 terminal
| pd.set_option('display.max_columns', 30) | pandas.set_option |
"""Analyze waterfloods with capacitance-resistance models. # noqa: D401,D400
Classes
-------
CRM : standard capacitance resistance modeling
CrmCompensated : including pressure
Methods
-------
q_primary : primary production
q_CRM_perpair : production due to injection (injector-producer pairs)
q_CRM_perproducer : production due to injection (one producer, many injectors)
q_bhp : production from changing bottomhole pressures of producers
"""
from __future__ import annotations
import pickle
from typing import Any, Tuple, Union
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from numba import njit
from numpy import ndarray
from scipy import optimize
@njit
def q_primary(
production: ndarray, time: ndarray, gain_producer: ndarray, tau_producer: ndarray
) -> ndarray:
"""Calculate primary production contribution.
Uses Arps equation with b=0
.. math::
q_{p}(t) = q_i e^{-bt}
Args
----------
production : ndarray
Production, size: Number of time steps
time : ndarray
Producing times to forecast, size: Number of time steps
gain_producer : ndarray
Arps q_i factor
tau_producer : ndarray
Arps time constant
Returns
----------
q_hat : ndarray
Calculated production, size: Number of time steps
"""
time_decay = np.exp(-time / tau_producer)
q_hat = time_decay * production[0] * gain_producer
return q_hat
@njit
def q_CRM_perpair(injection: ndarray, time: ndarray, gains: ndarray, taus: ndarray) -> ndarray:
"""Calculate per injector-producer pair production.
Runs for influences of each injector on one producer, assuming
individual `gain`s and `tau`s for each pair
Args
----------
injection : ndarray
Injected fluid, size: Number of time steps
time : ndarray
Producing times to forecast, size: Number of time steps
gains : ndarray
Connectivities between each injector and the producer,
size: Number of injectors
taus : ndarray
Time constants between each injector and the producer,
size: Number of injectors
Returns
----------
q_hat : ndarray
Calculated production, size: Number of time steps
"""
n = len(time)
q_hat = np.zeros(n)
conv_injected = np.zeros((n, injection.shape[1]))
# Compute convolved injection rates
for j in range(injection.shape[1]):
conv_injected[0, j] += (1 - np.exp((time[0] - time[1]) / taus[j])) * injection[0, j]
for k in range(1, n):
for m in range(1, k + 1):
time_decay = (1 - np.exp((time[m - 1] - time[m]) / taus[j])) * np.exp(
(time[m] - time[k]) / taus[j]
)
conv_injected[k, j] += time_decay * injection[m, j]
# Calculate waterflood rates
for k in range(n):
for j in range(injection.shape[1]):
q_hat[k] += gains[j] * conv_injected[k, j]
return q_hat
@njit
def q_CRM_perproducer(injection: ndarray, time: ndarray, gain: ndarray, tau: float) -> ndarray:
"""Calculate per injector-producer pair production (simplified tank).
Uses simplified CRMp model that assumes a single tau for each producer
Args
----------
injection : ndarray
injected fluid in reservoir volumes, size: Number of time steps
time : ndarray
Producing times to forecast, size: Number of time steps
gains : ndarray
Connectivities between each injector and the producer
size: Number of injectors
tau : float
Time constants all injectors and the producer
Returns
----------
q_hat : ndarray
Calculated production, size: Number of time steps
"""
tau2 = tau * np.ones(injection.shape[1])
return q_CRM_perpair(injection, time, gain, tau2)
@njit
def _pressure_diff(pressure_local: ndarray, pressure: ndarray) -> ndarray:
"""Pressure differences from local to each producer each timestep."""
n_t, n_p = pressure.shape
pressure_diff = np.zeros((n_p, n_t))
for j in range(n_p):
for t in range(1, n_t):
pressure_diff[j, t] = pressure_local[t - 1] - pressure[t, j]
return pressure_diff
def q_bhp(pressure_local: ndarray, pressure: ndarray, v_matrix: ndarray) -> ndarray:
r"""Calculate the production effect from bottom-hole pressure variation.
This looks like
.. math::
q_{BHP,j}(t_i) = \sum_{k} v_{kj}\left[ p_j(t_{i-1}) - p_k(t_i) \right]
Args
----
pressure_local : ndarray
pressure for the well in question, shape: n_time
pressure : ndarray
bottomhole pressure, shape: n_time, n_producers
v_matrix : ndarray
connectivity between one producer and all producers, shape: n_producers
Returns
-------
q : ndarray
production from changing BHP
shape: n_time
"""
pressure_diff = _pressure_diff(pressure_local, pressure)
q = np.einsum("j,jt->t", v_matrix, pressure_diff)
return q
def random_weights(n_i: int, n_j: int, axis: int = 0, seed: int | None = None) -> ndarray:
"""Generate random weights for producer-injector gains.
Args
----
n_i : int
n_j : int
axis : int, default is 0
seed : int, default is None
Returns
-------
gains_guess: ndarray
"""
rng = np.random.default_rng(seed)
limit = 10 * (n_i if axis == 0 else n_j)
vec = rng.integers(0, limit, (n_i, n_j))
axis_sum = vec.sum(axis, keepdims=True)
return vec / axis_sum
class CRM:
"""A Capacitance Resistance Model history matcher.
CRM uses a physics-inspired mass balance approach to explain production for \
waterfloods. It treats each injector-producer well pair as a system \
with mass input, output, and pressure related to the mass balance. \
Several versions exist. Select them from the arguments.
Args
----------
primary : bool
Whether to model primary production (strongly recommended)
tau_selection : str
How many tau values to select
- If 'per-pair', fit tau for each producer-injector pair
- If 'per-producer', fit tau for each producer (CRMp model)
constraints : str
How to constrain the gains
* If 'up-to one' (default), let gains vary from 0 (no connection) to 1 \
(all injection goes to producer)
* If 'positive', require each gain to be positive \
(It is unlikely to go negative in real life)
* If 'sum-to-one', require the gains for each injector to sum to one \
(all production accounted for)
* If 'sum-to-one injector' (not implemented), require each injector's \
gains to sum to one (all injection accounted for)
Examples
----------
crm = CRM(True, "per-pair", "up-to one")
References
----------
"A State-of-the-Art Literature Review on Capacitance Resistance Models for
Reservoir Characterization and Performance Forecasting" - Holanda et al., 2018.
"""
def __init__(
self,
primary: bool = True,
tau_selection: str = "per-pair",
constraints: str = "positive",
):
"""Initialize CRM with appropriate settings."""
if type(primary) != bool:
raise TypeError("primary must be a boolean")
self.primary = primary
if constraints not in (
"positive",
"up-to one",
"sum-to-one",
"sum-to-one injector",
):
raise ValueError("Invalid constraints")
self.constraints = constraints
self.tau_selection = tau_selection
if tau_selection == "per-pair":
self.q_CRM = q_CRM_perpair
elif tau_selection == "per-producer":
self.q_CRM = q_CRM_perproducer
else:
raise ValueError(
"tau_selection must be one of"
+ '("per-pair","per-producer")'
+ f", not {tau_selection}"
)
def fit(
self,
production: ndarray,
injection: ndarray,
time: ndarray,
initial_guess: ndarray = None,
num_cores: int = 1,
random: bool = False,
**kwargs,
):
"""Build a CRM model from the production and injection data.
Args
----------
production : ndarray
production rates for each time period,
shape: (n_time, n_producers)
injection : ndarray
injection rates for each time period,
shape: (n_time, n_injectors)
time : ndarray
relative time for each rate measurement, starting from 0,
shape: (n_time)
initial_guess : ndarray
initial guesses for gains, taus, primary production contribution
shape: (len(guess), n_producers)
num_cores (int): number of cores to run fitting procedure on, defaults to 1
random : bool
whether to randomly initialize the gains
**kwargs:
keyword arguments to pass to scipy.optimize fitting routine
Returns
----------
self: trained model
"""
_validate_inputs(production, injection, time)
self.production = production
self.injection = injection
self.time = time
if not initial_guess:
initial_guess = self._get_initial_guess(random=random)
bounds, constraints = self._get_bounds()
num_cores = kwargs.pop("num_cores", 1)
def fit_well(production, x0):
# residual is an L2 norm
def residual(x, production):
return sum(
(production - self._calculate_qhat(x, production, injection, time)) ** 2
)
result = optimize.minimize(
residual,
x0,
bounds=bounds,
constraints=constraints,
args=(production,),
**kwargs,
)
return result
if num_cores == 1:
results = map(fit_well, self.production.T, initial_guess)
else:
results = Parallel(n_jobs=num_cores)(
delayed(fit_well)(p, x0) for p, x0 in zip(self.production.T, initial_guess)
)
opts_perwell = [self._split_opts(r["x"]) for r in results]
gains_perwell, tau_perwell, gains_producer, tau_producer = map(list, zip(*opts_perwell))
self.gains = np.vstack(gains_perwell)
self.tau = np.vstack(tau_perwell)
self.gains_producer = np.array(gains_producer)
self.tau_producer = np.array(tau_producer)
return self
def predict(self, injection=None, time=None, connections=None):
"""Predict production for a trained model.
If the injection and time are not provided, this will use the training values
Args
----------
injection : ndarray
The injection rates to input to the system, shape (n_time, n_inj)
time : ndarray
The timesteps to predict
connections : dict
if present, the gains, tau, gains_producer, tau_producer
matrices
Returns
----------
q_hat :ndarray
The predicted values, shape (n_time, n_producers)
"""
if connections is not None:
gains = connections.get("gains", self.gains)
tau = connections.get("tau", self.tau)
gains_producer = connections.get("gains_producer", self.gains_producer)
tau_producer = connections.get("tau_producer", self.tau_producer)
else:
gains = self.gains
tau = self.tau
gains_producer = self.gains_producer
tau_producer = self.tau_producer
production = self.production
n_producers = production.shape[1]
if int(injection is None) + int(time is None) == 1:
raise TypeError("predict() takes 1 or 3 arguments, 2 given")
if injection is None:
injection = self.injection
if time is None:
time = self.time
if time.shape[0] != injection.shape[0]:
raise ValueError("injection and time need same number of steps")
q_hat = np.zeros((len(time), n_producers))
for i in range(n_producers):
q_hat[:, i] += q_primary(production[:, i], time, gains_producer[i], tau_producer[i])
q_hat[:, i] += self.q_CRM(injection, time, gains[i, :], tau[i])
return q_hat
def set_rates(self, production=None, injection=None, time=None):
"""Set production and injection rates and time array.
Args
-----
production : ndarray
production rates with shape (n_time, n_producers)
injection : ndarray
injection rates with shape (n_time, n_injectors)
time : ndarray
timesteps with shape n_time
"""
_validate_inputs(production, injection, time)
if production is not None:
self.production = production
if injection is not None:
self.injection = injection
if time is not None:
self.time = time
def set_connections(self, gains=None, tau=None, gains_producer=None, tau_producer=None):
"""Set waterflood properties.
Args
-----
gains : ndarray
connectivity between injector and producer
shape: n_gains, n_producers
tau : ndarray
time-constant for injection to be felt by production
shape: either n_producers or (n_gains, n_producers)
gains_producer : ndarray
gain on primary production, shape: n_producers
tau_producer : ndarray
Arps time constant for primary production, shape: n_producers
"""
if gains is not None:
self.gains = gains
if tau is not None:
self.tau = tau
if gains_producer is not None:
self.gains_producer = gains_producer
if tau_producer is not None:
self.tau_producer = tau_producer
def residual(self, production=None, injection=None, time=None):
"""Calculate the production minus the predicted production for a trained model.
If the production, injection, and time are not provided, this will use the
training values
Args
----------
production : ndarray
The production rates observed, shape: (n_timesteps, n_producers)
injection : ndarray
The injection rates to input to the system,
shape: (n_timesteps, n_injectors)
time : ndarray
The timesteps to predict
Returns
----------
residual : ndarray
The true production data minus the predictions, shape (n_time, n_producers)
"""
q_hat = self.predict(injection, time)
if production is None:
production = self.production
return production - q_hat
def to_excel(self, fname: str):
"""Write trained model to an Excel file.
Args
----
fname : str
Excel file to write out
"""
for x in ("gains", "tau", "gains_producer", "tau_producer"):
if x not in self.__dict__.keys():
raise (ValueError("Model has not been trained"))
with pd.ExcelWriter(fname) as f:
pd.DataFrame(self.gains).to_excel(f, sheet_name="Gains")
| pd.DataFrame(self.tau) | pandas.DataFrame |
import math
from collections.abc import Iterable  # `collections.Iterable` was removed in Python 3.10; use collections.abc
from typing import List, Literal, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from lazy_object_proxy.utils import cached_property
from sklearn import metrics
from sklearn.cluster import KMeans
class StraightLine:
    '''A 2-D straight line y = slope * x + intercept, built from two points or one point plus a slope.'''
def __init__(
self,
x1: float = None,
y1: float = None,
x2: float = None,
y2: float = None,
slope: Optional[float] = None,
):
if slope is not None:
self.slope = slope
else:
if x1 == x2:
self.slope = np.nan
else:
self.slope = (y2 - y1) / (x2 - x1) # type: ignore
self.intercept = y1 - self.slope * x1 # type: ignore
def get_point_distance(self, x0: float, y0: float) -> float:
return abs(self.slope * x0 - y0 + self.intercept) / math.sqrt(
self.slope ** 2 + 1
)
def is_point_above_line(self, x0: float, y0: float) -> bool:
pred_y = x0 * self.slope + self.intercept
if pred_y == y0:
print(f'Point ({x0}, {y0}) is on line y = {self.slope}x + {self.intercept}')
return y0 > pred_y
def predict(
self, x_list: Iterable, limit: Optional[Iterable] = None
) -> List[float]:
if not isinstance(x_list, Iterable):
x_list = [x_list]
results = [self.slope * _ + self.intercept for _ in x_list]
        if len(results) == 1:
            # a single input returns a scalar; note that `limit` is not applied in this case
            return results[0]
if limit is not None:
results = [
_ if _ > min(limit) and _ < max(limit) else np.nan for _ in results
]
return results
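# Illustrative usage sketch of StraightLine (not part of the original module; the
# numbers below are made up for demonstration only):
#
#     line = StraightLine(x1=0, y1=1.0, x2=10, y2=2.0)   # slope 0.1, intercept 1.0
#     line.predict([0, 5, 10])           # -> [1.0, 1.5, 2.0]
#     line.is_point_above_line(5, 1.8)   # -> True, since 1.8 > 1.5
#     line.get_point_distance(0, 2.0)    # perpendicular distance from (0, 2.0) to the line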
def clustering_kmeans(num_list: List[float], thresh: float = 0.03) -> List[float]:
    '''Cluster 1-D values with KMeans, picking k by an inertia-ratio threshold; returns one label per value.'''
    # try every candidate cluster count k from 1 to N
k_rng = range(1, len(num_list) + 1)
est_arr = [KMeans(n_clusters=k).fit([[num] for num in num_list]) for k in k_rng]
    # within-cluster sum of squares (inertia) for each k
sum_squares = [e.inertia_ for e in est_arr]
    # inertia of each k relative to the single-cluster inertia
diff_squares = [squares / sum_squares[0] for squares in sum_squares]
diff_squares_pd = pd.Series(diff_squares)
    # pick the smallest k whose relative inertia falls below thresh
thresh_pd = diff_squares_pd[diff_squares_pd < thresh]
if len(thresh_pd) > 0:
select_k = thresh_pd.index[0] + 1
else:
        # otherwise fall back to the largest k
select_k = k_rng[-1]
est = est_arr[select_k - 1]
results = est.predict([[num] for num in num_list])
return results
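# Illustrative sketch (hypothetical prices): grouping nearby extreme values into
# price levels. The returned labels group values belonging to the same cluster;
# the exact label numbers are arbitrary.
#
#     extreme_prices = [10.05, 10.10, 10.12, 10.90, 10.95]
#     labels = clustering_kmeans(extreme_prices, thresh=0.03)
#     # e.g. array([0, 0, 0, 1, 1]) -- two distinct price levels found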
class SupportResistanceLine:
    '''Fit support/resistance structure for a price series via an iterative Chebyshev fit and its extremes.'''
def __init__(
self, data: pd.Series, kind: Literal['support', 'resistance'] = 'support'
):
if not isinstance(data, pd.Series):
raise TypeError('data should be pd.Series')
self.y = data.reset_index(drop=True).rename('y').rename_axis('x')
self.x = self.y.index.to_series()
self.length = len(self.y)
self.kind = kind
self.dot_color = 'g' if kind == 'support' else 'r'
@cached_property
def twin(self):
srl = SupportResistanceLine(
self.y, 'resistance' if self.kind == 'support' else 'support'
)
srl.extreme_pos = self.extreme_pos # avoid repeated calc
return srl
@cached_property
def iterated_poly_fits(
self,
) -> Tuple[pd.DataFrame, np.polynomial.chebyshev.Chebyshev]:
fit_df = self.y.to_frame()
rolling_window = int(len(self.y) / 30)
fit_df['y_roll_mean'] = (
fit_df['y'].rolling(rolling_window, min_periods=1).mean()
)
        # baseline RMSE between y and its rolling mean
distance_mean = np.sqrt(
metrics.mean_squared_error(fit_df.y, fit_df.y_roll_mean)
)
degree = int(len(self.y) / 40)
poly = None
y_fit = None
while degree < 100:
            # try increasing Chebyshev degrees (up to 100)
poly = np.polynomial.Chebyshev.fit(self.x, self.y, degree)
y_fit = poly(self.x)
fit_df[f'poly_{degree}'] = y_fit
            # RMSE between y and the fitted curve
distance_fit = np.sqrt(metrics.mean_squared_error(fit_df.y, y_fit))
if distance_fit <= distance_mean * 0.6:
                # stop once the fit error drops below 60% of the baseline, i.e. the trend line is close enough
break
degree += 1
return fit_df, poly
@cached_property
def best_poly(self) -> np.polynomial.chebyshev.Chebyshev:
return self.iterated_poly_fits[1]
@cached_property
def poly_degree(self) -> int:
'''Degree(s) of the fitting polynomials'''
return self.best_poly.degree()
@cached_property
def poly_fit(self) -> pd.Series:
'''Fitted series'''
return self.best_poly(self.x)
def plot_poly(self, show=False):
fig, ax = plt.subplots(1, figsize=(16, 9))
df = self.iterated_poly_fits[0].assign(y=self.y, best_poly=self.poly_fit)
df.plot(ax=ax, figsize=(16, 9), colormap='coolwarm')
if show:
plt.show()
return fig, ax
@cached_property
def extreme_pos(self) -> Tuple[List[int], List[int]]:
        # roots of the first derivative are the candidate extreme positions
extreme_pos = [int(round(_.real)) for _ in self.best_poly.deriv().roots()]
extreme_pos = [_ for _ in extreme_pos if _ > 0 and _ < self.length]
# distinguish maximum and minimum using second derivative
second_deriv = self.best_poly.deriv(2)
min_extreme_pos = []
max_extreme_pos = []
for pos in extreme_pos:
if second_deriv(pos) > 0:
min_extreme_pos.append(pos)
elif second_deriv(pos) < 0:
max_extreme_pos.append(pos)
return max_extreme_pos, min_extreme_pos
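    # Editorial note: the sign check above is the standard second-derivative
    # test -- f''(x) > 0 at a critical point marks a local minimum, f''(x) < 0
    # a local maximum; roots where f'' is ~0 are silently dropped.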
def plot_extreme_pos(self, show: bool = False):
max_extreme_pos, min_extreme_pos = self.extreme_pos
fig, ax = plt.subplots(1, figsize=(16, 9))
self.y.to_frame().assign(best_poly=self.poly_fit).plot(ax=ax)
ax.scatter(
min_extreme_pos, [self.best_poly(_) for _ in min_extreme_pos], s=50, c='g'
)
ax.scatter(
max_extreme_pos, [self.best_poly(_) for _ in max_extreme_pos], s=50, c='r'
)
if show:
plt.show()
return fig, ax
@cached_property
def support_resistance_pos(self) -> List[int]:
'''Real local extreme pos around roots'''
def find_left_and_right_pos(pos, refer_pos):
'''Find two resistance points around a support point, or vice versa'''
refer_sr = pd.Series(refer_pos)
left_pos = (
refer_sr[refer_sr < pos].iloc[-1]
if len(refer_sr[refer_sr < pos]) > 0
else 0
)
right_pos = (
refer_sr[refer_sr > pos].iloc[0]
if len(refer_sr[refer_sr > pos]) > 0
else self.length
)
return left_pos, right_pos
def extreme_around(left_pos, right_pos):
'''Locate real local extreme pos around roots'''
if self.kind == 'support':
extreme_around_pos = self.y.iloc[left_pos:right_pos].idxmin()
else: # resistance
extreme_around_pos = self.y.iloc[left_pos:right_pos].idxmax()
            # If the extremum falls on the window edge it is spurious; discard it
if extreme_around_pos in (left_pos, right_pos):
return 0
return extreme_around_pos
if self.kind == 'support':
refer_pos, extreme_pos = self.extreme_pos
else:
extreme_pos, refer_pos = self.extreme_pos
support_resistance_pos = []
for _, pos in enumerate(extreme_pos):
if pos in [0, self.length]:
continue
left_pos, right_pos = find_left_and_right_pos(pos, refer_pos)
support_resistance_pos.append(extreme_around(left_pos, right_pos))
# Deduplicate
support_resistance_pos = sorted(set(support_resistance_pos))
# Remove 0
if 0 in support_resistance_pos:
support_resistance_pos.remove(0)
return support_resistance_pos
@cached_property
def support_resistance_df(self) -> pd.DataFrame:
return (
pd.Series(
self.y.loc[self.support_resistance_pos],
index=self.support_resistance_pos,
)
.sort_index()
.rename_axis('x')
.reset_index()
)
def plot_real_extreme_points(self, show: bool = False):
return self.show_line(self.support_resistance_df, show=show)
@cached_property
def clustered_pos(self) -> List[int]:
def clustering_nearest(num_list, thresh=self.length / 80):
sr = pd.Series(num_list).sort_values().reset_index(drop=True)
while sr.diff().min() < thresh:
index1 = sr.diff().idxmin()
index2 = index1 - 1
num1 = sr[index1]
num2 = sr[index2]
y1 = self.y.iloc[num1]
y2 = self.y.iloc[num2]
smaller_y_index = index1 if y1 < y2 else index2
bigger_y_index = index1 if y1 > y2 else index2
sr = sr.drop(
bigger_y_index if self.kind == 'support' else smaller_y_index
).reset_index(drop=True)
return sr.tolist()
clustered_pos = clustering_nearest(self.support_resistance_df['x'].tolist())
return clustered_pos
def plot_clustered_pos(self, show: bool = False):
support_resistance_df = self.support_resistance_df.loc[
lambda _: _['x'].isin(self.clustered_pos)
].copy()
return self.show_line(support_resistance_df, show=show)
def score_lines_from_a_point(
self, last_support_resistance_pos: pd.Series
) -> pd.DataFrame:
'''Assign scores to all lines through a point'''
# Only include points before the point
support_resistance_df = self.support_resistance_df.loc[
lambda _: _['x'] <= last_support_resistance_pos['x']
].copy()
if len(support_resistance_df) <= 2:
return pd.DataFrame()
        # Calc the slope of the line from each point to the reference (last) point
support_resistance_df['slope'] = support_resistance_df.apply(
lambda _: StraightLine(
_['x'],
_['y'],
last_support_resistance_pos['x'],
last_support_resistance_pos['y'],
).slope,
axis=1,
)
# Rank lines based on slope
if self.kind == 'support':
support_resistance_df = support_resistance_df.dropna().sort_values('slope')
elif self.kind == 'resistance':
support_resistance_df = support_resistance_df.dropna().sort_values(
'slope', ascending=False
)
        # Filter out lines that are too steep
support_resistance_df = support_resistance_df[
support_resistance_df['slope'].abs() / self.y.mean() < 0.003
]
if len(support_resistance_df) <= 2:
return pd.DataFrame()
# Cluster
thresh = 0.03
support_resistance_df['cluster'] = clustering_kmeans(
support_resistance_df['slope'], thresh
)
while (
support_resistance_df.groupby('cluster').apply(len).max() <= 2
        ):  # while even the largest cluster has no more than 2 points
thresh *= 2
if thresh >= 1:
return pd.DataFrame()
support_resistance_df['cluster'] = clustering_kmeans(
support_resistance_df['slope'], thresh
)
def calc_score_for_cluster(cluster_df):
if len(cluster_df) <= 2:
return pd.DataFrame()
avg_x = cluster_df.iloc[:-1]['x'].mean()
avg_y = cluster_df.iloc[:-1]['y'].mean()
line = StraightLine(
cluster_df.iloc[-1]['x'],
cluster_df.iloc[-1]['y'],
slope=cluster_df.iloc[-1]['slope'],
)
mean_distance = line.get_point_distance(avg_x, avg_y)
std = cluster_df.iloc[:-1]['x'].std(ddof=0)
mean_x = cluster_df.iloc[:-1]['x'].mean()
return pd.DataFrame(
{
'cluster': cluster_df.name,
'x1': last_support_resistance_pos['x'],
'y1': last_support_resistance_pos['y'],
'x2': cluster_df.iloc[-1]['x'],
'y2': cluster_df.iloc[-1]['y'],
'slope': cluster_df.iloc[-1]['slope'],
'count': len(cluster_df) - 1,
'mean_distance': mean_distance,
'mean_x': mean_x,
'std': std,
},
index=[0],
)
score_df = (
support_resistance_df.groupby('cluster')
.apply(calc_score_for_cluster)
.reset_index(drop=True)
)
        # Also score the full set of points, without clustering
all_df = support_resistance_df.copy()
all_df.name = 'all'
score_df.loc[len(score_df)] = calc_score_for_cluster(all_df).iloc[0]
return score_df
def show_line(
self,
points_df: pd.DataFrame,
*straight_line_list: StraightLine,
show: bool = False,
):
fig, ax = plt.subplots(1, figsize=(16, 9))
self.y.to_frame().assign(best_poly=self.poly_fit).plot(ax=ax)
# Green support dots, red resistance dots
ax.scatter(
points_df.x, points_df.y, s=50, c=self.dot_color, label=f'{self.kind}_dots'
)
for i, st_line in enumerate(straight_line_list):
ax.plot(
self.x,
st_line.predict(self.x, limit=(self.y.min(), self.y.max())),
label=(['1st', '2nd', '3rd'] + list('456789abcdefghijklmnopq'))[i],
)
plt.legend()
if show:
plt.show()
return fig, ax
@cached_property
def last_area_support_resistance_df(self) -> pd.DataFrame:
        '''Find the best lines through the support/resistance points in the right-most quarter of the series'''
last_area_support_resistance_df = self.support_resistance_df[
self.support_resistance_df['x'] > self.length * 0.75
].copy()
df_list = [
self.score_lines_from_a_point(row)
for index, row in last_area_support_resistance_df.iterrows()
]
last_area_support_resistance_df = | pd.concat(df_list) | pandas.concat |
# -*- coding: utf-8 -*-
import sys
import pandas
import numpy
import json
import os
sys.path.append('../')
from core_functions import remove_unannotated
from core_functions import construct_graph_from_mongo
from core_functions import get_mapping_from_mongo
import core_classes
if __name__ == '__main__':
main_dir = './PN_analysis/standardized_graph/'
if not os.path.exists(main_dir):
os.makedirs(main_dir)
'''
STEP 1
Create the standardized version of GO-BP, by filtering out unannotated
terms and those with extremely high IC and SV values
'''
G = construct_graph_from_mongo('GO_P') # initial GO-BP graph
# find terms annotated for at least one species
species_df = pandas.read_csv('./files/species.tsv', sep='\t', index_col=0)
union = []
for species in species_df.abbreviation:
mapping = get_mapping_from_mongo('GO_P', species, corrected_mapping=True)
union = list(set(union).union(mapping.keys()))
# terms to remove
to_remove_terms = list(set(G.entries.keys()).difference(union))
G = remove_unannotated(G, to_remove_terms)
semantics = core_classes.Semantics(G)
terms_details = []
for term in G.entries.keys():
sm = semantics.get_semantic_value(term, 'graph_corpus')
ic = semantics.get_information_content(term, 'graph_corpus')
terms_details.append([term, sm, ic])
semantics_df = pandas.DataFrame(terms_details, columns=['term_id',
'semantic_value', 'information_content'])
high_ic = round(numpy.percentile(semantics_df.information_content, 20),3)
low_ic = 0.25
high_sem_value = round(numpy.percentile(semantics_df.semantic_value,20),3)
low_sem_value = 0
substitutions_dict = {}
final_terms = []
for term in G.entries.keys():
new_terms = semantics.get_ancestors_from_bounded_graph(term, low_ic=0.25,
high_ic=high_ic,
high_sem_value=high_sem_value,
low_sem_value=0)
substitutions_dict.update({term:new_terms})
final_terms.extend(new_terms)
final_terms = list(set(final_terms))
with open(main_dir+'GO_P_terms_substitutions.json', 'w') as f:
json.dump(substitutions_dict, f)
'''
STEP 2
Construct semantic similarity matrices for the terms of standardized GO-BP
'''
all_terms = list(set([i for i in substitutions_dict.keys()]))
final_terms = list(set([i for j in substitutions_dict.values() for i in j]))
final_terms_data = []
for term in final_terms:
final_terms_data.append([term, G.get_entry_obj(term).definition,
str(semantics.get_information_content(term, 'graph_corpus')),
str(semantics.get_semantic_value(term, 'graph_corpus'))
])
tmp_df = pandas.DataFrame(final_terms_data, columns=['term_id', 'definition',
'ic', 'semantic_value'])
tmp_df = tmp_df.sort_values(by='ic', ascending=True)
tmp_df.to_csv(main_dir+'final_terms.csv')
terms = list(sorted(final_terms))
resnik_mica_matrix = semantics.get_pairwise_similarity(terms, 'resnik',
ancestors_set='mica')
resnik_xgrasm_matrix = semantics.get_pairwise_similarity(terms, 'resnik',
ancestors_set='xgrasm')
agg_ic_matrix = semantics.get_pairwise_similarity(terms, 'aggregate_ic',
ancestors_set='mica')
mean_matrix = (resnik_mica_matrix + resnik_xgrasm_matrix + agg_ic_matrix)/3.
pandas.DataFrame(resnik_mica_matrix, columns=terms, index=terms).to_csv(main_dir+'resnik_mica_matrix.csv', sep=',')
pandas.DataFrame(resnik_xgrasm_matrix, columns=terms, index=terms).to_csv(main_dir+'resnik_xgrasm_matrix.csv', sep=',')
| pandas.DataFrame(agg_ic_matrix, columns=terms, index=terms) | pandas.DataFrame |
import logging
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
import sentry_sdk
from solarforecastarbiter import utils
def _make_aggobs(obsid, ef=pd.Timestamp('20191001T1100Z'),
eu=None, oda=None):
return {
'observation_id': obsid,
'effective_from': ef,
'effective_until': eu,
'observation_deleted_at': oda
}
nindex = pd.date_range(start='20191004T0000Z',
freq='1h', periods=10)
@pytest.fixture()
def ids():
return ['f2844284-ea0a-11e9-a7da-f4939feddd82',
'f3e310ba-ea0a-11e9-a7da-f4939feddd82',
'09ed7cf6-ea0b-11e9-a7da-f4939feddd82',
'0fe9f2ba-ea0b-11e9-a7da-f4939feddd82',
'67ea9200-ea0e-11e9-832b-f4939feddd82']
@pytest.fixture()
def aggobs(ids):
return tuple([
_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191004T0501Z')),
_make_aggobs(ids[2], eu=pd.Timestamp('20191004T0400Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0700Z'),
eu=pd.Timestamp('20191004T0800Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0801Z')),
_make_aggobs(ids[3], oda=pd.Timestamp('20191005T0000Z')),
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191003T0000Z'))
])
def test_compute_aggregate(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_missing_from_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
aggobs = list(aggobs[:-2]) + [
_make_aggobs('09ed7cf6-ea0b-11e9-a7da-f4939fed889')]
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_empty_data(aggobs, ids):
data = {}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:2], nindex)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_missing_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
data[ids[-1]] = pd.DataFrame({'value': [1] * 8, 'quality_flag': [0] * 8},
index=nindex[:-2])
aggobs = list(aggobs[:-2]) + [_make_aggobs(ids[-1])]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series(
[3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 4.0, None, None],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_deleted_not_removed(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids}
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_not_removed_yet(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
# with last aggobs, would try and get data before effective_until,
# but was deleted, so raise error
aggobs = list(aggobs[:-2]) + [
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191004T0700Z'))]
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_but_removed_before(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
# aggobs[-1] properly removed
aggobs = list(aggobs[:-2]) + [aggobs[-1]]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}))
def test_compute_aggregate_mean(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'mean', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([1.0] * 10, index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_no_overlap(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3], 'quality_flag': [2, 10, 338]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0230Z'])),
ids[1]: pd.DataFrame(
{'value': [3, 2, 1], 'quality_flag': [9, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0200Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'median', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, None, 2.5, None],
'quality_flag': [2, 10, 9, 338 | 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_before_effective(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3, 0, 0], 'quality_flag': [2, 10, 338, 0, 0]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z'])),
ids[1]: pd.DataFrame(
{'value': [None, 2.0, 1.0], 'quality_flag': [0, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0201Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'max', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, 3.0, 2.0, 1.0],
'quality_flag': [2, 10, 338, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_bad_cols():
data = {'a': pd.DataFrame([0], index=pd.DatetimeIndex(
['20191001T1200Z']))}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC',
'mean', [_make_aggobs('a')])
def test_compute_aggregate_index_provided(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
the_index = nindex.copy()[::2]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], the_index)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 3.0],
index=the_index),
'quality_flag': pd.Series([0]*5, index=the_index)})
)
@pytest.mark.parametrize('dfindex,missing_idx', [
(pd.date_range(start='20191004T0000Z', freq='1h', periods=11), -1),
(pd.date_range(start='20191003T2300Z', freq='1h', periods=11), 0),
])
def test_compute_aggregate_missing_values_with_index(
aggobs, ids, dfindex, missing_idx):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], dfindex)
assert pd.isnull(agg['value'][missing_idx])
def test_compute_aggregate_partial_missing_values_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
data[ids[2]] = pd.DataFrame({'value': [1] * 5, 'quality_flag': [0] * 5},
index=nindex[5:])
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], nindex)
expected = pd.DataFrame(
{'value': pd.Series(
[np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}
)
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_obs_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC', 'sum',
aggobs[:-2], nindex)
def test_compute_aggregate_out_of_effective(aggobs, ids):
limited_aggobs = [aggob
for aggob in aggobs
if aggob['effective_until'] is not None]
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
max_time = pd.Series([o['effective_until'] for o in limited_aggobs]).max()
ooe_index = pd.date_range(
max_time + pd.Timedelta('1H'),
max_time + pd.Timedelta('25H'),
freq='60min'
)
with pytest.raises(ValueError) as e:
utils.compute_aggregate(data, '1h', 'ending', 'UTC', 'sum',
limited_aggobs, ooe_index)
assert str(e.value) == 'No effective observations in data'
def test__observation_valid(aggobs):
out = utils._observation_valid(
nindex, 'f2844284-ea0a-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(True, index=nindex))
def test__observation_valid_ended(aggobs):
out = utils._observation_valid(
nindex, 'f3e310ba-ea0a-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series([False] * 6 + [True] * 4,
index=nindex))
def test__observation_valid_many(aggobs):
out = utils._observation_valid(
nindex, '09ed7cf6-ea0b-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(
[True, True, True, True, True, False, False, True, True, True],
index=nindex))
def test__observation_valid_deleted(aggobs):
with pytest.raises(ValueError):
utils._observation_valid(
nindex, '0fe9f2ba-ea0b-11e9-a7da-f4939feddd82', aggobs)
def test__observation_valid_deleted_before(aggobs):
out = utils._observation_valid(
nindex, '67ea9200-ea0e-11e9-832b-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(False, index=nindex))
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0700Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0700Z', end='20191004T0745Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0700Z', '20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z',
'20191004T0800Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z'])),
])
def test__make_aggregate_index(length, label, expected):
test_data = {
0: pd.DataFrame(range(5), index=pd.date_range(
'20191004T0700Z', freq='7min', periods=5)), # end 35
1: pd.DataFrame(range(4), index=pd.date_range(
'20191004T0015-0700', freq='10min', periods=4))} # end 45
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0715Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0700Z', end='20191004T0730Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0720Z', '20191004T0740Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z'])),
])
def test__make_aggregate_index_offset_right(length, label, expected):
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20191004T0701Z', freq='7min', periods=6)) # end 35
}
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0700Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0645Z', end='20191004T0730Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0700Z', '20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0600Z',
'20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0640Z', '20191004T0700Z', '20191004T0720Z'])),
('36min', 'ending', pd.DatetimeIndex(['20191004T0712Z',
'20191004T0748Z'])),
('36min', 'beginning', pd.DatetimeIndex(['20191004T0636Z',
'20191004T0712Z'])),
])
def test__make_aggregate_index_offset_left(length, label, expected):
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20191004T0658Z', freq='7min', periods=6)) # end 32
}
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
def test__make_aggregate_index_tz():
length = '30min'
label = 'beginning'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T1600Z', freq='5min', periods=6)) # end 30
}
expected = pd.DatetimeIndex(['20190101T0900'],
tz='America/Denver')
out = utils._make_aggregate_index(test_data, length, label,
'America/Denver')
pdt.assert_index_equal(out, expected)
def test__make_aggregate_index_invalid_length():
length = '33min'
label = 'beginning'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T0158Z', freq='7min', periods=6)) # end 32
}
with pytest.raises(ValueError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
def test__make_aggregate_index_instant():
length = '30min'
label = 'instant'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
            '20190101T0100Z', freq='10min', periods=6))  # last point 01:50
}
with pytest.raises(ValueError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
@pytest.mark.parametrize('start,end', [
(pd.Timestamp('20190101T0000Z'), pd.Timestamp('20190102T0000')),
(pd.Timestamp('20190101T0000'), pd.Timestamp('20190102T0000Z')),
(pd.Timestamp('20190101T0000'), pd.Timestamp('20190102T0000')),
])
def test__make_aggregate_index_localization(start, end):
length = '30min'
label = 'ending'
test_data = {
0: pd.DataFrame(range(1), index=pd.DatetimeIndex([start])),
1: pd.DataFrame(range(1), index=pd.DatetimeIndex([end])),
}
with pytest.raises(TypeError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
@pytest.mark.parametrize('inp,oup', [
(pd.DataFrame(dtype=float), pd.Series(dtype=float)),
(pd.DataFrame(index=pd.DatetimeIndex([]), dtype=float),
pd.DataFrame(dtype=float)),
(pd.Series([0, 1]), pd.Series([0, 1])),
(pd.DataFrame([[0, 1], [1, 2]]), pd.DataFrame([[0, 1], [1, 2]])),
pytest.param(
pd.Series([0, 1]),
pd.Series([0, 1], index=pd.date_range(start='now', freq='1min',
periods=2)),
marks=pytest.mark.xfail(type=AssertionError, strict=True)),
pytest.param(
pd.Series([0, 1]),
pd.Series([1, 0]),
marks=pytest.mark.xfail(type=AssertionError, strict=True))
])
def test_sha256_pandas_object_hash(inp, oup):
assert utils.sha256_pandas_object_hash(inp) == utils.sha256_pandas_object_hash(oup) # NOQA
def test_listhandler():
logger = logging.getLogger('testlisthandler')
handler = utils.ListHandler()
logger.addHandler(handler)
logger.setLevel('DEBUG')
logger.warning('Test it')
logger.debug('What?')
out = handler.export_records()
assert len(out) == 1
assert out[0].message == 'Test it'
assert len(handler.export_records(logging.DEBUG)) == 2
def test_listhandler_recreate():
logger = logging.getLogger('testlisthandler')
handler = utils.ListHandler()
logger.addHandler(handler)
logger.setLevel('DEBUG')
logger.warning('Test it')
logger.debug('What?')
out = handler.export_records()
assert len(out) == 1
assert out[0].message == 'Test it'
assert len(handler.export_records(logging.DEBUG)) == 2
l2 = logging.getLogger('testlist2')
h2 = utils.ListHandler()
l2.addHandler(h2)
l2.error('Second fail')
out = h2.export_records()
assert len(out) == 1
assert out[0].message == 'Second fail'
def test_hijack_loggers(mocker):
old_handler = mocker.MagicMock()
new_handler = mocker.MagicMock()
mocker.patch('solarforecastarbiter.utils.ListHandler',
return_value=new_handler)
logger = logging.getLogger('testhijack')
logger.addHandler(old_handler)
assert logger.handlers[0] == old_handler
with utils.hijack_loggers(['testhijack']):
assert logger.handlers[0] == new_handler
assert logger.handlers[0] == old_handler
def test_hijack_loggers_sentry(mocker):
events = set()
def before_send(event, hint):
events.add(event['logger'])
return
sentry_sdk.init(
"https://[email protected]/0",
before_send=before_send)
logger = logging.getLogger('testlog')
with utils.hijack_loggers(['testlog']):
logging.getLogger('root').error('will show up')
logger.error('AHHH')
assert 'root' in events
assert 'testlog' not in events
events = set()
logging.getLogger('root').error('will show up')
logger.error('AHHH')
assert 'root' in events
assert 'testlog' in events
@pytest.mark.parametrize('data,freq,expected', [
(pd.Series(index=pd.DatetimeIndex([]), dtype=float), '5min',
[pd.Series(index=pd.DatetimeIndex([]), dtype=float)]),
(pd.Series([1.0], index=pd.DatetimeIndex(['2020-01-01T00:00Z'])),
'5min',
[pd.Series([1.0], index=pd.DatetimeIndex(['2020-01-01T00:00Z']))]),
(pd.Series(
[1.0, 2.0, 3.0],
index=pd.date_range('2020-01-01T00:00Z', freq='1h', periods=3)),
'1h',
[pd.Series(
[1.0, 2.0, 3.0],
index=pd.date_range('2020-01-01T00:00Z', freq='1h', periods=3))]),
(pd.Series(
[1.0, 2.0, 4.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z',
'2020-01-01T04:00Z'])),
'1h',
[pd.Series(
[1.0, 2.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z'])),
pd.Series(
[4.0],
index=pd.DatetimeIndex(['2020-01-01T04:00Z'])),
]),
(pd.Series(
[1.0, 3.0, 5.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T03:00Z',
'2020-01-01T05:00Z'])),
'1h',
[pd.Series(
[1.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z'])),
pd.Series(
[3.0],
index=pd.DatetimeIndex(['2020-01-01T03:00Z'])),
pd.Series(
[5.0],
index=pd.DatetimeIndex(['2020-01-01T05:00Z'])),
]),
(pd.DataFrame(index=pd.DatetimeIndex([]), dtype=float), '1h',
[pd.DataFrame(index=pd.DatetimeIndex([]), dtype=float)]),
(pd.DataFrame(
{'a': [1.0, 2.0, 4.0], 'b': [11.0, 12.0, 14.0]},
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z',
'2020-01-01T04:00Z'])),
'1h',
[pd.DataFrame(
{'a': [1.0, 2.0], 'b': [11.0, 12.0]},
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z'])),
pd.DataFrame(
{'a': [4.0], 'b': [14.0]},
index=pd.DatetimeIndex(['2020-01-01T04:00Z'])),
]),
(pd.DataFrame(
{'_cid': [1.0, 2.0, 4.0], '_cid0': [11.0, 12.0, 14.0]},
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z',
'2020-01-01T04:00Z'])),
'1h',
[pd.DataFrame(
{'_cid': [1.0, 2.0], '_cid0': [11.0, 12.0]},
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z'])),
pd.DataFrame(
{'_cid': [4.0], '_cid0': [14.0]},
index=pd.DatetimeIndex(['2020-01-01T04:00Z'])),
]),
(pd.DataFrame(
[[0.0, 1.0], [2.0, 3.0]],
columns=pd.MultiIndex.from_product([[0], ['a', 'b']]),
index=pd.DatetimeIndex(['2020-01-01T00:00Z', '2020-01-02T00:00Z'])),
'12h',
[pd.DataFrame(
[[0.0, 1.0]],
columns= | pd.MultiIndex.from_product([[0], ['a', 'b']]) | pandas.MultiIndex.from_product |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 29 13:13:47 2019
Implement a Naive Bayes Classifier
@author: liang257
"""
import pandas as pd
import numpy as np
'''read data'''
train_data = | pd.read_csv("trainingSet.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 13 22:45:00 2018
@author: benmo
"""
import pandas as pd, numpy as np, dask.dataframe as ddf
import quandl
import sys, os, socket
import pickle
from dask import delayed
from difflib import SequenceMatcher
from matplotlib.dates import bytespdate2num, num2date
from matplotlib.ticker import Formatter
import re
from itertools import permutations, product, chain
from functools import reduce
import struct
similar = lambda a, b: SequenceMatcher(None, a, b).ratio()
crs4326 = {'init': 'epsg:4326'}
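# Editorial note: SequenceMatcher.ratio() returns 2*M/T, where M is the number
# of matched characters and T is the combined length of both strings, e.g.
# similar('color', 'colour') -> 2*5/11 ~= 0.91.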
def mostSimilar(x,term):
temp = np.array([x,list(map(lambda x: similar(x,term), x))]).T
return pd.DataFrame(temp,
columns=['Name','Score']).sort_values('Score',ascending=False)
def getEconVars():
varz = pd.read_csv("/home/benmo/Data/Econ/Indicators/indicators.csv")
fedData = pickleLib.fedData()
econDict = {}
for col in varz.columns:
temp = varz[col].dropna()
econDict[col] = {}
for var in temp:
econDict[col][var] = mostSimilar(fedData.item, var).iloc[:5].set_index(
'Name').to_dict()
return econDict
#"/home/benmo/Data/PyObjects/commodities.pkl"
def get_commodities():
oil = quandl.get('CHRIS/CME_WS1', authtoken="<KEY>")
natgas = quandl.get('CHRIS/CME_NG1', authtoken="<KEY>")
gold = quandl.get('CHRIS/CME_GC1', authtoken="<KEY>")
rice = quandl.get('CHRIS/ODE_TR1', authtoken="<KEY>")
grain = quandl.get('CHRIS/EUREX_FCGR1', authtoken="<KEY>")
lumber = quandl.get('CHRIS/CME_LB1', authtoken="<KEY>")
steelCHN = quandl.get('CHRIS/SHFE_WR1', authtoken="<KEY>")
steelUSA = quandl.get('CHRIS/CME_HR1', authtoken="<KEY>")
coal = quandl.get('CHRIS/SGX_CFF1', authtoken="<KEY>")
df = pd.DataFrame([])
for (key, temp) in zip(['Oil', 'Natural Gas', 'Gold', 'Rice', 'Grain',
'Lumber', 'SteelCHN', 'SteelUSA', 'Coal'], [oil, natgas, gold, rice,
grain, lumber, steelCHN,
steelUSA, coal]):
temp['Commodity'] = key
df = df.append(temp)
return df
def get_etfs():
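    # NOTE (editorial): this body is currently a verbatim copy of
    # get_commodities() above -- it pulls the same Quandl commodity futures
    # series rather than ETF data, presumably as a placeholder.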
oil = quandl.get('CHRIS/CME_WS1', authtoken="<KEY>")
natgas = quandl.get('CHRIS/CME_NG1', authtoken="<KEY>")
gold = quandl.get('CHRIS/CME_GC1', authtoken="<KEY>")
rice = quandl.get('CHRIS/ODE_TR1', authtoken="<KEY>")
grain = quandl.get('CHRIS/EUREX_FCGR1', authtoken="<KEY>")
lumber = quandl.get('CHRIS/CME_LB1', authtoken="<KEY>")
steelCHN = quandl.get('CHRIS/SHFE_WR1', authtoken="<KEY>")
steelUSA = quandl.get('CHRIS/CME_HR1', authtoken="<KEY>")
coal = quandl.get('CHRIS/SGX_CFF1', authtoken="<KEY>")
df = pd.DataFrame([])
for (key, temp) in zip(['Oil', 'Natural Gas', 'Gold', 'Rice', 'Grain',
'Lumber', 'SteelCHN', 'SteelUSA', 'Coal'], [oil, natgas, gold, rice,
grain, lumber, steelCHN,
steelUSA, coal]):
temp['Commodity'] = key
df = df.append(temp)
return df
def print_lines(fn, N, out=None):
    fout = open(out, 'w+') if out is not None else None
    f = open(fn)
    for i in range(N):
        line = f.readline()
        print(line) if fout is None else fout.write(line)
    f.close()
    fout.close() if fout is not None else print('no file written')
tuple2str = lambda name: name if isinstance(name, tuple) ==False else reduce(lambda x, y: str(x)
.replace('.0','') + '_' + str(y).replace('.0',''), list(map(lambda xi: str(xi), name)))
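# Editorial note: tuple2str flattens a tuple key into an underscore-joined
# string and strips a trailing '.0' from float parts, e.g.
#   tuple2str('GDP')            -> 'GDP'
#   tuple2str(('CPI', 2017.0))  -> 'CPI_2017'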
def search_str(regx, string):
return True if re.search(regx, string) else False
def returnFiltered(term, data):
temp = list(filter(lambda x: term
in x.lower(), data['item']))
return data[data.isin(temp).item==True]
def egen(data, f, applyto, groupby, column_filt, newcol):
tmp = data[column_filt]
tmp[newcol] = tmp.groupby(groupby).apply(f)
tmp['index'] = tmp.index
return pd.merge(data, tmp, how='inner', left_on=column_filt, right_on =applyto + ['index'])
def read_idx(filename):
with open(filename, 'rb') as f:
zero, data_type, dims = struct.unpack('>HBB', f.read(4))
shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims))
        return np.frombuffer(f.read(), dtype=np.uint8).reshape(shape)
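# Editorial note: read_idx parses the big-endian IDX layout used by
# MNIST-style files -- two zero bytes, a data-type byte and a dimension-count
# byte ('>HBB'), then one 4-byte unsigned int per dimension, followed by the
# raw uint8 payload that is reshaped above.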
class MyComp():
cName = socket.gethostname()
if sys.platform == 'linux':
ffProfilePath = "/home/benmo/.mozilla/firefox/w55ako72.dev-edition-default"
picklePath = "/home/benmo/Data/PyObjects"
else:
if cName == 'DESKTOP-HOKP1GT':
ffProfilePath = "C:/Users/benmo/AppData/Roaming/Mozilla/Firefox/Profiles/it0uu1ch.default"
uofcPath = "D:/OneDrive - University of Calgary"
financePath = "C:/users/benmo/OneDrive/2016& 2017Classes/Financial Econ"
picklePath = "D:/data/pyobjects"
classesPath = "C:/users/benmo/OneDrive/2016& 2017Classes"
else:
ffProfilePath = "C:/Users/benmo/AppData/Roaming/Mozilla/Firefox/Profiles/vpv78y9i.default"
uofcPath = "D:/benmo/OneDrive - University of Calgary"
financePath = "D:/benmo/OneDrive/2016& 2017Classes/Financial Econ"
picklePath = "D:/data/pyobjects"
classesPath = "D:/benmo/OneDrive/2016& 2017Classes"
def mySAS():
bob = pd.read_sas("D:/data/Personal Research/pcg15Public/pcg15Public/epcg15.sas7bdat")
return bob
def collect_csv(path, na_val='NA',skiprows=0, dtype_map=None):
try:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, skiprows=skiprows, dtype=dtype_map)))
except:
try:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, low_memory=False, skiprows=skiprows, dtype=dtype_map)))
except:
try:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, low_memory=False, dtype=str,
skiprows=skiprows)))
except:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, low_memory=False, dtype=str,
na_values=na_val, skiprows=skiprows)))
'''example:
bob = ddf.read_csv('Z:/Electricity/*.csv',skiprows=2,dtype={'Date': str,
'HE': str,
'Import/Export': str,
'Asset Id': str,
'Block Number': str,
'Price': 'float64',
'From': 'int64',
'To': 'int64',
'Size': 'int64',
'Available': 'int64',
'Dispatched': str,
'Dispatched MW': 'int64',
'Flexible': str,
'Offer Control': str})
bob=bob.drop('Effective Date/Time',axis=1)
bob.compute().to_csv('Z:/Electricity/Combined.csv',index=False)
'''
def nestmap(outer, outerf, innerf, mapping=list):
return map(lambda x: outerf(mapping(map(lambda inner: innerf(inner), x))), outer)
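# Editorial note: nestmap maps `innerf` over each inner iterable, materialises
# it with `mapping`, then applies `outerf`; e.g. (hypothetical values)
#   list(nestmap([[1, 2], [3, 4]], sum, lambda v: v * 10))  ->  [30, 70]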
def test():
bob = | pd.read_csv("C:/users/benmo/desktop/fedReserve.csv") | pandas.read_csv |
from typing import Dict
from typing import Union
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.transforms import ResampleWithDistributionTransform
DistributionDict = Dict[str, pd.DataFrame]
@pytest.fixture
def daily_exog_ts() -> Dict[str, Union[TSDataset, DistributionDict]]:
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_2",
"target": [1] + 23 * [0] + [1] + 23 * [0],
}
)
df = pd.concat([df1, df2], ignore_index=True)
df_exog1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="D", periods=3),
"segment": "segment_1",
"regressor_exog": 2,
}
)
df_exog2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="D", periods=3),
"segment": "segment_2",
"regressor_exog": 40,
}
)
df_exog = pd.concat([df_exog1, df_exog2], ignore_index=True)
target1 = pd.DataFrame(
{
"fold": list(range(24)),
"distribution": 1 / 24,
}
)
target2 = pd.DataFrame(
{
"fold": list(range(24)),
"distribution": [1] + 23 * [0],
}
)
ts = TSDataset(df=TSDataset.to_dataset(df), freq="H", df_exog=TSDataset.to_dataset(df_exog))
distribution = {"segment_1": target1, "segment_2": target2}
return {"ts": ts, "distribution": distribution}
@pytest.fixture
def inplace_resampled_daily_exog_ts() -> TSDataset:
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_2",
"target": [1] + 23 * [0] + [1] + 23 * [0],
}
)
df = pd.concat([df1, df2], ignore_index=True)
df_exog1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=72),
"segment": "segment_1",
"regressor_exog": 2 / 24,
}
)
df_exog2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=72),
"segment": "segment_2",
"regressor_exog": [40] + 23 * [0] + [40] + 23 * [0] + [40] + 23 * [0],
}
)
df_exog = pd.concat([df_exog1, df_exog2], ignore_index=True)
ts = TSDataset(df=TSDataset.to_dataset(df), freq="H", df_exog=TSDataset.to_dataset(df_exog))
return ts
@pytest.fixture
def noninplace_resampled_daily_exog_ts() -> TSDataset:
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_2",
"target": [1] + 23 * [0] + [1] + 23 * [0],
}
)
df = pd.concat([df1, df2], ignore_index=True)
df_exog1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=72),
"segment": "segment_1",
"regressor_exog": [2] + 23 * [np.NAN] + [2] + 23 * [np.NAN] + [2] + 23 * [np.NAN],
"resampled_exog": 2 / 24,
}
)
df_exog2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=72),
"segment": "segment_2",
"regressor_exog": [40] + 23 * [np.NAN] + [40] + 23 * [np.NAN] + [40] + 23 * [np.NAN],
"resampled_exog": [40] + 23 * [0] + [40] + 23 * [0] + [40] + 23 * [0],
}
)
df_exog = pd.concat([df_exog1, df_exog2], ignore_index=True)
ts = TSDataset(df=TSDataset.to_dataset(df), freq="H", df_exog=TSDataset.to_dataset(df_exog))
return ts
@pytest.fixture
def weekly_exog_same_start_ts() -> Dict[str, Union[TSDataset, DistributionDict]]:
"""Target and exog columns start on Monday."""
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="D", periods=14),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="D", periods=14),
"segment": "segment_2",
"target": [1] + 6 * [0] + [1] + 6 * [0],
}
)
df = pd.concat([df1, df2], ignore_index=True)
df_exog1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="W", periods=3),
"segment": "segment_1",
"regressor_exog": 2,
}
)
df_exog2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="W", periods=3),
"segment": "segment_2",
"regressor_exog": 40,
}
)
df_exog = pd.concat([df_exog1, df_exog2], ignore_index=True)
target1 = pd.DataFrame(
{
"fold": list(range(7)),
"distribution": 1 / 7,
}
)
target2 = pd.DataFrame(
{
"fold": list(range(7)),
"distribution": [1] + 6 * [0],
}
)
distribution = {"segment_1": target1, "segment_2": target2}
ts = TSDataset(df=TSDataset.to_dataset(df), freq="D", df_exog=TSDataset.to_dataset(df_exog))
return {"ts": ts, "distribution": distribution}
@pytest.fixture
def weekly_exog_diff_start_ts() -> Dict[str, Union[TSDataset, DistributionDict]]:
"""Target starts on Thursday and exog starts on Monday."""
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-08", freq="D", periods=14),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-08", freq="D", periods=14),
"segment": "segment_2",
"target": [1] + 6 * [0] + [1] + 6 * [0],
}
)
df = pd.concat([df1, df2], ignore_index=True)
df_exog1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="W", periods=4),
"segment": "segment_1",
"regressor_exog": 2,
}
)
df_exog2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="W", periods=4),
"segment": "segment_2",
"regressor_exog": 40,
}
)
df_exog = pd.concat([df_exog1, df_exog2], ignore_index=True)
target1 = pd.DataFrame(
{
"fold": list(range(7)),
"distribution": 1 / 7,
}
)
target2 = pd.DataFrame(
{
"fold": list(range(7)),
"distribution": [0, 0, 0, 1, 0, 0, 0],
}
)
ts = TSDataset(df=TSDataset.to_dataset(df), freq="D", df_exog=TSDataset.to_dataset(df_exog))
distribution = {"segment_1": target1, "segment_2": target2}
return {"ts": ts, "distribution": distribution}
@pytest.fixture
def incompatible_freq_ts() -> TSDataset:
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-01", freq="7T", periods=20),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-01", freq="7T", periods=20),
"segment": "segment_2",
"target": 2,
}
)
df = | pd.concat([df1, df2], ignore_index=True) | pandas.concat |
import asyncio
from .integration_test_utils import setup_teardown_test, _generate_table_name, V3ioHeaders, V3ioError
from storey import build_flow, ReadCSV, WriteToCSV, Source, Reduce, Map, FlatMap, AsyncSource, WriteToParquet
import pandas as pd
import aiohttp
import pytest
import v3io
import uuid
@pytest.fixture()
def v3io_create_csv():
# Setup
file_path = _generate_table_name('bigdata/csv_test')
asyncio.run(_write_test_csv(file_path))
# Test runs
yield file_path
# Teardown
asyncio.run(_delete_file(file_path))
@pytest.fixture()
def v3io_teardown_file():
# Setup
file_path = _generate_table_name('bigdata/csv_test')
# Test runs
yield file_path
# Teardown
asyncio.run(_delete_file(file_path))
async def _write_test_csv(file_path):
connector = aiohttp.TCPConnector()
v3io_access = V3ioHeaders()
client_session = aiohttp.ClientSession(connector=connector)
try:
data = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
await client_session.put(f'{v3io_access._webapi_url}/{file_path}', data=data,
headers=v3io_access._get_put_file_headers, ssl=False)
finally:
await client_session.close()
async def _delete_file(path):
connector = aiohttp.TCPConnector()
v3io_access = V3ioHeaders()
client_session = aiohttp.ClientSession(connector=connector)
try:
response = await client_session.delete(f'{v3io_access._webapi_url}/{path}',
headers=v3io_access._get_put_file_headers, ssl=False)
if response.status >= 300 and response.status != 404 and response.status != 409:
body = await response.text()
raise V3ioError(f'Failed to delete item at {path}. Response status code was {response.status}: {body}')
finally:
await client_session.close()
def test_csv_reader_from_v3io(v3io_create_csv):
controller = build_flow([
ReadCSV(f'v3io:///{v3io_create_csv}', header=True),
FlatMap(lambda x: x),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 495
def test_csv_reader_from_v3io_error_on_file_not_found():
controller = build_flow([
ReadCSV('v3io:///bigdatra/tests/idontexist.csv', header=True),
]).run()
try:
controller.await_termination()
assert False
except FileNotFoundError:
pass
async def async_test_write_csv_to_v3io(v3io_teardown_csv):
controller = await build_flow([
AsyncSource(),
WriteToCSV(f'v3io:///{v3io_teardown_csv}', columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
await controller.emit([i, 10 * i])
await controller.terminate()
await controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.aio.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_csv.split('/', 1)
result = await v3io_client.object.get(container, path)
finally:
await v3io_client.close()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_to_v3io(v3io_teardown_file):
asyncio.run(async_test_write_csv_to_v3io(v3io_teardown_file))
def test_write_csv_with_dict_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_infer_columns_without_header_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
Source(),
WriteToCSV(file_path)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_from_lists_with_metadata_and_column_pruning_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['event_key=$key', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "event_key,n*10\nkey0,0\nkey1,10\nkey2,20\nkey3,30\nkey4,40\nkey5,50\nkey6,60\nkey7,70\nkey8,80\nkey9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_to_parquet_to_v3io(setup_teardown_test):
out_dir = f'v3io:///{setup_teardown_test}'
columns = ['my_int', 'my_string']
controller = build_flow([
Source(),
WriteToParquet(out_dir, partition_cols='my_int', columns=columns, max_events=1)
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append([i, f'this is {i}'])
expected = pd.DataFrame(expected, columns=columns, dtype='int32')
controller.terminate()
controller.await_termination()
read_back_df = | pd.read_parquet(out_dir, columns=columns) | pandas.read_parquet |
# -*- coding: utf-8 -*-
"""
Created on Sun May 2 22:57:59 2021
@author: <NAME> -Spatial structure index value distribution of urban streetscape
"""
from multiprocessing import Pool
from polar_metrics_pool import polar_metrics_single
from tqdm import tqdm
import glob,os
import pandas as pd
#packages\pylandstats\landscape.py has not been compiled for Transonic-Numba
columns=["fn_stem","fn_key","fn_idx","geometry",]+['total_area', 'proportion_of_landscape', 'number_of_patches',
'patch_density', 'largest_patch_index', 'total_edge', 'edge_density',
'landscape_shape_index', 'effective_mesh_size', 'area_mn', 'area_am',
'area_md', 'area_ra', 'area_sd', 'area_cv', 'perimeter_mn',
'perimeter_am', 'perimeter_md', 'perimeter_ra', 'perimeter_sd',
'perimeter_cv', 'perimeter_area_ratio_mn', 'perimeter_area_ratio_am',
'perimeter_area_ratio_md', 'perimeter_area_ratio_ra',
'perimeter_area_ratio_sd', 'perimeter_area_ratio_cv', 'shape_index_mn',
'shape_index_am', 'shape_index_md', 'shape_index_ra', 'shape_index_sd',
'shape_index_cv', 'fractal_dimension_mn', 'fractal_dimension_am',
'fractal_dimension_md', 'fractal_dimension_ra', 'fractal_dimension_sd',
'fractal_dimension_cv', 'euclidean_nearest_neighbor_mn',
'euclidean_nearest_neighbor_am', 'euclidean_nearest_neighbor_md',
'euclidean_nearest_neighbor_ra', 'euclidean_nearest_neighbor_sd',
'euclidean_nearest_neighbor_cv']
sky_class_level_metrics= | pd.DataFrame(columns=columns) | pandas.DataFrame |
import pandas as pd
df1 = pd.read_csv("student1.csv")
df2 = pd.read_csv("student2.csv")
result = | pd.concat([df1, df2]) | pandas.concat |
import pandas as pd
import numpy as np
import os
import csv
data_path='/Users/paulsharp/Documents/Dissertation_studies/data/QC_Applied'
output_path='/Users/paulsharp/Documents/Dissertation_studies/data'
self_report_path='/Users/paulsharp/Documents/Dissertation_studies/data'
os.chdir(self_report_path)
self_report_data=pd.read_csv('Self_report_full_data_all_timepoints.csv')
os.chdir(data_path)
subs_wave_2=[x for x in os.listdir(os.curdir) if x[17]=='2']
subs_wave_1=[x for x in os.listdir(os.curdir) if x[17]=='1']
sub_order_out=[['Subject_Order']]
os.chdir(output_path)
sub_order_df=pd.read_csv('Subject_Order_GFC_Feature_Matrix_amygdala_only.csv')
subjects=sub_order_df.Subject_Order
sub_order=sub_order_df.Subject_Order.tolist()
print(sub_order)
region_names=['dmpfc_left',
'dmpfc_right',
'vmpfc_left',
'vmpfc_right',
'vlpfc_left',
'vlpfc_right']
mast_csv_w1_Leftamyg=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
mast_csv_w1_Rightamyg=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
mast_csv_w2_Leftamyg=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
mast_csv_w2_Rightamyg=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
mast_csv_diff_left=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
mast_csv_diff_right=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
region_nums=[[96, 97, 104],[107, 116],[99, 102],[2, 110, 111, 112],
[82, 176, 177, 215, 240],[10, 123, 181, 184, 189, 209, 217, 241]]
os.chdir(data_path)
sub_count=0
for sub in sub_order:
sub_wave1=sub
print(sub_wave1)
current_sub=[sub]
sub_order_out.append(current_sub)
sub_wave2='NT2'+sub[-3:]
current_line1_left=[]
current_line1_left.append(sub_wave1)
current_line2_left=[]
current_line2_left.append(sub_wave2)
current_line1_right=[]
current_line1_right.append(sub_wave1)
current_line2_right=[]
current_line2_right.append(sub_wave2)
diff_left=[]
diff_left.append(sub_wave1)
diff_right=[]
diff_right.append(sub_wave1)
for region in region_nums:
for reg in region:
            #Define amygdala connectomes
            #wave1 connectome (also used to locate each ROI's row index)
wave1_gfc=pd.read_csv('GFC_connectome_{}_QCapplied.csv'.format(sub_wave1))
#determine which ROW each ROI in list region_num is in current dataframe
counter=0
for i in wave1_gfc.columns:
if i == '{}.0'.format(reg):
index_reg=counter
counter+=1
wave2_gfc=pd.read_csv('GFC_connectome_{}_QCapplied.csv'.format(sub_wave2))
amygdala_left_w2=wave2_gfc['243.0'][index_reg]
current_line2_left.append(amygdala_left_w2)
amygdala_right_w2=wave2_gfc['244.0'][index_reg]
current_line2_right.append(amygdala_right_w2)
#wave1
amygdala_left_w1=wave1_gfc['243.0'][index_reg]
current_line1_left.append(amygdala_left_w1)
amygdala_right_w1=wave1_gfc['244.0'][index_reg]
            current_line1_right.append(amygdala_right_w1)
#Wave2 - Wave 1 (longitudinal)
diff_amygdalae_left=amygdala_left_w2-amygdala_left_w1
diff_left.append(diff_amygdalae_left)
            diff_amygdalae_right=amygdala_right_w2-amygdala_right_w1
diff_right.append(diff_amygdalae_right)
mast_csv_w1_Leftamyg.append(current_line1_left)
mast_csv_w1_Rightamyg.append(current_line1_right)
mast_csv_w2_Leftamyg.append(current_line2_left)
mast_csv_w2_Rightamyg.append(current_line2_right)
mast_csv_diff_left.append(diff_left)
mast_csv_diff_right.append(diff_right)
os.chdir(output_path)
#run correlations between self-report data and ROIs
mast_csv_w1_Leftamyg=pd.DataFrame(mast_csv_w1_Leftamyg[1:],columns=mast_csv_w1_Leftamyg[0])
print(mast_csv_w1_Leftamyg)
mast_csv_w1_Rightamyg=pd.DataFrame(mast_csv_w1_Rightamyg[1:],columns=mast_csv_w1_Rightamyg[0])
mast_csv_w2_Leftamyg=pd.DataFrame(mast_csv_w2_Leftamyg[1:],columns=mast_csv_w2_Leftamyg[0])
mast_csv_w2_Rightamyg=pd.DataFrame(mast_csv_w2_Rightamyg[1:],columns=mast_csv_w2_Rightamyg[0])
mast_csv_diff_left=pd.DataFrame(mast_csv_diff_left[1:],columns=mast_csv_diff_left[0])
mast_csv_diff_right=pd.DataFrame(mast_csv_diff_right[1:],columns=mast_csv_diff_right[0])
pd_1=pd.concat([self_report_data,mast_csv_w1_Leftamyg],axis=1).corr()
pd_2=pd.concat([self_report_data,mast_csv_w1_Rightamyg],axis=1).corr()
pd_3=pd.concat([self_report_data,mast_csv_w2_Leftamyg],axis=1).corr()
pd_4=pd.concat([self_report_data,mast_csv_w2_Rightamyg],axis=1).corr()
pd_5=pd.concat([self_report_data,mast_csv_diff_left],axis=1).corr()
pd_6=pd.concat([self_report_data,mast_csv_diff_right],axis=1).corr()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import glob
from shutil import copyfile
import hashlib
import json
import sys
import subprocess
import logging
from multiprocessing import Pool
import pdb
import time
import pickle
import numpy as np
import pandas as pd
import pydicom as dicom
import png
#pydicom imports needed to handle data errors
from pydicom import config
from pydicom import datadict
from pydicom import values
with open('config.json', 'r') as f:
niffler = json.load(f)
#Get variables for StoreScp from config.json.
print_images = niffler['PrintImages']
print_only_common_headers = niffler['CommonHeadersOnly']
dicom_home = niffler['DICOMHome'] #the folder containing your dicom files
output_directory = niffler['OutputDirectory']
depth = niffler['Depth']
processes = niffler['UseProcesses'] #how many processes to use.
flattened_to_level = niffler['FlattenedToLevel']
email = niffler['YourEmail']
send_email = niffler['SendEmail']
no_splits = niffler['SplitIntoChunks']
is16Bit = niffler['is16Bit']
png_destination = output_directory + '/extracted-images/'
failed = output_directory +'/failed-dicom/'
maps_directory = output_directory + '/maps/'
meta_directory = output_directory + '/meta/'
LOG_FILENAME = output_directory + '/ImageExtractor.out'
pickle_file = output_directory + '/ImageExtractor.pickle'
# record the start time
t_start = time.time()
if not os.path.exists(output_directory):
os.makedirs(output_directory)
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
if not os.path.exists(maps_directory):
os.makedirs(maps_directory)
if not os.path.exists(meta_directory):
os.makedirs(meta_directory)
if not os.path.exists(png_destination):
os.makedirs(png_destination)
if not os.path.exists(failed):
os.makedirs(failed)
if not os.path.exists(failed + "/1"):
os.makedirs(failed + "/1")
if not os.path.exists(failed + "/2"):
os.makedirs(failed + "/2")
if not os.path.exists(failed + "/3"):
os.makedirs(failed + "/3")
if not os.path.exists(failed + "/4"):
os.makedirs(failed + "/4")
#%%Function for getting tuple for field,val pairs
def get_tuples(plan, outlist = None, key = ""):
if len(key)>0:
key = key + "_"
if not outlist:
outlist = []
for aa in plan.dir():
try:
hasattr(plan,aa)
except TypeError as e:
logging.warning('Type Error encountered')
if (hasattr(plan, aa) and aa!='PixelData'):
value = getattr(plan, aa)
start = len(outlist)
#if dicom sequence extract tags from each element
if type(value) is dicom.sequence.Sequence:
for nn, ss in enumerate(list(value)):
newkey = "_".join([key,("%d"%nn),aa]) if len(key) else "_".join([("%d"%nn),aa])
candidate = get_tuples(ss,outlist=None,key=newkey)
#if extracted tuples are too big condense to a string
if len(candidate)>2000:
outlist.append((newkey,str(candidate)))
else:
outlist.extend(candidate)
else:
if type(value) is dicom.valuerep.DSfloat:
value = float(value)
elif type(value) is dicom.valuerep.IS:
value = str(value)
elif type(value) is dicom.valuerep.MultiValue:
value = tuple(value)
elif type(value) is dicom.uid.UID:
value = str(value)
outlist.append((key + aa, value)) #appends name, value pair for this file. these are later concatenated to the dataframe
return outlist
def extract_headers(f_list_elem):
nn,ff = f_list_elem # unpack enumerated list
plan = dicom.dcmread(ff, force=True) #reads in dicom file
#checks if this file has an image
c=True
try:
check=plan.pixel_array #throws error if dicom file has no image
except:
c = False
kv = get_tuples(plan) #gets tuple for field,val pairs for this file. function defined above
    # a single dicom header should not normally produce more than a few hundred field/value pairs
if len(kv)>500:
logging.debug(str(len(kv)) + " dicoms produced by " + ff)
kv.append(('file',chunk[nn])) #adds my custom field with the original filepath
kv.append(('has_pix_array',c)) #adds my custom field with if file has image
if c:
kv.append(('category','uncategorized')) #adds my custom category field - useful if classifying images before processing
else:
kv.append(('category','no image')) #adds my custom category field, makes note as imageless
return dict(kv)
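# Illustrative single-file sketch (not how the pipeline runs it; the real code
# feeds extract_headers through a multiprocessing Pool further below, and the
# path here is a made-up placeholder):
#
#   chunk = ['/data/dicom/study1/IM-0001-0001.dcm']
#   header_dict = extract_headers((0, chunk[0]))
#   print(header_dict['file'], header_dict['has_pix_array'])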
#%%Function to extract pixel array information
#takes an integer used to index into the global filedata dataframe
#returns tuple of
# filemapping: dicom to png paths (as str)
# fail_path: dicom to failed folder (as tuple)
# found_err: error code produced when processing
def extract_images(i):
ds = dicom.dcmread(filedata.iloc[i].loc['file'], force=True) #read file in
found_err=None
filemapping = ""
fail_path = ""
try:
im=ds.pixel_array #pull image from read dicom
imName=os.path.split(filedata.iloc[i].loc['file'])[1][:-4] #get file name ex: IM-0107-0022
if flattened_to_level == 'patient':
ID=filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.
folderName = hashlib.sha224(ID.encode('utf-8')).hexdigest()
#check for existence of patient folder. Create if it does not exist.
os.makedirs(png_destination + folderName,exist_ok=True)
elif flattened_to_level == 'study':
ID1=filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.
try:
ID2=filedata.iloc[i].loc['StudyInstanceUID'] # Unique identifier for the Study.
except:
ID2='ALL-STUDIES'
folderName = hashlib.sha224(ID1.encode('utf-8')).hexdigest() + "/" + \
hashlib.sha224(ID2.encode('utf-8')).hexdigest()
#check for existence of the folder tree patient/study/series. Create if it does not exist.
os.makedirs(png_destination + folderName,exist_ok=True)
else:
ID1=filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.
try:
ID2=filedata.iloc[i].loc['StudyInstanceUID'] # Unique identifier for the Study.
ID3=filedata.iloc[i].loc['SeriesInstanceUID'] # Unique identifier of the Series.
except:
ID2='ALL-STUDIES'
ID3='ALL-SERIES'
folderName = hashlib.sha224(ID1.encode('utf-8')).hexdigest() + "/" + \
hashlib.sha224(ID2.encode('utf-8')).hexdigest() + "/" + hashlib.sha224(ID3.encode('utf-8')).hexdigest()
#check for existence of the folder tree patient/study/series. Create if it does not exist.
os.makedirs(png_destination + folderName,exist_ok=True)
pngfile = png_destination+folderName+'/' + hashlib.sha224(imName.encode('utf-8')).hexdigest() + '.png'
dicom_path = filedata.iloc[i].loc['file']
image_path = png_destination+folderName+'/' + hashlib.sha224(imName.encode('utf-8')).hexdigest() + '.png'
if is16Bit:
# write the PNG file as a 16-bit greyscale
image_2d = ds.pixel_array.astype(np.double)
            # # Rescaling grey scale between 0-65535 (16-bit)
image_2d_scaled = (np.maximum(image_2d,0) / image_2d.max()) * 65535.0
# # Convert to uint
shape = ds.pixel_array.shape
image_2d_scaled = np.uint16(image_2d_scaled)
with open(pngfile , 'wb') as png_file:
w = png.Writer(shape[1], shape[0], greyscale=True,bitdepth=16)
w.write(png_file, image_2d_scaled)
else:
shape = ds.pixel_array.shape
# # Convert to float to avoid overflow or underflow losses.
image_2d = ds.pixel_array.astype(float)
#
# # Rescaling grey scale between 0-255
image_2d_scaled = (np.maximum(image_2d,0) / image_2d.max()) * 255.0
#
# # Convert to uint
image_2d_scaled = np.uint8(image_2d_scaled)
# # Write the PNG file
with open(pngfile , 'wb') as png_file:
w = png.Writer(shape[1], shape[0], greyscale=True)
w.write(png_file, image_2d_scaled)
filemapping = filedata.iloc[i].loc['file'] + ', ' + pngfile + '\n'
except AttributeError as error:
found_err = error
logging.error(found_err)
fail_path = filedata.iloc[i].loc['file'], failed + '1/' + os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'
except ValueError as error:
found_err = error
logging.error(found_err)
fail_path = filedata.iloc[i].loc['file'], failed + '2/' + os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'
    except Exception as error:
        found_err = error
        logging.error(found_err)
        fail_path = filedata.iloc[i].loc['file'], failed + '4/' + os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'
    except BaseException as error:
        # listed after Exception so the handler above is reachable; this clause now
        # only catches non-Exception errors such as KeyboardInterrupt/SystemExit
        found_err = error
        logging.error(found_err)
        fail_path = filedata.iloc[i].loc['file'], failed + '3/' + os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'
return (filemapping,fail_path,found_err)
#%%Function when pydicom fails to read a value attempt to read as
#other types.
def fix_mismatch_callback(raw_elem, **kwargs):
try:
values.convert_value(raw_elem.VR, raw_elem)
except TypeError:
for vr in kwargs['with_VRs']:
try:
values.convert_value(vr, raw_elem)
except TypeError:
pass
else:
raw_elem = raw_elem._replace(VR=vr)
break
return raw_elem
def get_path(depth):
directory = dicom_home + '/'
i = 0
while i < depth:
directory += "*/"
i += 1
return directory + "*.dcm"
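# Example (assuming dicom_home = '/data/dicom'): get_path(2) returns
# '/data/dicom/*/*/*.dcm', i.e. a glob pattern two folder levels deep.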
#%%Function used by pydicom.
def fix_mismatch(with_VRs=['PN', 'DS', 'IS']):
"""A callback function to check that RawDataElements are translatable
with their provided VRs. If not, re-attempt translation using
some other translators.
Parameters
----------
with_VRs : list, [['PN', 'DS', 'IS']]
A list of VR strings to attempt if the raw data element value cannot
be translated with the raw data element's VR.
Returns
-------
No return value. The callback function will return either
the original RawDataElement instance, or one with a fixed VR.
"""
dicom.config.data_element_callback = fix_mismatch_callback
config.data_element_callback_kwargs = {
'with_VRs': with_VRs,
}
fix_mismatch()
if processes == 0.5: # use half the cores to avoid high ram usage
core_count = int(os.cpu_count()/2)
elif processes == 0: # use all the cores
core_count = int(os.cpu_count())
elif processes < os.cpu_count(): # use the specified number of cores to avoid high ram usage
core_count = processes
else:
core_count = int(os.cpu_count())
#%% get set up to create dataframe
dirs = os.listdir(dicom_home)
#gets all dicom files. if editing this code, get filelist into the format of a list of strings,
#with each string as the file path to a different dicom file.
file_path = get_path(depth)
if os.path.isfile(pickle_file):
f=open(pickle_file,'rb')
filelist=pickle.load(f)
else:
filelist=glob.glob(file_path, recursive=True) #this searches the folders at the depth we request and finds all dicoms
pickle.dump(filelist,open(pickle_file,'wb'))
file_chunks = np.array_split(filelist,no_splits)
logging.info('Number of dicom files: ' + str(len(filelist)))
try:
ff = filelist[0] #load first file as a template to look at all
except IndexError:
logging.error("There is no file present in the given folder in " + file_path)
sys.exit(1)
plan = dicom.dcmread(ff, force=True)
logging.debug('Loaded the first file successfully')
keys = [(aa) for aa in plan.dir() if (hasattr(plan, aa) and aa!='PixelData')]
#%%checks for images in fields and prints where they are
for field in plan.dir():
if (hasattr(plan, field) and field!='PixelData'):
entry = getattr(plan, field)
if type(entry) is bytes:
logging.debug(field)
logging.debug(str(entry))
for i,chunk in enumerate(file_chunks):
csv_destination = "{}/meta/metadata_{}.csv".format(output_directory,i)
mappings ="{}/maps/mapping_{}.csv".format(output_directory,i)
fm = open(mappings, "w+")
filemapping = 'Original DICOM file location, PNG location \n'
fm.write(filemapping)
# add a check to see if the metadata has already been extracted
#%%step through whole file list, read in file, append fields to future dataframe of all files
headerlist = []
#start up a multi processing pool
#for every item in filelist send data to a subprocess and run extract_headers func
#output is then added to headerlist as they are completed (no ordering is done)
with Pool(core_count) as p:
res= p.imap_unordered(extract_headers,enumerate(chunk))
for i,e in enumerate(res):
headerlist.append(e)
    data = pd.DataFrame(headerlist)
# %% Imports
import os
import sys
import pandas as pd
import numpy as np
# %% Setup paths
HomeDIR='Tentin-Quarantino'
wd=os.path.dirname(os.path.realpath(__file__))
DIR=wd[:wd.find(HomeDIR)+len(HomeDIR)]
os.chdir(DIR)
homedir = DIR
datadir = f"{homedir}/data/us/"
sys.path.append(os.getcwd())
# %% load mobility data
mobility_df = pd.read_csv(datadir + 'mobility/DL-us-m50_index.csv')
# %%
#-- Gather necessary counties from mobility data
# cast fips to integers
mobility_df = mobility_df[mobility_df['fips'].notna()] # remove entries without fips (us aggregate)
mobility_df['fips'] = mobility_df['fips'].astype(int) # cast to int
# Deal with New York City
nyc_fips = ['36061', '36005', '36047', '36081', '36085']
# Average mobility data for these counties
nyc_avg = mobility_df.loc[mobility_df.fips.isin(nyc_fips),'2020-03-01':].mean()
# Put in as values for 36061
mobility_df.loc[mobility_df['fips'] == 36061,'2020-03-01':] = nyc_avg.values
# Keep only relavent counties
#mobility_df = mobility_df[mobility_df.fips.isin([36061, 1073, 56035, 6037])]
# Drop fips < 1000 (ie. non-county id's)
mobility_df = mobility_df[mobility_df['fips'] > 1000]
# %% Convert mobility data column headers to date_processed format
global_dayzero = pd.to_datetime('2020 Jan 21')
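# Possible continuation (sketch only; the script fragment ends here): rename each
# daily mobility column to the number of days elapsed since global_dayzero.
#
#   date_cols = [c for c in mobility_df.columns if c.startswith('2020-')]
#   day_offsets = {c: (pd.to_datetime(c) - global_dayzero).days for c in date_cols}
#   mobility_df = mobility_df.rename(columns=day_offsets)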
### This python script is used to perform the keyword search in several steps, allocate the remaining rows to the specified domains & perform a post-processing task based on manually selected similarity scores. ###
import pandas as pd
import os
import progressbar
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import json
import logging
import threading
import time
from nltk.corpus import stopwords
import string
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from nltk.tokenize import word_tokenize
def thread_function(name):
logging.info("Thread %s: starting", name)
time.sleep(2)
logging.info("Thread %s: finishing", name)
"""
session = None
def set_global_session():
global session
if not session:
session = requests.Session()
def download_site(url):
with session.get(url) as response:
name = multiprocessing.current_process().name
print(f"{name}:Read {len(response.content)} from {url}")
def download_all_sites(sites):
with multiprocessing.Pool(initializer=set_global_session) as pool:
pool.map(download_site, sites)
"""
path_parent = os.path.dirname(os.getcwd())
product_path = os.path.join(path_parent, 'src/data/product')
cleaned_top100_path = os.path.join(product_path, 'product_top100/cleaned')
cleaned_min3_path = os.path.join(product_path, 'product_minimum3/cleaned')
cluster_path = os.path.join(product_path, 'lspc2020_to_tablecorpus/Cleaned')
notebook_path = os.path.join(path_parent, 'notebooks')
def clean_clusters():
"""
iterate through all cluster_files;
clean them by using only valid top100 and min3 files after language detection;
    count how many tables include a certain product
:return:
"""
# list all valid files after language detection
data_files = [file for file in os.listdir(cleaned_min3_path) if file.endswith('.json.gz')]
data_files += [file for file in os.listdir(cleaned_top100_path) if file.endswith('.json.gz')]
cluster_files = [file for file in os.listdir(cluster_path) if file.endswith('.json.gz')]
# generate dictionaries with different information to track product allocation
allocation_with_table_ids_total_dict = {}
allocation_with_table_ids_set_dict = {}
allocation_amount_only_total_dict = {}
allocation_amount_only_set_dict = {}
unique_cluster_ids = []
count_files = 0
for cluster_file in cluster_files:
print(cluster_file)
df = pd.read_json(os.path.join(cluster_path, '{}'.format(cluster_file)), compression='gzip', lines=True)
# design new dataframe with valid tables only
df_cleaned = df[df['table_id'].isin(data_files)]
df_cleaned = df_cleaned.reset_index()
df_cleaned = df_cleaned.drop('index', axis=1)
# generate a unique list of cluster IDs
cluster_ids = df_cleaned['cluster_id'].tolist()
if unique_cluster_ids == []:
new_cluster_ids = list(set(cluster_ids))
else:
new_cluster_ids = list(set(cluster_ids) - set(unique_cluster_ids))
unique_cluster_ids += new_cluster_ids
unique_cluster_ids = list(set(unique_cluster_ids))
# add dictionary keys
new_cluster_ids_tables_dict = {key: [] for key in new_cluster_ids}
new_cluster_ids_amount_dict = {key: 0 for key in new_cluster_ids}
allocation_with_table_ids_total_dict.update(new_cluster_ids_tables_dict)
allocation_with_table_ids_set_dict.update(new_cluster_ids_tables_dict)
allocation_amount_only_total_dict.update(new_cluster_ids_amount_dict)
allocation_amount_only_set_dict.update(new_cluster_ids_amount_dict)
count = 0
with progressbar.ProgressBar(max_value=df_cleaned.shape[0]) as bar:
for i in range(df_cleaned.shape[0]): # iterate over rows
cluster_id = df_cleaned['cluster_id'][i]
table_id = df_cleaned['table_id'][i]
allocation_with_table_ids_total_dict[cluster_id].append(table_id) # write every table_id inside
allocation_amount_only_total_dict[cluster_id] += 1 # increment for every table_id
allocation_with_table_ids_set_dict[cluster_id] = list(
set(allocation_with_table_ids_total_dict[cluster_id])) # write only unique table_ids inside
allocation_amount_only_set_dict[cluster_id] = len(
allocation_with_table_ids_set_dict[cluster_id]) # increment only for unique table_ids
count += 1
bar.update(count)
count_files += 1
print('{} out of {} cluster files done'.format(count_files, len(cluster_files)))
# write to gzip compressed json file
df_cleaned.to_json(os.path.join(cluster_path, '{}'.format(cluster_file)), compression='gzip', orient='records',
lines=True)
# save dictionaries with allocation of products
with open(os.path.join(cluster_path, 'allocation_with_table_ids_total_dict.json'), 'w', encoding='utf-8') as f:
json.dump(allocation_with_table_ids_total_dict, f)
with open(os.path.join(cluster_path, 'allocation_with_table_ids_set_dict.json'), 'w', encoding='utf-8') as f:
json.dump(allocation_with_table_ids_set_dict, f)
with open(os.path.join(cluster_path, 'allocation_amount_only_total_dict.json'), 'w', encoding='utf-8') as f:
json.dump(allocation_amount_only_total_dict, f)
with open(os.path.join(cluster_path, 'allocation_amount_only_set_dict.json'), 'w', encoding='utf-8') as f:
json.dump(allocation_amount_only_set_dict, f)
def get_keywords():
"""
finds all important brands for clothes and electronics
:return: dictionary {'clothes' : [clothes_brand1, clothes_brand2, ...],
'electronics' : [electronics_brand1, electronics_brand2, ...]}
"""
print('get keywords')
# search for clothes brands top100
clothes_html = urlopen('https://fashionunited.com/i/most-valuable-fashion-brands/')
clothes_bsObj = BeautifulSoup(clothes_html.read(), 'lxml')
clothes_table = clothes_bsObj.find('table')
clothes_lines = clothes_table.find('tbody').find_all('tr')
clothes_list = []
for clothes_line in clothes_lines:
clothes_brand = clothes_line.get_text().split('\n')[2].lower()
clothes_list.append(clothes_brand)
# search for top electronic brands
req = Request('https://companiesmarketcap.com/electronics/largest-electronic-manufacturing-by-market-cap/',
headers={'User-Agent': 'Mozilla/5.0'})
electronics_html = urlopen(req)
electronics_bsObj = BeautifulSoup(electronics_html.read(), 'lxml')
electronics_lines = electronics_bsObj.find_all('tr')
electronics_list = []
for electronics_line in electronics_lines:
electronics_brand_info = electronics_line.find('a')
if electronics_brand_info != None:
electronics_brand = electronics_brand_info.find('div').get_text().split('\r')[0].lower()
electronics_list.append(electronics_brand)
# second page
electronics_list2 = ['intel', 'taiwan semiconductor manufacturing', 'samsung electronics',
'hon hai precision industry',
'hitachi', 'sony', 'panasonic', 'lg electronics', 'pegatron', 'mitsubishi electric',
'midea group',
'honeywell international', 'apple', 'dell technologies', 'hp', 'lenovo', 'quanta computer',
'canon',
'compal eLectronics', 'hewlett packard enterprise']
# only top 10
clothes_top10 = []
brands_dict = {'clothes': clothes_list, 'electronics1': electronics_list, 'electronics2': electronics_list2,
'electronics_total': list(set(electronics_list + electronics_list2))}
with open(os.path.join(product_path, 'brands_dict.json'), 'w', encoding='utf-8') as f:
json.dump(brands_dict, f)
print('getting keywords done')
return brands_dict
def get_new_keywords():
print('get keywords')
with open(os.path.join(product_path, 'brands_dict.json'), 'r', encoding='utf-8') as f:
brands_dict = json.load(f)
"""
# for bikes
bikes_html = urlopen('https://bikesreviewed.com/brands/')
bikes_bsObj = BeautifulSoup(bikes_html.read(), 'lxml')
bikes_lines = bikes_bsObj.find_all('h3')
bikes_list = []
for bikes_line in bikes_lines:
if len(bikes_line.get_text().split('. ')) > 1:
bikes_brand = bikes_line.get_text().split('. ')[1].lower()
else:
bikes_brand = bikes_line.get_text().lower()
bikes_list.append(bikes_brand)
bikes_list.append('nonch')
bikes2_html = urlopen('https://www.globalbrandsmagazine.com/top-bicycle-brands-in-the-world-2020/')
bikes2_bsObj = BeautifulSoup(bikes2_html.read(), 'lxml')
bikes2_lines = bikes2_bsObj.find_all('h3')
for bikes2_line in bikes2_lines:
bikes2_brand = bikes2_line.find('a').get_text().lower()
bikes_list.append(bikes2_brand)
bikes_list = [element.split('\u00a0')[1] if element.startswith('\u00a0') else element for element in bikes_list]
bikes_list = [element for element in bikes_list if element not in [
' 8 thoughts on “the best bike brands for 2021 – the top 60 road, mountain, hybrid and bmx bike manufacturers ranked”',
'lifestyle', '11. huffy bikes', 'leave a reply cancel reply', 'all-around brands', 'hybrid', 'road ']]
bikes_list.append('huffy bikes')
# removed giant, electric, folding manually
bikes_list = list(set(bikes_list))
brands_dict['bikes'] = bikes_list
# for drugstore
brands_dict['drugstore'] = ['avène', 'dove', 'jergens', 'mele', 'vichy', 'e.l.f.', 'bevel', 'eucerin', 'acnefree',
'maybelline', 'la roche-posay', 'odele', 'neutrogena', 'flamingo', 'inm', 'shea moisture',
'sheamoisture', 'olay', 'cerave', 'nyx', "pond’s", "pond's", 'ponds', 'pacifica',
'aquaphor', 'schick', 'differin', 'garnier', 'l’oréal paris', "l'oréal paris", 'revlon',
'cetaphil','roc', "burt's bees", "burt’s bees", 'sonia kashuk', 'pantene', 'aveeno', 'no7',
'rimell', 'wet n wild']
brands_dict['drugstore'] = list(set(brands_dict['drugstore']))
# for tools
tools_list1 = ['makita', 'bosch', 'dewalt', 'craftsman', 'stanley black & decker', 'ridgid tools', 'ridgid',
'kobalt', 'skil', 'husky tools', 'irwin', 'ryobi', 'milwaukee', 'ames', 'arrow', 'bostitch',
'channellock', 'cmt', 'dremel', 'duo-fast', 'estwing', 'freud', 'grip-rite', 'hilti',
'hitachi', 'irwin tools', 'leatherman', 'little giant ladder', 'marshalltown',
'master magnetics', 'paslode', 'porter-cable', 'red devil', 'rockwell automation', 'stabila',
'stanley', 'stiletto', 'vermont american', 'wener ladder', 'metabo hpt', 'festool', 'mafell',
'knipex', 'wiha', 'ingersoll-rand', 'senco', 'greenlee', 'knaack', 'caterpillar']
tools_list2 = []
# only if we want more here
tools_html = urlopen('https://www.toolup.com/shop-by-brand')
tools_bsObj = BeautifulSoup(tools_html.read(), 'lxml')
tools_lines = tools_bsObj.find_all('div', {'class':'brand-group'})
for tools_line in tools_lines:
tools_brand = tools_line.find_all('li')
for element in tools_brand:
tools_br = element.get_text().lower()
tools_list2.append(tools_br)
brands_dict['tools'] = list(set(tools_list1 + tools_list2))
# for cars
cars_list = []
req = Request('https://www.thetrendspotter.net/popular-car-brands/', headers={'User-Agent': 'Mozilla/5.0'})
cars_html = urlopen(req)
cars_bsObj = BeautifulSoup(cars_html.read(), 'lxml')
cars_lines = cars_bsObj.find_all('h2')
for cars_line in cars_lines:
if len(cars_line.get_text().split('. ')) > 1:
cars_brand = cars_line.get_text().split('. ')[1].lower()
cars_list.append(cars_brand)
cars_list += ['merce<NAME>', 'vw', 'yamaha', 'ferrari', 'bentley', 'ram trucks', 'pontiac', 'oldsmobile', 'maserati',
'<NAME>', 'bugatti', 'fiat', 'saab', 'suzuki', 'renault', 'peugeot', 'daewoo', 'studebaker',
'hudson', 'citroen', 'mg']
brands_dict['cars'] = list(set(cars_list))
# for technology
brands_dict['technology'] = ['samsung', '3m', 'abb', 'philips', 'schneider electric', 'sennheiser', 'siemens']
# modify in general manually
brands_dict['clothes'] += ['billabong', 'breitling', 'fila', 'hilfiger', 'pandora', 'ray-ban', 'rayban',
'timberland', 'new era', 'bosch']
brands_dict['clothes'] = list(set(brands_dict['clothes']))
brands_dict['electronics_total'] += ['huawei', 'logitech']
#brands_dict['electronics_total'].remove('samsung')
brands_dict['electronics_total'] = list(set(brands_dict['electronics_total']))
"""
random_brands = ['2-POWER', '2-Power', 'A&I Parts', 'ANGELIC DIAMONDS', 'Allison Kaufman',
'American Olean', 'Anuradha Art Jewellery', 'Ariat', 'Bijou Brigitte',
'Birkenstock', 'Black Diamond', 'Brilliant Earth', 'Caratlane', 'Carhartt', 'Casio',
'Chekich', 'DWS Jewellery', 'Dakine', 'Eastpak', 'Emporio Armani', 'Epson',
'Garmin', 'Garrett', 'Hamilton', 'Hopscotch', 'JBL', 'Jordan', 'Kawasaki',
'Kingston', 'LEGO', 'MSI', 'Medline', 'Peacocks', 'Pink Boutique',
'Reebok', '<NAME>', 'SanDisk', 'SareesBazaar',
'Select Fashion', 'Toshiba', 'Tumi', 'Unionwear', 'United Colors of Benetton',
'VOYLLA', '<NAME>', 'Wilson', 'Xerox', 'baginning', 'dorothyperkins', 'evans',
'nihaojewelry.com', 'topman']
random_brands = list(set(brand.lower() for brand in random_brands))
brands_dict['random'] = random_brands
with open(os.path.join(product_path, 'brands_dict.json'), 'w', encoding='utf-8') as f:
json.dump(brands_dict, f)
def clean_keywords():
print('clean keywords')
with open(os.path.join(product_path, 'brands_dict.json'), 'r', encoding='utf-8') as f:
brands_dict = json.load(f)
brands_dict['clothes_cleaned'] = ['prada', '<NAME>', '<NAME>', 'under armour', 'the north face',
'<NAME>', 'dolce & gabbana', 'adidas', 'puma', 'oakley', 'dior', 'chanel',
'gap',
'gucci', '<NAME>', 'patagonia', 'moncler', 'armani', 'burberry', 'nike']
brands_dict['electronics_cleaned'] = ['lenovo', 'canon', 'hitachi', 'resonant', 'sony', 'nvidia', 'nintendo',
'apple',
'samsung', 'yaskawa', 'asus', 'dell', 'hp', 'amd', 'nikon', 'xiaomi', 'cisco',
'panasonic', 'intel', 'flex']
with open(os.path.join(product_path, 'brands_dict.json'), 'w', encoding='utf-8') as f:
json.dump(brands_dict, f)
def keyword_search(data_path):
"""
product selection for phase 1b;
selects only "electronic products" for structured data and "clothes" for unstructured data
:return: two dictionaries for electronics, clothes each containing table and row ids
"""
print('run keyword search')
with open(os.path.join(product_path, 'brands_dict.json'), 'r', encoding='utf-8') as f:
brands_dict = json.load(f)
data_files = [file for file in os.listdir(data_path) if file.endswith('.json.gz')]
# for testing
# brands_dict['clothes_cleaned'].append('nejron') ##
# brands_dict['electronics_cleaned'].append('arip santoso') ##
entity = data_path.split('product_')[1]
print(entity)
# check whether dictionaries already exist
if os.path.isfile(os.path.join(product_path, 'product_clothes_v3', 'clothes_dict.json')):
with open(os.path.join(product_path, 'product_clothes_v3', 'clothes_dict.json'), 'r', encoding='utf-8') as f:
clothes_dict = json.load(f)
else:
clothes_dict = {'top100/cleaned': {key: [] for key in brands_dict['clothes']},
'minimum3/cleaned': {key: [] for key in brands_dict['clothes']}}
if os.path.isfile(os.path.join(product_path, 'product_electronics_v3', 'electronics_dict.json')):
with open(os.path.join(product_path, 'product_electronics_v3', 'electronics_dict.json'), 'r',
encoding='utf-8') as f:
electronics_dict = json.load(f)
else:
electronics_dict = {'top100/cleaned': {key: [] for key in brands_dict['electronics_total']},
'minimum3/cleaned': {key: [] for key in brands_dict['electronics_total']}}
if os.path.isfile(os.path.join(product_path, 'product_bikes', 'bikes_dict.json')):
with open(os.path.join(product_path, 'product_bikes', 'bikes_dict.json'), 'r', encoding='utf-8') as f:
bikes_dict = json.load(f)
else:
bikes_dict = {'top100/cleaned': {key: [] for key in brands_dict['bikes']},
'minimum3/cleaned': {key: [] for key in brands_dict['bikes']}}
if os.path.isfile(os.path.join(product_path, 'product_drugstore', 'drugstore_dict.json')):
with open(os.path.join(product_path, 'product_drugstore', 'drugstore_dict.json'), 'r', encoding='utf-8') as f:
drugstore_dict = json.load(f)
else:
drugstore_dict = {'top100/cleaned': {key: [] for key in brands_dict['drugstore']},
'minimum3/cleaned': {key: [] for key in brands_dict['drugstore']}}
if os.path.isfile(os.path.join(product_path, 'product_tools', 'tools_dict.json')):
with open(os.path.join(product_path, 'product_tools', 'tools_dict.json'), 'r', encoding='utf-8') as f:
tools_dict = json.load(f)
else:
tools_dict = {'top100/cleaned': {key: [] for key in brands_dict['tools']},
'minimum3/cleaned': {key: [] for key in brands_dict['tools']}}
if os.path.isfile(os.path.join(product_path, 'product_technology', 'technology_dict.json')):
with open(os.path.join(product_path, 'product_technology', 'technology_dict.json'), 'r', encoding='utf-8') as f:
technology_dict = json.load(f)
else:
technology_dict = {'top100/cleaned': {key: [] for key in brands_dict['technology']},
'minimum3/cleaned': {key: [] for key in brands_dict['technology']}}
if os.path.isfile(os.path.join(product_path, 'product_cars', 'cars_dict.json')):
with open(os.path.join(product_path, 'product_cars', 'cars_dict.json'), 'r', encoding='utf-8') as f:
cars_dict = json.load(f)
else:
cars_dict = {'top100/cleaned': {key: [] for key in brands_dict['cars']},
'minimum3/cleaned': {key: [] for key in brands_dict['cars']}}
if os.path.isfile(os.path.join(product_path, 'product_random', 'random_dict.json')):
with open(os.path.join(product_path, 'product_random', 'random_dict.json'), 'r', encoding='utf-8') as f:
random_dict = json.load(f)
else:
random_dict = {'top100/cleaned': {key: [] for key in brands_dict['random']},
'minimum3/cleaned': {key: [] for key in brands_dict['random']}}
count = 0
with progressbar.ProgressBar(max_value=len(data_files)) as bar:
for data_file in data_files:
# if data_file == 'Product_3dcartstores.com_September2020.json.gz': ## for testing
df = pd.read_json(os.path.join(data_path, '{}'.format(data_file)), compression='gzip', lines=True)
clothes_row_ids = []
electronics_row_ids = []
bikes_row_ids = []
drugstore_row_ids = []
tools_row_ids = []
technology_row_ids = []
cars_row_ids = []
random_row_ids = []
# iterrate over rows and look for keywords
if 'brand' in df.columns: # check whether column 'brand' exists
for i in range(df.shape[0]): # iterate over rows
# if i < 1000: # only for testing
row_id = int(df['row_id'][i])
cell = df['brand'][i]
if cell != None:
cell = str(cell).lower()
if cell in brands_dict['clothes']:
clothes_dict[entity][cell].append((data_file, row_id))
clothes_row_ids.append(row_id)
elif cell in brands_dict['electronics_total']:
electronics_dict[entity][cell].append((data_file, row_id))
electronics_row_ids.append(row_id)
elif cell in brands_dict['bikes']:
bikes_dict[entity][cell].append((data_file, row_id))
bikes_row_ids.append(row_id)
elif cell in brands_dict['cars']:
cars_dict[entity][cell].append((data_file, row_id))
cars_row_ids.append(row_id)
elif cell in brands_dict['technology']:
technology_dict[entity][cell].append((data_file, row_id))
technology_row_ids.append(row_id)
elif cell in brands_dict['tools']:
tools_dict[entity][cell].append((data_file, row_id))
tools_row_ids.append(row_id)
elif cell in brands_dict['drugstore']:
drugstore_dict[entity][cell].append((data_file, row_id))
drugstore_row_ids.append(row_id)
elif cell in brands_dict['random']:
random_dict[entity][cell].append((data_file, row_id))
random_row_ids.append(row_id)
elif 'name' in df.columns: # if column 'brand' does not exist check for first word in name column
df['brand'] = ''
# iterrate over rows
for i in range(df.shape[0]):
row_id = int(df['row_id'][i])
if df['name'][i] != None:
name_split_list = str(df['name'][i]).split(' ')
# check for first word in name column
cell = str(name_split_list[0]).lower()
if cell in brands_dict['electronics_total']:
electronics_dict[entity][cell].append((data_file, row_id))
electronics_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['clothes']:
clothes_dict[entity][cell].append((data_file, row_id))
clothes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['bikes']:
bikes_dict[entity][cell].append((data_file, row_id))
bikes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['cars']:
cars_dict[entity][cell].append((data_file, row_id))
cars_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['technology']:
technology_dict[entity][cell].append((data_file, row_id))
technology_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['tools']:
tools_dict[entity][cell].append((data_file, row_id))
tools_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['drugstore']:
drugstore_dict[entity][cell].append((data_file, row_id))
drugstore_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['random']:
random_dict[entity][cell].append((data_file, row_id))
random_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif len(name_split_list) > 1:
# check for two words (since ngrams brands)
cell = cell + ' ' + str(name_split_list[1]).lower()
if cell in brands_dict['electronics_total']:
electronics_dict[entity][cell].append((data_file, row_id))
electronics_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['clothes']:
clothes_dict[entity][cell].append((data_file, row_id))
clothes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['bikes']:
bikes_dict[entity][cell].append((data_file, row_id))
bikes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['cars']:
cars_dict[entity][cell].append((data_file, row_id))
cars_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['technology']:
technology_dict[entity][cell].append((data_file, row_id))
technology_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['tools']:
tools_dict[entity][cell].append((data_file, row_id))
tools_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['drugstore']:
drugstore_dict[entity][cell].append((data_file, row_id))
drugstore_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['random']:
random_dict[entity][cell].append((data_file, row_id))
random_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif len(name_split_list) > 2:
# check for three words (since ngrams brands)
cell = cell + ' ' + str(name_split_list[2]).lower()
if cell in brands_dict['electronics_total']:
electronics_dict[entity][cell].append((data_file, row_id))
electronics_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['clothes']:
clothes_dict[entity][cell].append((data_file, row_id))
clothes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['bikes']:
bikes_dict[entity][cell].append((data_file, row_id))
bikes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['cars']:
cars_dict[entity][cell].append((data_file, row_id))
cars_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['technology']:
technology_dict[entity][cell].append((data_file, row_id))
technology_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['tools']:
tools_dict[entity][cell].append((data_file, row_id))
tools_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['drugstore']:
drugstore_dict[entity][cell].append((data_file, row_id))
drugstore_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['random']:
random_dict[entity][cell].append((data_file, row_id))
random_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif len(name_split_list) > 3:
# check for four words (since ngrams brands)
                                    cell = cell + ' ' + str(name_split_list[3]).lower()
if cell in brands_dict['electronics_total']:
electronics_dict[entity][cell].append((data_file, row_id))
electronics_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['clothes']:
clothes_dict[entity][cell].append((data_file, row_id))
clothes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['bikes']:
bikes_dict[entity][cell].append((data_file, row_id))
bikes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['cars']:
cars_dict[entity][cell].append((data_file, row_id))
cars_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['technology']:
technology_dict[entity][cell].append((data_file, row_id))
technology_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['tools']:
tools_dict[entity][cell].append((data_file, row_id))
tools_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['drugstore']:
drugstore_dict[entity][cell].append((data_file, row_id))
drugstore_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['random']:
random_dict[entity][cell].append((data_file, row_id))
random_row_ids.append(row_id)
df.at[i, 'brand'] = cell
count += 1
bar.update(count)
# write selected data into seperate folders
clothes_df = df[df['row_id'].isin(clothes_row_ids)]
electronics_df = df[df['row_id'].isin(electronics_row_ids)]
bikes_df = df[df['row_id'].isin(bikes_row_ids)]
cars_df = df[df['row_id'].isin(cars_row_ids)]
technology_df = df[df['row_id'].isin(technology_row_ids)]
tools_df = df[df['row_id'].isin(tools_row_ids)]
drugstore_df = df[df['row_id'].isin(drugstore_row_ids)]
random_df = df[df['row_id'].isin(random_row_ids)]
if clothes_df.shape[0] > 0:
clothes_df.to_json(os.path.join(product_path, 'product_clothes_v3', data_file), compression='gzip',
orient='records',
lines=True)
if electronics_df.shape[0] > 0:
electronics_df.to_json(os.path.join(product_path, 'product_electronics_v3', data_file),
compression='gzip', orient='records',
lines=True)
if bikes_df.shape[0] > 0:
bikes_df.to_json(os.path.join(product_path, 'product_bikes', data_file),
compression='gzip', orient='records',
lines=True)
if cars_df.shape[0] > 0:
cars_df.to_json(os.path.join(product_path, 'product_cars', data_file),
compression='gzip', orient='records',
lines=True)
if technology_df.shape[0] > 0:
technology_df.to_json(os.path.join(product_path, 'product_technology', data_file),
compression='gzip', orient='records',
lines=True)
if tools_df.shape[0] > 0:
tools_df.to_json(os.path.join(product_path, 'product_tools', data_file),
compression='gzip', orient='records',
lines=True)
if drugstore_df.shape[0] > 0:
drugstore_df.to_json(os.path.join(product_path, 'product_drugstore', data_file),
compression='gzip', orient='records',
lines=True)
if random_df.shape[0] > 0:
random_df.to_json(os.path.join(product_path, 'product_random', data_file),
compression='gzip', orient='records',
lines=True)
            ## only save the dictionaries every few thousand files to limit I/O
# save dictionaries with selected data
if count % 1000 == 0:
                with open(os.path.join(product_path, 'product_clothes_v3', 'clothes_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(clothes_dict, f)
                with open(os.path.join(product_path, 'product_electronics_v3', 'electronics_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(electronics_dict, f)
with open(os.path.join(product_path, 'product_bikes', 'bikes_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(bikes_dict, f)
with open(os.path.join(product_path, 'product_cars', 'cars_dict.json'), 'w', encoding='utf-8') as f:
json.dump(cars_dict, f)
with open(os.path.join(product_path, 'product_technology', 'technology_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(technology_dict, f)
with open(os.path.join(product_path, 'product_tools', 'tools_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(tools_dict, f)
with open(os.path.join(product_path, 'product_drugstore', 'drugstore_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(drugstore_dict, f)
with open(os.path.join(product_path, 'product_random', 'random_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(random_dict, f)
# save at the end of running
with open(os.path.join(product_path, 'product_clothes_v3', 'clothes_dict.json'), 'w', encoding='utf-8') as f:
json.dump(clothes_dict, f)
with open(os.path.join(product_path, 'product_electronics_v3', 'electronics_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(electronics_dict, f)
with open(os.path.join(product_path, 'product_bikes', 'bikes_dict.json'), 'w', encoding='utf-8') as f:
json.dump(bikes_dict, f)
with open(os.path.join(product_path, 'product_cars', 'cars_dict.json'), 'w', encoding='utf-8') as f:
json.dump(cars_dict, f)
with open(os.path.join(product_path, 'product_technology', 'technology_dict.json'), 'w', encoding='utf-8') as f:
json.dump(technology_dict, f)
with open(os.path.join(product_path, 'product_tools', 'tools_dict.json'), 'w', encoding='utf-8') as f:
json.dump(tools_dict, f)
with open(os.path.join(product_path, 'product_drugstore', 'drugstore_dict.json'), 'w', encoding='utf-8') as f:
json.dump(drugstore_dict, f)
with open(os.path.join(product_path, 'product_random', 'random_dict.json'), 'w', encoding='utf-8') as f:
json.dump(random_dict, f)
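# Typical invocation sketch (an assumption based on the path variables defined at
# the top of this script; the calls are not shown in the original):
#
#   keyword_search(cleaned_top100_path)
#   keyword_search(cleaned_min3_path)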
def remove_stopwords(token_vector, stopwords_list):
return token_vector.apply(lambda token_list: [word for word in token_list if word not in stopwords_list])
def remove_punctuation(token_vector):
return token_vector.apply(lambda token_list: [word for word in token_list if word not in string.punctuation])
def jaccard_similarity_score(original, translation):
intersect = set(original).intersection(set(translation))
union = set(original).union(set(translation))
try:
return len(intersect) / len(union)
except ZeroDivisionError:
return 0
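# Example (illustrative): two token lists sharing 2 of 4 distinct tokens.
#
#   jaccard_similarity_score(['nike', 'air', 'max'], ['nike', 'air', 'force'])
#   # -> 2 / 4 = 0.5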
def post_cleaning():
"""
    Post-processing step: measures the similarity within each cluster_id of the final
    entities (electronics, clothes, etc.) and removes the rows whose similarity scores do not fit.
:return:
"""
entities = ['Bikes', 'Cars', 'Clothes', 'Drugstore', 'Electronics', 'Technology', 'Tools', 'Random']
# entities = ['Tools']
# generate lists for valid electronics and clothes brands
with open(os.path.join(product_path, 'brands_dict.json'), 'r', encoding='utf-8') as f:
brands_dict = json.load(f)
# read final dataframes with all cluster_ids left
for entity in entities:
print('Running post-processing for {}'.format(entity))
clusters_all_df = pd.read_csv(
os.path.join(cluster_path, '{}_clusters_all_8_tables.csv'.format(entity)),
index_col=None)
final_entities_list = list(set(clusters_all_df['cluster_id']))
# lowercase name column for similarity measure
clusters_all_df['name'] = clusters_all_df['name'].apply(lambda row: str(row).lower())
# use tokenizer for name column to get tokens for training the model, remove stopwords and punctuation
clusters_all_df['tokens'] = clusters_all_df['name'].apply(lambda row: word_tokenize(row))
clusters_all_df['tokens'] = remove_stopwords(clusters_all_df['tokens'], stopwords.words())
clusters_all_df['tokens'] = remove_punctuation(clusters_all_df['tokens'])
# get tagged words
tagged_data = [TaggedDocument(words=_d, tags=[str(i)]) for i, _d in
enumerate(clusters_all_df['tokens'])]
# build model and vocabulary
model = Doc2Vec(vector_size=50, min_count=5, epochs=25, dm=0)
model.build_vocab(tagged_data)
# Train model
model.train(tagged_data, total_examples=model.corpus_count, epochs=25)
# compare for all cluster_ids the similarity between the entries within a cluster_id
if entity == 'Electronics':
all_valid_brands = brands_dict['electronics_total']
else:
all_valid_brands = brands_dict[entity.lower()]
valid_indices_all = []
print('measure similarity')
count = 0
with progressbar.ProgressBar(max_value=len(final_entities_list)) as bar:
for cluster_id in final_entities_list:
single_cluster_id_df = clusters_all_df[clusters_all_df['cluster_id'] == cluster_id]
# measure similarity with Doc2Vec
valid_brands = list(filter(lambda brand: brand in all_valid_brands,
single_cluster_id_df['brand_y'].apply(lambda element: str(element).lower())))
if len(valid_brands) > 0:
most_common_brand = max(valid_brands, key=valid_brands.count)
index_most_common = single_cluster_id_df[single_cluster_id_df['brand_y'].apply(
lambda element: str(element).lower()) == most_common_brand].index[
0] # use this as baseline for similarity comparisons within a certain cluster
# calculate similarity and filter for the ones which are in the current cluster
similar_doc = model.docvecs.most_similar(f'{index_most_common}', topn=clusters_all_df.shape[0])
similar_doc_cluster = [tup for tup in similar_doc if int(tup[0]) in list(
single_cluster_id_df.index)] # similarities as tuples with index and similarity measure compared to baseline product
similar_doc_cluster_df = pd.DataFrame(list(similar_doc_cluster), columns=['index', 'doc2vec'])
similar_doc_cluster_df['index'] = [int(i) for i in
similar_doc_cluster_df['index']] # change indices to numbers
# measure similarity with Jaccard
jaccard_score = single_cluster_id_df['name'].apply(lambda row: jaccard_similarity_score(
row, single_cluster_id_df['name'].loc[int(index_most_common)]))
jaccard_score = jaccard_score.drop(int(index_most_common)).sort_values(ascending=False)
                    jaccard_score_df = pd.DataFrame({'index': jaccard_score.index, 'jaccard': jaccard_score.values})
import pandas as pd
from Bio import SeqIO
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.cluster import MeanShift
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def parseFasta(data):
d = {fasta.id : str(fasta.seq) for fasta in SeqIO.parse(data, "fasta")}
    return pd.DataFrame([d])
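# Usage sketch (the FASTA path below is a hypothetical placeholder):
#
#   virus_df = parseFasta('sequences.fasta')
#   print(virus_df.shape)  # one row, one column per sequence id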
##########################################################################
#
# Functions for calculating signals from share-prices and financial data.
#
##########################################################################
# SimFin - Simple financial data for Python.
# www.simfin.com - www.github.com/simfin/simfin
# See README.md for instructions and LICENSE.txt for license details.
##########################################################################
import pandas as pd
import numpy as np
from simfin.cache import cache
from simfin.derived import free_cash_flow, ncav, netnet, shares
from simfin.rel_change import rel_change
from simfin.resample import reindex
from simfin.utils import apply, add_date_offset
from simfin.names import *
##########################################################################
@cache
def price_signals(df_prices, group_index=TICKER):
"""
Calculate price-signals such as Moving Average and MACD for all stocks
in the given DataFrame.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df_prices:
Pandas DataFrame with share-prices for multiple stocks.
:param group_index:
If the DataFrame has a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:return:
Pandas DataFrame with price-signals.
"""
# Helper-function for calculating signals for a single stock.
def _signals(df_prices):
# Create new DataFrame for the signals.
# Setting the index improves performance.
df_signals = pd.DataFrame(index=df_prices.index)
# Use the closing share-price for all the signals.
df_price = df_prices[CLOSE]
# Moving Average for past 20 days.
df_signals[MAVG_20] = df_price.rolling(window=20).mean()
# Moving Average for past 200 days.
df_signals[MAVG_200] = df_price.rolling(window=200).mean()
# Exponential Moving Average for past 20 days.
df_signals[EMA] = df_price.ewm(span=20).mean()
# Moving Average Convergence Divergence for 12 and 26 days.
# https://en.wikipedia.org/wiki/MACD
df_signals[MACD] = df_price.ewm(span=12).mean() \
- df_price.ewm(span=26).mean()
# MACD with extra smoothing by Exp. Moving Average for 9 days.
df_signals[MACD_EMA] = df_signals[MACD].ewm(span=9).mean()
return df_signals
# Calculate signals and use Pandas groupby if `df` has multiple stocks.
df_signals = apply(df=df_prices, func=_signals, group_index=group_index)
# Sort the columns by their names.
df_signals.sort_index(axis='columns', inplace=True)
return df_signals
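# Usage sketch (illustration only, not part of the library): compute the signals
# for a synthetic single-ticker price series. The column/index names (CLOSE,
# TICKER, MAVG_20, ...) come from the simfin.names constants imported above; the
# 'DEMO' ticker and the dates are made up.
#
#   dates = pd.date_range('2020-01-01', periods=250, freq='D')
#   df_prices = pd.DataFrame({CLOSE: np.linspace(100.0, 150.0, num=len(dates))},
#                            index=pd.MultiIndex.from_product(
#                                [['DEMO'], dates], names=[TICKER, 'Date']))
#   df_signals = price_signals(df_prices=df_prices)
#   print(df_signals[[MAVG_20, MAVG_200, MACD]].tail())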
##########################################################################
@cache
def trade_signals(df, signal1, signal2, group_index=TICKER):
"""
Create Buy / Sell / Hold signals from two signals in the given DataFrame.
- If `df[signal1] >= df[signal2]` create a Hold signal.
- If `df[signal1]` crosses above `df[signal2]` create a Buy signal.
- if `df[signal1]` crosses below `df[signal2]` create a Sell signal.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df:
Pandas DataFrame with columns `signal1` and `signal2`.
May contain data for one or more stocks.
:param signal1:
String with the name of a column in `df`.
:param signal2:
String with the name of a column in `df`.
:param group_index:
If the DataFrame has a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:return:
Pandas Dataframe with BUY, SELL, HOLD signals.
"""
# Helper-function for calculating signals for a single stock.
def _signals(df):
# Create new DataFrame for the signals.
# Setting the index improves performance.
df_signals = pd.DataFrame(index=df.index)
# Boolean whether signal1 >= signal2.
df_above = (df[signal1] >= df[signal2])
# Boolean whether to buy the stock.
df_signals[BUY] = df_above & ~df_above.shift(1, fill_value=True)
# Boolean whether to sell the stock.
df_signals[SELL] = ~df_above & df_above.shift(1, fill_value=False)
# Boolean whether to keep holding the stock.
df_signals[HOLD] = df_above
return df_signals
# Calculate signals and use Pandas groupby if `df` has multiple stocks.
df_signals = apply(df=df, func=_signals, group_index=group_index)
# Sort the columns by their names.
df_signals.sort_index(axis='columns', inplace=True)
return df_signals
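# Usage sketch (illustration only): turn a 20/200-day moving-average crossover
# into Buy/Sell/Hold flags, reusing the price-signals sketch shown above.
#
#   df_price_sig = price_signals(df_prices=df_prices)
#   df_trades = trade_signals(df=df_price_sig, signal1=MAVG_20, signal2=MAVG_200)
#   buy_dates = df_trades[df_trades[BUY]].index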
##########################################################################
@cache
def volume_signals(df_prices, df_shares, window=20, fill_method='ffill',
offset=None, date_index=REPORT_DATE,
shares_index=SHARES_BASIC, group_index=TICKER):
"""
Calculate signals for the daily trading-volume of stocks, such as:
- REL_VOL: The daily trading-volume relative to its moving average.
- VOLUME_MCAP: The Market-Capitalization of the daily trading volume.
- VOLUME_TURNOVER: Trading-volume relative to the shares outstanding.
The moving-average is calculated in different ways for the signals.
For REL_VOL it is a part of the formula definition. For VOLUME_MCAP
and VOLUME_TURNOVER the moving-average is calculated afterwards.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df_prices:
Pandas DataFrame with share-prices for multiple stocks.
:param df_shares:
Pandas DataFrame with both columns SHARES_BASIC and SHARES_DILUTED
e.g. `df_shares=df_income_ttm`
:param window:
Integer for the number of days to use in moving-average calculations.
:param fill_method:
String or callable for the method of filling in empty values when
reindexing financial data to daily data-points.
See :obj:`~simfin.resample.reindex` for valid options.
:param offset:
Pandas DateOffset added to the date-index of `df_shares`. Example:
`pd.DateOffset(days=60)`
See :obj:`~simfin.utils.add_date_offset` for more details.
:param date_index:
Name of the date-column for `df_shares` e.g. REPORT_DATE.
:param shares_index:
Name of the column for share-counts in `df_shares`. SHARES_DILUTED
takes the potential diluting impact of stock-options into account,
while SHARES_BASIC does not take potential dilution into account.
:param group_index:
If the DataFrame has a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:return:
Pandas DataFrame with volume-signals.
"""
# Copy the given share-counts (e.g. SHARES_BASIC) and fill in missing
# values with the other share-counts (e.g. SHARES_DILUTED).
df_shares = shares(df=df_shares, index=shares_index)
# Helper-function for calculating signals for a single stock.
def _signals(df):
# Create new DataFrame for the signals.
# Setting the index improves performance.
df_signals = pd.DataFrame(index=df.index)
# Get the relevant data.
df_price = df[CLOSE]
df_volume = df[VOLUME]
# Share-counts from financial reports, reindexed to daily data-points.
df_shares_daily = df[shares_index]
# Moving average for the daily trading volume.
df_volume_mavg = df_volume.rolling(window=window).mean()
# Last trading volume relative to its moving average.
df_rel_vol = df_volume / df_volume_mavg
df_signals[REL_VOL] = np.log(df_rel_vol)
# Calculate Market-Capitalization of the daily trading-volume.
df_vol_mcap = df_volume * df_price
df_signals[VOLUME_MCAP] = df_vol_mcap.rolling(window=window).mean()
# Calculate Volume Turnover as the daily trading-volume
# divided by the total number of shares outstanding.
df_vol_turn = df_volume / df_shares_daily
df_signals[VOLUME_TURNOVER] = df_vol_turn.rolling(window=window).mean()
return df_signals
# Add offset / lag to the dates of the share-counts.
if offset is not None:
df_shares = add_date_offset(df=df_shares, offset=offset,
date_index=date_index)
# Reindex the share-counts to daily data-points.
df_shares_daily = reindex(df_src=df_shares, df_target=df_prices,
method=fill_method, group_index=group_index)
# Combine the relevant data into a single DataFrame.
dfs = [df_prices[[CLOSE, VOLUME]], df_shares_daily]
df = pd.concat(dfs, axis=1)
# Calculate signals and use Pandas groupby if `df` has multiple stocks.
df_signals = apply(df=df, func=_signals, group_index=group_index)
# Sort the columns by their names.
df_signals.sort_index(axis='columns', inplace=True)
return df_signals
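# Usage sketch (illustration only): daily volume signals from share-prices plus
# TTM income-statement share counts, with a 60-day reporting lag. The input
# DataFrames (df_prices, df_income_ttm) are assumed to be loaded elsewhere.
#
#   df_vol = volume_signals(df_prices=df_prices, df_shares=df_income_ttm,
#                           offset=pd.DateOffset(days=60))
#   print(df_vol[[REL_VOL, VOLUME_TURNOVER]].tail())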
##########################################################################
@cache
def fin_signals(df_income_ttm, df_balance_ttm, df_cashflow_ttm, df_prices=None,
offset=None, func=None, fill_method='ffill',
date_index=REPORT_DATE, group_index=TICKER, banks=False, insurance=False):
"""
Calculate financial signals such as Net Profit Margin, Debt Ratio, ROA,
etc. for all stocks in the given DataFrames.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df_prices:
Optional Pandas DataFrame with share-prices for one or more stocks.
If not `None`, then the signals will be reindexed to the same daily
data-points as `df_prices`, otherwise the signals will be quarterly.
:param df_income_ttm:
Pandas DataFrame with Income Statement TTM data for one or more stocks.
:param df_balance_ttm:
Pandas DataFrame with Balance Sheet TTM data for one or more stocks.
:param df_cashflow_ttm:
Pandas DataFrame with Cash-Flow Statement TTM data for one or more stocks.
:param func:
Function to apply on a per-stock basis after the signals have been
calculated, but before they have been reindexed to daily data-points.
This is useful e.g. to calculate multi-year averages.
For example, to calculate the 2-year averages of TTM data:
`func = lambda df: 0.5 * (df + df.shift(4))`
:param fill_method:
String or callable for the method of filling in empty values when
reindexing financial data to daily data-points.
See :obj:`~simfin.resample.reindex` for valid options.
:param offset:
Pandas DateOffset added to the date-index of the Pandas DataFrames with
        the financial data. Example: `pd.DateOffset(days=60)`. This is useful if
you want to add a lag of e.g. 60 days to the dates of financial reports
with Income Statements, Balance Sheets, and Cash-Flow Statements, because
the REPORT_DATE is not when it was actually made available to the public,
which can be 1, 2 or even 3 months after the REPORT_DATE.
See :obj:`~simfin.utils.add_date_offset` for more details.
:param date_index:
Name of the date-column for the financial data e.g. REPORT_DATE.
:param group_index:
If the DataFrames have a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:param banks:
Boolean whether to use the special datasets for banks.
:param insurance:
Boolean whether to use the special datasets for insurance
companies.
:return:
Pandas DataFrame with financial signals.
"""
# Helper-function for calculating signals for a single stock.
def _signals(df):
# Create new DataFrame for the signals.
# Setting the index improves performance.
df_signals = pd.DataFrame(index=df.index)
# Net Profit Margin.
df_signals[NET_PROFIT_MARGIN] = df[NET_INCOME] / df[REVENUE]
# Gross Profit Margin.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[GROSS_PROFIT_MARGIN] = df[GROSS_PROFIT] / df[REVENUE]
# R&D / Revenue.
# Note: RESEARCH_DEV must be negated.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[RD_REVENUE] = -df[RESEARCH_DEV] / df[REVENUE]
# R&D / Gross Profit.
# Note: RESEARCH_DEV must be negated.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[RD_GROSS_PROFIT] = -df[RESEARCH_DEV] / df[GROSS_PROFIT]
# Return on Research Capital (RORC).
# Note: RESEARCH_DEV must be negated.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[RORC] = df[GROSS_PROFIT] / -df[RESEARCH_DEV]
# Interest Coverage.
# Note: INTEREST_EXP_NET must be negated.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[INTEREST_COV] = df[OPERATING_INCOME] / -df[INTEREST_EXP_NET]
# Current Ratio = Current Assets / Current Liabilities.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[CURRENT_RATIO] = df[TOTAL_CUR_ASSETS] / df[TOTAL_CUR_LIAB]
        # Quick Ratio = (Cash + Equiv. + ST Inv. + Recv.) / Current Liab.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[QUICK_RATIO] = \
(df[CASH_EQUIV_ST_INVEST] + df[ACC_NOTES_RECV].fillna(0.0)) \
/ df[TOTAL_CUR_LIAB]
# Debt Ratio = (Short-term Debt + Long-term Debt) / Total Assets.
df_signals[DEBT_RATIO] = (df[ST_DEBT] + df[LT_DEBT]) / df[TOTAL_ASSETS]
# NOTE: There are different ways of calculating ROA, ROE,
# ASSET_TURNOVER, etc. See Tutorial 04. For example, we could use the
# Assets or Equity from last year instead of from the current year,
# but the resulting ROA, ROE, etc. are usually very similar, and using
        # last year's Assets or Equity would cause us to lose one year of
# data-points for the signals we are calculating here.
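        # As a purely illustrative alternative (not used here), last year's
        # Assets could be referenced by shifting the TTM data 4 quarters:
        #   df_signals[ROA] = df[NET_INCOME] / df[TOTAL_ASSETS].shift(4)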
# Return on Assets = Net Income / Total Assets. See note above.
df_signals[ROA] = df[NET_INCOME] / df[TOTAL_ASSETS]
# Return on Equity = Net Income / Total Equity. See note above.
df_signals[ROE] = df[NET_INCOME] / df[TOTAL_EQUITY]
# Asset Turnover = Revenue / Total Assets. See note above.
df_signals[ASSET_TURNOVER] = df[REVENUE] / df[TOTAL_ASSETS]
# Inventory Turnover = Revenue / Inventory. See note above.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[INVENTORY_TURNOVER] = df[REVENUE] / df[INVENTORIES]
# Payout Ratio = Dividends / Free Cash Flow
# Note the negation because DIVIDENDS_PAID is negative.
df_signals[PAYOUT_RATIO] = -df[DIVIDENDS_PAID].fillna(0) / df[FCF]
# Buyback Ratio = Share Buyback / Free Cash Flow
# Note the negation because CASH_REPURCHASE_EQUITY is negative.
df_signals[BUYBACK_RATIO] = \
-df[CASH_REPURCHASE_EQUITY].fillna(0) / df[FCF]
# Payout + Buyback Ratio = (Dividends + Share Buyback) / Free Cash Flow
# Note the negation because DIVIDENDS_PAID and CASH_REP.. are negative.
df_signals[PAYOUT_BUYBACK_RATIO] = \
-(df[DIVIDENDS_PAID].fillna(0) +
df[CASH_REPURCHASE_EQUITY].fillna(0)) / df[FCF]
# Net Acquisitions & Divestitures / Total Assets.
# Note the negation because NET_CASH_ACQ_DIVEST is usually negative.
# Note: Not available for insurances.
if not insurance:
df_signals[ACQ_ASSETS_RATIO] = \
-df[NET_CASH_ACQ_DIVEST] / df[TOTAL_ASSETS]
# Capital Expenditures / (Depreciation + Amortization).
# Note the negation because CAPEX is negative.
df_signals[CAPEX_DEPR_RATIO] = -df[CAPEX] / df[DEPR_AMOR]
# Log10(Revenue).
df_signals[LOG_REVENUE] = np.log10(df[REVENUE])
return df_signals
# Get relevant data from Income Statements.
if banks or insurance:
columns = [REVENUE, OPERATING_INCOME,
NET_INCOME]
else:
columns = [REVENUE, GROSS_PROFIT, OPERATING_INCOME, INTEREST_EXP_NET,
NET_INCOME, RESEARCH_DEV]
df1 = df_income_ttm[columns]
# Get relevant data from Balance Sheets.
if banks or insurance:
columns = [TOTAL_ASSETS, TOTAL_EQUITY,
ST_DEBT, LT_DEBT]
else:
columns = [TOTAL_ASSETS, TOTAL_CUR_ASSETS, TOTAL_CUR_LIAB, TOTAL_EQUITY,
ST_DEBT, LT_DEBT, INVENTORIES, CASH_EQUIV_ST_INVEST,
ACC_NOTES_RECV]
df2 = df_balance_ttm[columns]
# Get relevant data from Cash-Flow Statements.
if banks:
columns = [DIVIDENDS_PAID, CASH_REPURCHASE_EQUITY, NET_CASH_ACQ_DIVEST,
CAPEX, DEPR_AMOR]
elif insurance:
columns = [DIVIDENDS_PAID, CASH_REPURCHASE_EQUITY,
CAPEX, DEPR_AMOR]
else:
columns = [DIVIDENDS_PAID, CASH_REPURCHASE_EQUITY, NET_CASH_ACQ_DIVEST,
CAPEX, DEPR_AMOR]
df3 = df_cashflow_ttm[columns]
# Calculate Free Cash Flow.
df_fcf = free_cash_flow(df_cashflow=df_cashflow_ttm)
# Combine the data into a single DataFrame.
df = pd.concat([df1, df2, df3, df_fcf], axis=1)
# Add offset / lag to the index-dates of the financial data.
if offset is not None:
df = add_date_offset(df=df, offset=offset, date_index=date_index)
# Calculate signals and use Pandas groupby if `df` has multiple stocks.
df_signals = apply(df=df, func=_signals, group_index=group_index)
# Process the signals using the supplied function e.g. to calculate averages.
if func is not None:
df_signals = apply(df=df_signals, func=func, group_index=group_index)
# Reindex to the same daily data-points as the share-prices.
if df_prices is not None:
df_signals = reindex(df_src=df_signals, df_target=df_prices,
method=fill_method, group_index=group_index)
# Sort the columns by their names.
df_signals.sort_index(axis='columns', inplace=True)
return df_signals
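# Hedged usage sketch (illustrative only, not part of the original source),
# assuming the standard SimFin load-functions and a configured API-key /
# data-dir. Uncommented, a typical call could look roughly like this:
#
#   import pandas as pd
#   import simfin as sf
#
#   df_prices = sf.load_shareprices(variant='daily', market='us')
#   df_fin_signals = \
#       fin_signals(df_income_ttm=sf.load_income(variant='ttm', market='us'),
#                   df_balance_ttm=sf.load_balance(variant='ttm', market='us'),
#                   df_cashflow_ttm=sf.load_cashflow(variant='ttm', market='us'),
#                   df_prices=df_prices,
#                   offset=pd.DateOffset(days=60),
#                   fill_method='ffill')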
##########################################################################
@cache
def growth_signals(df_income_ttm, df_income_qrt,
df_balance_ttm, df_balance_qrt,
df_cashflow_ttm, df_cashflow_qrt,
df_prices=None, fill_method='ffill',
offset=None, func=None,
date_index=REPORT_DATE, group_index=TICKER):
"""
Calculate growth-signals such as Sales Growth, Earnings Growth, etc.
for all stocks in the given DataFrames.
Three growth-signals are given for each type of financial data, e.g.:
- SALES_GROWTH is calculated from the TTM Revenue divided by the
TTM Revenue from one year ago.
- SALES_GROWTH_YOY is calculated from the Quarterly Revenue divided by
the Quarterly Revenue from one year ago.
- SALES_GROWTH_QOQ is calculated from the Quarterly Revenue divided by
the Quarterly Revenue from the previous quarter.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df_prices:
Optional Pandas DataFrame with share-prices for one or more stocks.
If not `None`, then the signals will be reindexed to the same daily
data-points as `df_prices`, otherwise the signals will be quarterly.
:param df_income_ttm:
Pandas DataFrame with Income Statement TTM data for one or more stocks.
:param df_income_qrt:
Pandas DataFrame with Income Statement Quarterly data for one or more
stocks.
:param df_balance_ttm:
Pandas DataFrame with Balance Sheet TTM data for one or more stocks.
:param df_balance_qrt:
Pandas DataFrame with Balance Sheet Quarterly data for one or more
stocks.
:param df_cashflow_ttm:
Pandas DataFrame with Cash-Flow Statement TTM data for one or more
stocks.
:param df_cashflow_qrt:
Pandas DataFrame with Cash-Flow Statement Quarterly data for one or
more stocks.
:param func:
Function to apply on a per-stock basis after the signals have been
calculated, but before they have been reindexed to daily data-points.
This is useful e.g. to calculate multi-year averages.
For example, to calculate the 2-year averages of TTM data:
`func = lambda df: 0.5 * (df + df.shift(4))`
:param fill_method:
String or callable for the method of filling in empty values when
reindexing financial data to daily data-points.
See :obj:`~simfin.resample.reindex` for valid options.
:param offset:
Pandas DateOffset added to the date-index of the Pandas DataFrames with
        the financial data. Example: `pd.DateOffset(days=60)`. This is useful if
you want to add a lag of e.g. 60 days to the dates of financial reports
with Income Statements, Balance Sheets, and Cash-Flow Statements, because
the REPORT_DATE is not when it was actually made available to the public,
which can be 1, 2 or even 3 months after the REPORT_DATE.
See :obj:`~simfin.utils.add_date_offset` for more details.
:param date_index:
Name of the date-column for the financial data e.g. REPORT_DATE.
:param group_index:
If the DataFrames have a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:return:
Pandas DataFrame with growth signals.
"""
# This implementation uses sf.rel_change() to calculate the growth-rates,
# which means that several groupby operations are performed. But this is
# easier to implement and for large DataFrames it is only about 10% slower
# than using sf.apply() with a function like _signals() in fin_signals().
###############################
# Annual growth using TTM data.
# Select and combine the data we need.
df_ttm1 = df_income_ttm[[REVENUE, NET_INCOME]]
df_ttm2 = free_cash_flow(df_cashflow_ttm)
df_ttm3 = df_balance_ttm[[TOTAL_ASSETS]]
df_ttm = pd.concat([df_ttm1, df_ttm2, df_ttm3], axis=1)
# Dict mapping to the new column-names.
new_names = {REVENUE: SALES_GROWTH,
NET_INCOME: EARNINGS_GROWTH,
FCF: FCF_GROWTH,
TOTAL_ASSETS: ASSETS_GROWTH}
# Calculate the growth-rates.
df_growth = rel_change(df=df_ttm, freq='q', quarters=4,
future=False, annualized=False,
new_names=new_names)
#############################################
# Year-Over-Year growth using Quarterly data.
# Select and combine the data we need.
df_qrt1 = df_income_qrt[[REVENUE, NET_INCOME]]
df_qrt2 = free_cash_flow(df_cashflow_qrt)
df_qrt3 = df_balance_qrt[[TOTAL_ASSETS]]
df_qrt = pd.concat([df_qrt1, df_qrt2, df_qrt3], axis=1)
# Dict mapping to the new column-names.
new_names = {REVENUE: SALES_GROWTH_YOY,
NET_INCOME: EARNINGS_GROWTH_YOY,
FCF: FCF_GROWTH_YOY,
TOTAL_ASSETS: ASSETS_GROWTH_YOY}
# Calculate the growth-rates.
df_growth_yoy = rel_change(df=df_qrt, freq='q', quarters=4,
future=False, annualized=False,
new_names=new_names)
########################################################
# Quarter-Over-Quarter growth using Quarterly data.
# Note: This uses the same Quarterly DataFrame as above.
# Dict mapping to the new column-names.
new_names = {REVENUE: SALES_GROWTH_QOQ,
NET_INCOME: EARNINGS_GROWTH_QOQ,
FCF: FCF_GROWTH_QOQ,
TOTAL_ASSETS: ASSETS_GROWTH_QOQ}
# Calculate the growth-rates.
df_growth_qoq = rel_change(df=df_qrt, freq='q', quarters=1,
future=False, annualized=False,
new_names=new_names)
##################
# Post-processing.
# Combine into a single DataFrame.
df_signals = pd.concat([df_growth, df_growth_yoy, df_growth_qoq], axis=1)
# Add offset / lag to the index-dates of the signals.
if offset is not None:
df_signals = add_date_offset(df=df_signals, offset=offset,
date_index=date_index)
# Process the signals using the supplied function e.g. to calculate averages.
if func is not None:
df_signals = apply(df=df_signals, func=func, group_index=group_index)
# Reindex to the same daily data-points as the share-prices.
if df_prices is not None:
df_signals = reindex(df_src=df_signals, df_target=df_prices,
method=fill_method, group_index=group_index)
# Sort the columns by their names.
df_signals.sort_index(axis='columns', inplace=True)
return df_signals
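# Hedged usage sketch (illustrative only, not part of the original source),
# assuming the same SimFin load-functions as in the sketch above. The
# quarterly variants are needed for the YOY and QOQ growth-signals:
#
#   df_growth = growth_signals(
#       df_income_ttm=sf.load_income(variant='ttm', market='us'),
#       df_income_qrt=sf.load_income(variant='quarterly', market='us'),
#       df_balance_ttm=sf.load_balance(variant='ttm', market='us'),
#       df_balance_qrt=sf.load_balance(variant='quarterly', market='us'),
#       df_cashflow_ttm=sf.load_cashflow(variant='ttm', market='us'),
#       df_cashflow_qrt=sf.load_cashflow(variant='quarterly', market='us'),
#       df_prices=df_prices)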
##########################################################################
@cache
def val_signals(df_prices, df_income_ttm, df_balance_ttm, df_cashflow_ttm,
fill_method='ffill', offset=None, func=None,
date_index=REPORT_DATE, shares_index=SHARES_DILUTED,
group_index=TICKER, banks=False, insurance=False):
"""
Calculate valuation signals such as P/E and P/Sales ratios for all stocks
in the given DataFrames.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df_prices:
Pandas DataFrame with share-prices for one or more stocks.
:param df_income_ttm:
Pandas DataFrame with Income Statement TTM data for one or more stocks.
:param df_balance_ttm:
Pandas DataFrame with Balance Sheet TTM data for one or more stocks.
:param df_cashflow_ttm:
Pandas DataFrame with Cash-Flow Statement TTM data for one or more stocks.
:param fill_method:
String or callable for the method of filling in empty values when
reindexing financial data to daily data-points.
See :obj:`~simfin.resample.reindex` for valid options.
:param offset:
Pandas DateOffset added to the date-index of the Pandas DataFrames with
        the financial data. Example: `pd.DateOffset(days=60)`. This is useful if
you want to add a lag of e.g. 60 days to the dates of financial reports
with Income Statements, Balance Sheets, and Cash-Flow Statements, because
the REPORT_DATE is not when it was actually made available to the public,
which can be 1, 2 or even 3 months after the REPORT_DATE.
See :obj:`~simfin.utils.add_date_offset` for more details.
:param func:
Function to apply on a per-stock basis on the financial data, before
calculating the valuation signals. This is useful e.g. to calculate
multi-year averages of the Net Income and Revenue and use those when
calculating P/E and P/Sales ratios.
For example, to calculate the 2-year averages of TTM data:
`func = lambda df: 0.5 * (df + df.shift(4))`
:param date_index:
Name of the date-column for the financial data e.g. REPORT_DATE.
:param shares_index:
String with the column-name for the share-counts. SHARES_DILUTED
takes the potential diluting impact of stock-options into account, so
it results in more conservative valuation ratios than SHARES_BASIC.
:param group_index:
If the DataFrames have a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:param banks:
Boolean whether to use the special datasets for banks.
:param insurance:
Boolean whether to use the special datasets for insurance
companies.
:return:
Pandas DataFrame with valuation signals.
"""
# Get the required data from the Income Statements.
columns = [REVENUE, NET_INCOME_COMMON, SHARES_BASIC, SHARES_DILUTED]
df_inc = df_income_ttm[columns]
# Get the required data from the Balance Sheets.
if banks or insurance:
columns = [TOTAL_ASSETS, TOTAL_LIABILITIES, TOTAL_EQUITY]
else:
columns = [TOTAL_CUR_ASSETS, CASH_EQUIV_ST_INVEST, ACC_NOTES_RECV,
INVENTORIES, TOTAL_LIABILITIES, TOTAL_EQUITY]
df_bal = df_balance_ttm[columns]
# Get the required data from the Cash-Flow Statements.
columns = [DIVIDENDS_PAID]
df_cf = df_cashflow_ttm[columns]
# Combine all the data. This creates a new copy that we can add columns to.
df = pd.concat([df_inc, df_bal, df_cf], axis=1)
# Calculate derived financial data such as Free Cash Flow (FCF),
# and add it as new columns to the DataFrame.
# This is only TTM data with 4 data-points per year, so it is
# faster than calculating it for the daily data-points below.
df[FCF] = free_cash_flow(df_cashflow_ttm)
# Note: Not for banks and insurances.
if not banks and not insurance:
df[NCAV] = ncav(df_balance_ttm)
# Note: Not for banks and insurances.
if not banks and not insurance:
df[NETNET] = netnet(df_balance_ttm)
# Add offset / lag to the index-dates of the financial data.
if offset is not None:
df = add_date_offset(df=df, offset=offset, date_index=date_index)
# Copy the number of shares before applying the user-supplied function,
# which might change the number of shares in the original DataFrame df.
# This tries to use the given share-counts (e.g. SHARES_DILUTED) and
# fill in missing values with the other share-counts (e.g. SHARES_BASIC).
df_shares = shares(df=df, index=shares_index)
# Reindex the share-counts to daily data-points.
df_shares_daily = reindex(df_src=df_shares, df_target=df_prices,
method=fill_method, group_index=group_index)
# Process the financial data using the user-supplied function
# e.g. to calculate multi-year averages of Earnings, Sales, etc.
if func is not None:
df = apply(df=df, func=func, group_index=group_index)
# Calculate Per-Share numbers. It is important to use the share-count
# from before the user-supplied function was applied.
df_per_share = df.div(df_shares, axis=0)
# Reindex the per-share financial data to daily data-points.
df_daily = reindex(df_src=df_per_share, df_target=df_prices,
method=fill_method, group_index=group_index)
# Create new DataFrame for the signals.
# Setting the index improves performance.
    df_signals = pd.DataFrame(index=df_prices.index)