#!/usr/bin/env python
# coding: utf-8
import shutil
import pandas as pd
import pyprojroot
def convert_seg_error_rate_pct(df):
df.avg_segment_error_rate = df.avg_segment_error_rate * 100
return df
def generate_fig3_source_data():
RESULTS_ROOT = pyprojroot.here() / 'results'
FIG_ROOT = pyprojroot.here() / 'doc' / 'figures' / 'mainfig_tweetynet_v_svm'
FIG_ROOT.mkdir(exist_ok=True)
segmentation_map = {
'ground_truth': 'segmented audio, manually cleaned',
'resegment': 'segmented audio, not cleaned',
'semi-automated-cleaning': 'segmented audio, semi-automated cleaning',
'not-cleaned': 'segmented audio, not cleaned',
'manually-cleaned': 'segmented audio, manually cleaned'
}
hvc_dfs = []
csv_filename = 'segment_error_across_birds.hvc.csv'
for species in ('Bengalese_Finches', 'Canaries'):
species_csv = RESULTS_ROOT / f'{species}/hvc/{csv_filename}'
df = pd.read_csv(species_csv)
df['Model'] = 'SVM'
df['Input to model'] = df['segmentation'].map(segmentation_map)
df['Species'] = species
hvc_dfs.append(df)
hvc_df = pd.concat(hvc_dfs)
curve_df = []
for species in ('Bengalese_Finches', 'Canaries'):
LEARNCURVE_RESULTS_ROOT = pyprojroot.here() / 'results' / species / 'learncurve'
error_csv_path = LEARNCURVE_RESULTS_ROOT.joinpath('error_across_birds_with_cleanup.csv')
df = pd.read_csv(error_csv_path)
df = df[df.animal_id.isin(hvc_df.animal_id.unique())]
df['Model'] = 'TweetyNet'
df['Input to model'] = 'spectrogram'
df['Species'] = species
curve_df.append(df)
del df
curve_df = pd.concat(curve_df)
CLEANUP = 'min_segment_dur_majority_vote'
curve_df = curve_df[
curve_df.cleanup == CLEANUP
]
all_df = pd.concat([hvc_df, curve_df])
all_df = convert_seg_error_rate_pct(all_df)
gb = all_df.groupby(by=['Species', 'Model', 'Input to model', 'animal_id', 'train_set_dur'])
df_agg = gb.agg(
mean_seg_err = pd.NamedAgg('avg_segment_error_rate', 'mean'),
median_seg_err = pd.NamedAgg('avg_segment_error_rate', 'median'),
std_seg_err = pd.NamedAgg('avg_segment_error_rate', 'std')
)
data = df_agg.reset_index() # ``data`` DataFrame for use with ``seaborn``
data.to_csv(FIG_ROOT / 'fig3-data1.csv')
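# Minimal usage sketch (assumption: the generators in this module are invoked
# from a driver script or notebook that is not part of this excerpt):
#
#     generate_fig3_source_data()   # writes fig3-data1.csv under doc/figures/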
def filter_cleanups(df, cleanups):
return df[df.cleanup.isin(cleanups)]
def clean_df(df, species, cleanups):
df = convert_seg_error_rate_pct(df)
df = add_species(df, species)
df = filter_cleanups(df, cleanups)
return df
def add_species(df, species):
df['species'] = species
return df
def generate_fig4_source_data():
PROJ_ROOT = pyprojroot.here()
RESULTS_ROOT = PROJ_ROOT / 'results'
BF_RESULTS_ROOT = RESULTS_ROOT / 'Bengalese_Finches' / 'learncurve'
CANARY_RESULTS_ROOT = RESULTS_ROOT / 'Canaries' / 'learncurve'
FIGS_ROOT = PROJ_ROOT / 'doc' / 'figures'
    THIS_FIG_ROOT = FIGS_ROOT / 'mainfig_across_individuals_species'
THIS_FIG_ROOT.mkdir(exist_ok=True)
CLEANUPS = (
'none',
'min_segment_dur_majority_vote'
)
bf_error_csv_path = BF_RESULTS_ROOT.joinpath('error_across_birds_with_cleanup.csv')
bf_curve_df = pd.read_csv(bf_error_csv_path)
bf_curve_df = clean_df(
bf_curve_df,
'Bengalese Finch',
CLEANUPS
)
canary_error_csv_path = CANARY_RESULTS_ROOT.joinpath('error_across_birds_with_cleanup.csv')
canary_curve_df = pd.read_csv(canary_error_csv_path)
canary_curve_df = clean_df(
canary_curve_df,
'Canary',
CLEANUPS
)
for data_num, df in enumerate((bf_curve_df, canary_curve_df)):
df.to_csv(
THIS_FIG_ROOT / f'fig4-data{data_num + 1}.csv'
)
def generate_fig5_source_data():
PROJ_ROOT = pyprojroot.here()
RESULTS_ROOT = PROJ_ROOT / 'results'
BF_RESULTS_ROOT = RESULTS_ROOT / 'Bengalese_Finches' / 'learncurve'
CANARY_RESULTS_ROOT = RESULTS_ROOT / 'Canaries' / 'learncurve'
FIGS_ROOT = PROJ_ROOT / 'doc' / 'figures'
    THIS_FIG_ROOT = FIGS_ROOT / 'mainfig_postprocess_error_rates'
THIS_FIG_ROOT.mkdir(exist_ok=True)
# column name is "cleanup" but in the paper we use the term "post-processing"
# to avoid confusion with where we refer to "clean ups" of other models (e.g. SVM)
CLEANUPS = (
'none',
'min_segment_dur_majority_vote'
)
# so we'll add a column 'post-processing' that maps cleanups --> with/without post-process
POST_PROCESS_MAP = {
'none': 'without',
'min_segment_dur_majority_vote': 'with',
}
bf_error_csv_path = BF_RESULTS_ROOT.joinpath('error_across_birds_with_cleanup.csv')
bf_curve_df = pd.read_csv(bf_error_csv_path)
bf_curve_df = clean_df(
bf_curve_df,
'Bengalese Finches',
CLEANUPS
)
canary_error_csv_path = CANARY_RESULTS_ROOT.joinpath('error_across_birds_with_cleanup.csv')
canary_curve_df = pd.read_csv(canary_error_csv_path)
canary_curve_df = clean_df(
canary_curve_df,
'Canaries',
CLEANUPS
)
# only plot canaries mean for training set durations where we have results for all birds, which is > 180
canary_curve_df = canary_curve_df[canary_curve_df.train_set_dur > 180]
curve_df = pd.concat((bf_curve_df, canary_curve_df))
curve_df = curve_df.rename(columns={'species': 'Species'}) # so it's capitalized in figure, legend, etc.
curve_df['Post-processing'] = curve_df['cleanup'].map(POST_PROCESS_MAP)
train_set_durs = sorted(curve_df['train_set_dur'].unique())
dur_int_map = dict(zip(train_set_durs, range(len(train_set_durs))))
curve_df['train_set_dur_ind'] = curve_df['train_set_dur'].map(dur_int_map)
hyperparams_expt_csv_path = RESULTS_ROOT / 'hyperparams_expts' / 'source_data.csv'
hyperparams_expt_df = pd.read_csv(hyperparams_expt_csv_path)
hyperparams_expt_df = filter_cleanups(hyperparams_expt_df,
CLEANUPS)
hyperparams_expt_df['Post-processing'] = hyperparams_expt_df['cleanup'].map(POST_PROCESS_MAP)
curve_df.to_csv(THIS_FIG_ROOT / 'fig5-data1.csv')
hyperparams_expt_df.to_csv(THIS_FIG_ROOT / 'fig5-data2.csv')
def generate_fig6_source_data():
RESULTS_ROOT = pyprojroot.here() / 'results' / 'Bengalese_Finches' / 'behavior'
FIG_ROOT = pyprojroot.here() / 'doc' / 'figures' / 'mainfig_bf_behavior'
EVAL_CSV_FNAME = 'eval-across-days.csv'
eval_across_days_csv = RESULTS_ROOT / EVAL_CSV_FNAME
    eval_df = pd.read_csv(eval_across_days_csv)
# %% import library
"""Doc.
Created on Thu Jan 6 15:44:41 2022
@author: zhaohuanan
@email: <EMAIL>
@info: compute the correlation between DetectSeq and TargetSeq
@version:
0.0.1: new project
@others:
anything
"""
import statsmodels.api as sm
from pandarallel import pandarallel
import datar.all as r
from datar import f
import math
import plotnine as g
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# initialize
pd.set_option('max_colwidth', 250)  # max column width
pd.set_option('display.width', 250)  # dataframe display width
pd.set_option('display.max_columns', None)  # max number of columns shown
pd.set_option('display.max_rows', 50)  # max number of rows shown
pandarallel.initialize()  # multiprocessing setup; uses all cores by default (nb_workers=24)
# os.chdir('./')
# os.listdir('/')
# %% set main path
os.chdir("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/"
"2021_DdCBE_topic/20220103_VsDetectAndTarget")
# %% functions
def match_name(x):
"""
    Map a row's region_id to its new region_id based on the TargetSeq treatment.
Parameters
----------
x : dataframe
for apply, axis=1.
Returns
-------
Series
for apply, axis=1.
"""
try:
if 'N4' in x['treatment_TargetSeq']:
return dt_N4[x['region_id']]
elif 'N5-1' in x['treatment_TargetSeq']:
return dt_N5_1[x['region_id']]
elif 'N6' in x['treatment_TargetSeq']:
return dt_N6[x['region_id']]
elif 'untreat' == x['treatment_TargetSeq']:
return x['region_id'] + '_untreat'
except KeyError:
        # the remaining sites are "share" sites that are not shared by all editors,
        # e.g. share-new-29 is absent from ND4 but present in ND5.1 and ND6
return np.NaN
def mapper_detect_score(x):
"""
    Bin the mean DetectSeq score into low/mid/high using treatment-specific quantiles.
Parameters
----------
x : dataframe
for apply, axis=1.
Returns
-------
Series
for apply, axis=1.
"""
if x['treatment_TargetSeq'] == 'N4-Det':
if x['score_DetectSeq_mean'] <= df_quantile4.loc[0.33]:
return 'low'
elif x['score_DetectSeq_mean'] <= df_quantile4.loc[0.66]:
return 'mid'
elif x['score_DetectSeq_mean'] > df_quantile4.loc[0.66]:
return 'high'
else:
return np.NAN
elif x['treatment_TargetSeq'] == 'N5-1-Det':
if x['score_DetectSeq_mean'] <= df_quantile5_1.loc[0.33]:
return 'low'
elif x['score_DetectSeq_mean'] <= df_quantile5_1.loc[0.66]:
return 'mid'
elif x['score_DetectSeq_mean'] > df_quantile5_1.loc[0.66]:
return 'high'
else:
return np.NAN
elif x['treatment_TargetSeq'] == 'N6-Det':
if x['score_DetectSeq_mean'] <= df_quantile6.loc[0.33]:
return 'low'
elif x['score_DetectSeq_mean'] <= df_quantile6.loc[0.66]:
return 'mid'
elif x['score_DetectSeq_mean'] > df_quantile6.loc[0.66]:
return 'high'
else:
return np.NAN
def match_treatment_and_region_id(x):
"""
    Check whether a row's treatment prefix matches its region_id.
Parameters
----------
x : dataframe
for apply, axis=1.
Returns
-------
bool
for apply, axis=1, and dataframe selection.
"""
if x['treatment'][:3] in x['region_id']:
return True
else:
return False
def sns_histplot_show_value(histplot, h_v="v", value_fmt='.2f', fontsize=8):
"""
    Annotate each bar of a seaborn histplot with its value.
Parameters
----------
histplot : a Seaborn.histplot object
add values for each bar.
h_v : str, optional
histplot direction, v/h. The default is "v".
value_fmt : str, optional
string format of added values. The default is '.2f'.
Returns
-------
None.
"""
if h_v == "v":
for p in histplot.patches:
histplot.annotate(
format(p.get_height(), value_fmt),
(p.get_x() + p.get_width() / 2., p.get_height()),
ha='center',
va='center',
xytext=(0, 10),
textcoords='offset points',
fontsize=fontsize
)
elif h_v == "h":
for p in histplot.patches:
# @param format(p.get_width(), '.2f'), word in string format you want to put in the figure
# @param (p.get_width(), p.get_y()+ p.get_height() / 2.), x and y pos of word
# @param xytext, offset of word
histplot.annotate(
format(p.get_width(), value_fmt),
(p.get_width(), p.get_y() + p.get_height() / 2.),
ha='center',
va='center',
xytext=(30, 0),
textcoords='offset points',
fontsize=fontsize
)
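# Illustrative (untested) usage of the annotation helper above; the data and
# column names are placeholders, not taken from the analysis below:
#
#     ax = sns.histplot(data=df_plot1, x='score_DetectSeq_mean')
#     sns_histplot_show_value(ax, h_v='v', value_fmt='.0f')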
def map_nd(x):
"""
    Map region_id_new to its ND target name (ND4/ND5.1/ND6).

    Parameters
----------
x : dataframe
for apply, axis=1.
Returns
-------
str
ND4/ND5.1/ND6/np.NAN.
"""
if 'ND4' in x['region_id_new']:
return 'ND4'
elif 'ND5.1' in x['region_id_new']:
return 'ND5.1'
elif 'ND6' in x['region_id_new']:
return 'ND6'
else:
return np.NAN
# %% step1: process the merged TargetSeq and DetectSeq table and match old and new region_id
df = pd.read_csv('table/20220104_DdCBE-Merged-info.csv.gz',
dtype={
'A_ratio': np.float64,
'T_ratio': np.float64,
'C_ratio': np.float64,
'G_ratio': np.float64}
)
df.head(2)
# check the NA situation
print(df.info())
print(df.isna().sum())
# %% read 20220104_name_match.xlsx
df_name = pd.read_excel('table/20220104_name_match.xlsx', sheet_name="Sheet1")
df_name.region_id_new.values
# %% .
df_name_N4 = df_name[df_name['region_id_new'].map(
lambda x: True if 'ND4' in x else False)].copy()
df_name_N5_1 = df_name[df_name['region_id_new'].map(
lambda x: True if 'ND5.1' in x else False)].copy()
df_name_N6 = df_name[df_name['region_id_new'].map(
lambda x: True if 'ND6' in x else False)].copy()
# df_name_N4
# %% .
dt_N4 = df_name_N4.set_index('region_id').to_dict()['region_id_new']
dt_N5_1 = df_name_N5_1.set_index('region_id').to_dict()['region_id_new']
dt_N6 = df_name_N6.set_index('region_id').to_dict()['region_id_new']
# dt_N6
# %% .
df['treatment_TargetSeq'].value_counts()
# %% apply match_name
df['region_id_new'] = df.parallel_apply(match_name, axis=1)
df['region_id_new'].value_counts()
# %% check whether these NA sites are "share" sites that are not shared by all editors
print(set(df[df['region_id_new'].isnull()]
['treatment_TargetSeq'].value_counts().index.tolist()))
# check whether these NA sites are "share" sites that are not shared by all editors
print(set(df[df['region_id_new'].isnull()]
['region_id'].value_counts().index.tolist()))
# %% .
df_final = df[df.region_id_new.notnull()].copy()
df_final
# %% .
df_final.isnull().sum()
# %% export 20220104_DdCBE-Merged-info_fix_name.csv.gz
df_final.to_csv('table/20220104_DdCBE-Merged-info_fix_name.csv.gz',
sep=",", index=False, header=True)
# %% step2: TargetSeq ratio and DetectSeq ratio of the specified base in each off-target region
# idx
df_idx = pd.read_excel("./table/20220104_DdCBE_only_one_mut_index_info.xlsx",
sheet_name="Sheet1")
# print(df_idx)
# df_idx.info()
df_idx = df_idx[['region_id', 'relative_pos']].copy()
df_idx
# %% .
# merged table of DetectSeq and TargetSeq
df = pd.read_csv("./table/20220104_DdCBE-Merged-info_fix_name.csv.gz")
df
# %% .
# df.columns
# filter cutoff 3
df = df >> r.select(~f.bmat_name, ~f._merge) >> \
r.filter(f.cutoff == 3)
df
# %% .
df.head()
df.columns
# %% .
# drop redundant information
print((df >> r.select(f.treatment_TargetSeq)).value_counts())
print((df >> r.select(f.treatment_DetectSeq)).value_counts())
# %% a missing region_id_new means the shared site lacks this ND; step1 indeed removed all NA sites
print(df.shape)
df_fix = df >> r.filter(f.region_id_new.notnull())
df_fix
# %% select the DetectSeq transfection condition -Det; exclude untreat and drop on-target sites
(df_fix >> r.select(f.treatment_TargetSeq)).value_counts()
# %% filter by TargetSeq condition
df_filtered = df_fix >> r.filter(
(f.treatment_TargetSeq == "N4-Det")
| (f.treatment_TargetSeq == "N5-1-Det")
| (f.treatment_TargetSeq == "N6-Det"),
# region_id != "ND4-on-target"
# | region_id != "ND5.1-on-target"
# | region_id != "ND6-on-target",
)
# %% .
(df_filtered >> r.select(f.treatment_TargetSeq)).value_counts()
# %% intersect the single-base site information into the table
df_intersect_idx = df_idx >> r.left_join(
df_filtered, by=["region_id", "relative_pos"])
df_intersect_idx
# %% check the number of regions
df_intersect_idx['region_id_new'].value_counts().index.tolist().__len__()
# %% select rows where the TargetSeq and DetectSeq conditions match
print((df_intersect_idx >> r.select(f.treatment_TargetSeq)).value_counts())
print((df_intersect_idx >> r.select(f.treatment_DetectSeq)).value_counts())
df_one_idx_compare = df_intersect_idx >> r.filter(
# (region_id == "ND4-new-only1"),
((f.treatment_TargetSeq == "N4-Det")
& (f.treatment_DetectSeq == "ND4-Det"))
| ((f.treatment_TargetSeq == "N5-1-Det")
& (f.treatment_DetectSeq == "ND5-1-Det"))
| ((f.treatment_TargetSeq == "N6-Det")
& (f.treatment_DetectSeq == "ND6-Det")),
)
(df_one_idx_compare >> r.select(f.treatment_TargetSeq)).value_counts()
# N4-Det N5-1-Det N6-Det
# 468 576 512
# %% drop unwanted columns
df_plot = df_one_idx_compare >> r.select(
~f.region_seq,
~f.cutoff,
~f.rep_back_target,
~f.treatment_DetectSeq,
~f.site_idx,
~f.sort_chrom,
    ~f.N, ~f.A_ratio, ~f.G_ratio, ~f.C_ratio, ~f.T_ratio)
# %% select C-to-T or G-to-A, filter DetectSeq and TargetSeq depth, and compute ratio_TargetSeq, ratio_DetectSeq and score_DetectSeq
df_plot1 = df_plot >> r.filter(
# filter target-seq
f.mut_count >= 10,
f.total_count >= 2000,
# filter detect-seq
(
(f.ref_base == "C") & (f.mut_base == "T")
& (f.total >= 10) & (f['T'] >= 1)
)
| (
(f.ref_base == "G") & (f.mut_base == "A")
& (f.total >= 10) & (f.A >= 1)
),
# fot test
# region_id == "ND5.1-only-5",
# relative_pos==115,
) >> r.mutate(
ratio_TargetSeq=f.mut_count / f.total_count,
# ratio_DetectSeq1 = if_else(
# mut_base=="T",`T`/ total, A / total),
# mut_count_sqrt_DetectSeq1 = if_else(
# mut_base=="T",sqrt(`T`), sqrt(A)),
# score_DetectSeq1 = ifelse(
# mut_base=="T", (`T`/ total)^2 * `T`, (A / total)^2 * A),
# ratio_DetectSeq2 = if_else(
# strand=="+",`T`/ total, A / total),
# mut_count_sqrt_DetectSeq2 = if_else(
# strand=="+",sqrt(`T`), sqrt(A)),
# score_DetectSeq2 = ifelse(
# mut_base=="+", (`T`/ total)^2 * `T`, (A / total)^2 * A),
# ratio_DetectSeq = max(
# ratio_DetectSeq1, ratio_DetectSeq2),
# mut_count_sqrt_DetectSeq = max(
# mut_count_sqrt_DetectSeq1, mut_count_sqrt_DetectSeq2),
# score_DetectSeq = max(
# score_DetectSeq1, score_DetectSeq2),
# ratio_DetectSeq = if_else(
# strand=="+",`T`/ total, A / total),
# mut_count_sqrt_DetectSeq = if_else(
# strand=="+",sqrt(`T`), sqrt(A)),
# score_DetectSeq = ifelse(
# mut_base=="+", (`T`/ total)^2 * `T`, (A / total)^2 * A),
ratio_DetectSeq=r.if_else(
f['T'] / f.total >= f.A / f.total,
f['T'] / f.total,
f.A / f.total
),
mut_count_sqrt_DetectSeq=r.if_else(
f['T'].map(np.sqrt) >= f.A.map(np.sqrt),
f['T'].map(np.sqrt),
f.A.map(np.sqrt)
),
score_DetectSeq=r.if_else(
(f['T'] / f.total).map(np.square) * f['T'] >= \
(f.A / f.total).map(np.square) * f.A,
(f['T'] / f.total).map(np.square) * f['T'],
(f.A / f.total).map(np.square) * f.A,
)
) >> r.group_by(
f.region_id, f.region_id_new, f.relative_pos, f.absolute_pos,
f.treatment_TargetSeq, f.near_seq
) >> r.summarise(
    ratio_TargetSeq_mean=r.mean(f.ratio_TargetSeq),
    ratio_DetectSeq_mean=r.mean(f.ratio_DetectSeq),
    mut_count_sqrt_DetectSeq_mean=r.mean(f.mut_count_sqrt_DetectSeq),
    score_DetectSeq_mean=r.mean(f.score_DetectSeq),
)
# %% save table for step3
# df_plot1.to_csv('./20220114_df_plot1.csv', index=False)
df_plot1 = pd.read_csv('./20220114_df_plot1.csv')
import logging
import sys
import time
import pandas as pd
from sqlalchemy import and_, func
from dataactbroker.helpers.sam_wsdl_helper import config_valid, get_entities
from dataactbroker.helpers.generic_helper import get_client
from dataactcore.models.domainModels import DUNS
from dataactcore.utils.duns import load_duns_by_row
from dataactcore.models.jobModels import FileType, Submission # noqa
from dataactcore.models.userModel import User # noqa
logger = logging.getLogger(__name__)
def sam_config_is_valid():
    """ Check that the SAM config is valid; this should only be run once per load. Returns the client object used to access the SAM API.
Returns:
client object representing the SAM service
"""
if config_valid():
return get_client()
else:
logger.error({
'message': "Invalid SAM wsdl config",
'message_type': 'CoreError'
})
sys.exit(1)
def get_name_from_sam(client, duns_list):
""" Calls SAM API to retrieve DUNS name by DUNS number. Returns DUNS info as Data Frame
Args:
client: the SAM service client
duns_list: list of DUNS to search
Returns:
dataframe representing the DUNS and corresponding names
"""
duns_name = [{
'awardee_or_recipient_uniqu': suds_obj.entityIdentification.DUNS,
'legal_business_name': (suds_obj.entityIdentification.legalBusinessName or '').upper()
}
for suds_obj in get_entities(client, duns_list)
if suds_obj.entityIdentification.legalBusinessName
]
return pd.DataFrame(duns_name)
def get_location_business_from_sam(client, duns_list):
""" Calls SAM API to retrieve DUNS location/business type data by DUNS number. Returns DUNS info as Data Frame
Args:
client: the SAM service client
duns_list: list of DUNS to search
Returns:
dataframe representing the DUNS and corresponding locations/business types
"""
duns_name = [{
'awardee_or_recipient_uniqu': suds_obj.entityIdentification.DUNS,
'address_line_1': getattr(suds_obj.coreData.businessInformation.physicalAddress, 'addressLine1', None),
'address_line_2': getattr(suds_obj.coreData.businessInformation.physicalAddress, 'addressLine2', None),
'city': getattr(suds_obj.coreData.businessInformation.physicalAddress, 'city', None),
'state': getattr(suds_obj.coreData.businessInformation.physicalAddress, 'stateOrProvince', None),
'zip': getattr(suds_obj.coreData.businessInformation.physicalAddress, 'ZIPCode', None),
'zip4': getattr(suds_obj.coreData.businessInformation.physicalAddress, 'ZIPCodePlus4', None),
'country_code': getattr(suds_obj.coreData.businessInformation.physicalAddress, 'country', None),
'congressional_district': getattr(suds_obj.coreData.businessInformation.physicalAddress,
'congressionalDistrict', None),
'business_types_codes': [business_type.code for business_type
in getattr(suds_obj.coreData.generalInformation.listOfBusinessTypes,
'businessType', [])]
}
for suds_obj in get_entities(client, duns_list)
]
return pd.DataFrame(duns_name)
def get_parent_from_sam(client, duns_list):
""" Calls SAM API to retrieve parent DUNS data by DUNS number. Returns DUNS info as Data Frame
Args:
client: the SAM service client
duns_list: list of DUNS to search
Returns:
dataframe representing the DUNS and corresponding parent DUNS data
"""
duns_parent = [{
'awardee_or_recipient_uniqu': suds_obj.entityIdentification.DUNS,
'ultimate_parent_unique_ide': suds_obj.coreData.DUNSInformation.globalParentDUNS.DUNSNumber,
'ultimate_parent_legal_enti': (suds_obj.coreData.DUNSInformation.globalParentDUNS.legalBusinessName
or '').upper()
}
for suds_obj in get_entities(client, duns_list)
if suds_obj.coreData.DUNSInformation.globalParentDUNS.DUNSNumber
or suds_obj.coreData.DUNSInformation.globalParentDUNS.legalBusinessName
]
    return pd.DataFrame(duns_parent)
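# Hypothetical usage of the SAM helpers above (the DUNS number is a placeholder):
#
#     client = sam_config_is_valid()
#     names_df = get_name_from_sam(client, ['123456789'])
#     parents_df = get_parent_from_sam(client, ['123456789'])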
"""
@author: <NAME>
file: main_queue.py
"""
from __future__ import print_function
from scoop import futures
import multiprocessing
import numpy as np
import pandas as pd
import timeit
import ZIPapliences as A_ZIP
class load_generation:
""" Class prepares the system for generating load
Attributes
----------
START_TIME_Q (pandas datetime): start time to generate load data
END_TIME_Q (pandas datetime): end time to generate load data
Queue_type (int): 0=inf; 1=C; 2=Ct
    P_U_B (int): percentage upper bound --> e.g. 2 = 200% from the reference
physical_machine (int): 1 = single node 2 = multiple nodes
NUM_WORKERS (int): number of workers used when generating load in a single node
NUM_HOMES (int): number of homes being generated
OUT_PUT_FILE_NAME_pre (str): file path to write output
    OUT_PUT_FILE_NAME (str): prefix of the file name to be written
    OUT_PUT_FILE_NAME_end (str): end of the file name
    OUT_PUT_FILE_NAME_summary_pre (str): file path to write output
    OUT_PUT_FILE_NAME_summary (str): prefix of the summary file name to be written
TIME_DELT (pandas datetime): 1 minute
TIME_DELT_FH (pandas datetime): 1 hour
TIME_DELT_FD (pandas datetime): 1 day
    base_max (float): rescaling load reference upper bound
    base_min (float): rescaling load reference lower bound
ref_load (pandas series): reference load
DF_A (pandas dataframe): appliances characteristics
DF_ZIP_summer (pandas dataframe): appliances participation during the summer
DF_ZIP_winter (pandas dataframe): appliances participation during the winter
DF_ZIP_spring (pandas dataframe): appliances participation during the spring
APP_parameter_list (list): input parameters
        [(float) p.u. percentage of schedulable appliances 0.5=50%,
        (int) appliance set size,
        (int) average power rating in Watts,
        (int) standard deviation of the power rating in Watts,
        (float) average duration in hours,
        (float) standard deviation of the duration in hours,
        (float) average duration of the scheduling window in hours,
        (float) standard deviation of the scheduling window duration in hours]
Methods
-------
__init__ : create object with the parameters for the load generation
read_data : load input data
"""
def __init__(self,ST,ET,T,P,M,NW,NH):
""" Create load_generation object
Parameters
----------
ST (str): start time to generate load data e.g. '2014-01-01 00:00:00'
ET (str): end time to generate load data
T (int): 0=inf; 1=C; 2=Ct
        P (int): percentage upper bound --> e.g. 2 = 200% from the reference
M (int): 1 = single node 2 = multiple nodes
NW (int): number of workers used when generating load in a single node
NH (int): number of homes being generated
"""
self.START_TIME_Q = pd.to_datetime(ST)
self.END_TIME_Q = pd.to_datetime(ET)
self.Queue_type = T
self.P_U_B = P
self.physical_machine = M
self.NUM_WORKERS = NW
self.NUM_HOMES = NH
self.OUT_PUT_FILE_NAME_pre = 'outputdata/multy/'
self.OUT_PUT_FILE_NAME = 'multHDF'
self.OUT_PUT_FILE_NAME_end = '.h5'
self.OUT_PUT_FILE_NAME_summary_pre = 'outputdata/summary/'
self.OUT_PUT_FILE_NAME_summary = 'summaryHDF'
#Auxiliary variables
self.TIME_DELT = pd.to_timedelta('0 days 00:01:00')
self.TIME_DELT_FH = pd.to_timedelta('0 days 01:00:00')
self.TIME_DELT_FD = pd.to_timedelta('1 days 00:00:00')
self.base_max = 5000.0
self.base_min = 100.0
#From data
self.ref_load = None
self.DF_A = None
self.DF_ZIP_summer = None
self.DF_ZIP_winter = None
self.DF_ZIP_spring = None
#DEFINITIONS APPLIANCES
self.APP_parameter_list = [0.5,100,500,100,0.5,0.25,6.0,2.0]
def read_data(self,IF='inputdata/'):
""" Load reference load and appliance data
Parameters
----------
IF (str): folder of input data
"""
# Reference Energy
sys_load = pd.read_hdf(IF+'load_data.h5')
sys_load = sys_load['load']
sys_load = sys_load[self.START_TIME_Q:self.END_TIME_Q+self.TIME_DELT_FD]#*1e6 #DATA IS IN HOURS
sys_load = sys_load.resample(self.TIME_DELT_FH).max().ffill()#fix empty locations
scale_min = sys_load[self.START_TIME_Q:self.END_TIME_Q].min()
scale_max = sys_load[self.START_TIME_Q:self.END_TIME_Q].max()
ref = sys_load
ref = self.base_min+((ref-scale_min)/(scale_max-scale_min))*(self.base_max-self.base_min)
ref.name = 'Load [W]'
ref = ref.resample(self.TIME_DELT).max().interpolate(method='polynomial', order=0,limit_direction='forward')
self.ref_load = ref
# ZIP load
self.DF_A = pd.read_csv(IF+'ZIP_appliances.csv')
self.DF_ZIP_summer = pd.read_csv(IF+'ZIP_summer.csv')
self.DF_ZIP_winter = pd.read_csv(IF+'ZIP_winter.csv')
self.DF_ZIP_spring = pd.read_csv(IF+'ZIP_spring.csv')
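# Illustrative construction of the generator object (the argument values below
# are placeholders, not the settings used for the published runs):
#
#     LG = load_generation('2014-01-01 00:00:00', '2014-01-08 00:00:00',
#                          T=1, P=2, M=1, NW=4, NH=10)
#     LG.read_data()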
###########################################
# save data to file
###########################################
def save_HD5(a,b,x):
""" Save the generated load to HDF5 files
Parameters
----------
a (pandas dataframe): complete dataframe
b (pandas dataframe): summary dataframe
x (str): string number of the individual home id
"""
a.to_hdf(LG.OUT_PUT_FILE_NAME_pre+LG.OUT_PUT_FILE_NAME+x+LG.OUT_PUT_FILE_NAME_end, key=x,format='table',mode='w',dropna = True)
b.to_hdf(LG.OUT_PUT_FILE_NAME_summary_pre+LG.OUT_PUT_FILE_NAME_summary+x+LG.OUT_PUT_FILE_NAME_end, key=x,format='table',mode='w',dropna = True)
return None
###########################################
# APPLIANCES season
###########################################
def makeAPP(DF_A,DF_ZIP_summer,DF_ZIP_winter,DF_ZIP_spring,APP_P_L):
""" Generate individual appliances set for homes during the season of the year
Parameters
----------
DF_A (pandas dataframe): appliances characteristics
DF_ZIP_summer (pandas dataframe): appliances participation during the summer
DF_ZIP_winter (pandas dataframe): appliances participation during the winter
DF_ZIP_spring (pandas dataframe): appliances participation during the spring
Returns
----------
    APP_L_obj (list of appliance objects): appliance object lists for the seasons
"""
strataN=4 #from the ZIP study paper
c_index = np.array(DF_ZIP_summer['A_index'])
c_winter = np.array(DF_ZIP_winter.iloc[:,strataN])
c_spring = np.array(DF_ZIP_spring.iloc[:,strataN])
c_summer = np.array(DF_ZIP_summer.iloc[:,strataN])
APP_L_obj = []
APP_L_obj.append(A_ZIP.AppSET(DF_A,c_index,c_spring,APP_P_L))
APP_L_obj.append(A_ZIP.AppSET(DF_A,c_index,c_summer,APP_P_L))
APP_L_obj.append(A_ZIP.AppSET(DF_A,c_index,c_winter,APP_P_L))
return APP_L_obj
def season(date, HEMISPHERE = 'north'):
    Return the season of the year.
Parameters
----------
date (pandas datetime): time being generated
HEMISPHERE (str): north or south hemisphere
Returns
----------
s (int): indicates the season
"""
md = date.month * 100 + date.day
if ((md > 320) and (md < 621)):
s = 0 #spring
elif ((md > 620) and (md < 923)):
s = 1 #summer
elif ((md > 922) and (md < 1223)):
s = 2 #fall
else:
s = 3 #winter
if not HEMISPHERE == 'north':
s = (s + 2) % 3
if s ==2:
s=0 #spring and fall have same loads
if s == 3:
s=2
return s
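# Worked example of the season coding above (northern hemisphere):
#   season(pd.Timestamp('2014-07-04')) -> 1 (summer, since 620 < 704 < 923)
#   season(pd.Timestamp('2014-01-15')) -> 2 (winter; fall and spring both map to 0)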
def SeasonUPdate(temp):
""" Update appliance characteristics given the change in season
Parameters
----------
temp (obj): appliance set object for an individual season
Returns
----------
app_expected_load (float): expected load power in Watts
app_expected_dur (float): expected duration in hours
    appliance_set (list of appliance objects): appliance list for a given season
    t_delta_exp_dur (pandas datetime): expected appliance duration
    app_index (array): index for each appliance
"""
app_expected_load = temp.app_expected_load
app_expected_dur = temp.app_expected_dur
appliance_set = temp.appliance_set
t_delta_exp_dur = temp.t_delta_exp_dur
app_index = np.arange(0,len(temp.appliance_set))
return app_expected_load,app_expected_dur,appliance_set,t_delta_exp_dur,app_index
###########################################
#MAKE QUEUE MODEL C = infinity
###########################################
def solverZIPl_inf(x):
""" Generate load with C = infinity
Parameters
----------
x (str): string number of the individual home id
Returns
----------
x (str): string number of the individual home id
"""
START_TIME_Q = LG.START_TIME_Q
END_TIME_Q = LG.END_TIME_Q
ref_load = LG.ref_load
current_time = START_TIME_Q
customer_loads_GL = (ref_load*0.0).copy()
customer_loads_GL_VAR = (ref_load*0.0).copy()
    L1=[];L2=[];L3=[];L4=[];L5=[];L6=[];L7=[];L8=[];L9=[];L10=[];L11=[];L12=[];L13=[];L14=[]
APP_L_obj = makeAPP(LG.DF_A,LG.DF_ZIP_summer,LG.DF_ZIP_winter,LG.DF_ZIP_spring,LG.APP_parameter_list)
app_expected_load,app_expected_dur,appliance_set,t_delta_exp_dur,app_index = SeasonUPdate(APP_L_obj[season(current_time,'north')])
dates = ref_load.index
while current_time < END_TIME_Q:
m_t_plus_delta = ref_load.asof(where=current_time+t_delta_exp_dur)
lambda_t = m_t_plus_delta / (app_expected_load*app_expected_dur) #lam(t) = m(t + E[D])/(E[D]E[L])
delta_t = np.random.exponential(1.0/lambda_t) #lambda_t is the rate parameter, numpy requires the scale which is the reciprocal of rate. Alternatively can switch the calculation of lambda_t, but this way matches the derived equations.
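        # Numerical illustration (values are illustrative only): with
        # m(t + E[D]) = 2000 W, E[L] = 500 W and E[D] = 0.5 h,
        # lambda_t = 2000 / (500 * 0.5) = 8 arrivals/hour, so the exponential
        # draw above has a mean inter-arrival time of 1/8 h = 7.5 minutes.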
if delta_t < 0.00000003:
delta_t = 0.00000003
current_time += pd.to_timedelta('%s s' % (delta_t*3600.0)) #converted to seconds as some delta_t in hours was too small for pandas to parse correctly
if current_time < END_TIME_Q: #check after time is updated we are still in sim time
###########################################
#Season
###########################################
app_expected_load,app_expected_dur,appliance_set,t_delta_exp_dur,app_index = SeasonUPdate(APP_L_obj[season(current_time,'north')])
app = appliance_set[np.random.choice(app_index,size=1,replace=True)[0]]
add_time = current_time
this_app_endtime = add_time + pd.to_timedelta('%s h' % app.duration)
this_app_curtime = add_time
customer_loads_GL[dates.asof(this_app_curtime):dates.asof(this_app_endtime)] += app.power
customer_loads_GL_VAR[dates.asof(this_app_curtime):dates.asof(this_app_endtime)] += app.reactive
L1.append(dates.asof(this_app_curtime))#['start time']=dates.asof(this_app_curtime)
L2.append(pd.to_timedelta('%s h' % app.duration).round('1min'))#['duration']=pd.to_timedelta('%s h' % app.duration).round('1min')
L3.append(app.power)#['power']=app.power
L4.append(app.skedulable)#['skedulable']=app.skedulable
L5.append(pd.to_timedelta('%s h' % app.SWn).round('1min'))#['shifting window -']=pd.to_timedelta('%s h' % app.SWn).round('1min')
L6.append(pd.to_timedelta('%s h' % app.SWp).round('1min'))#['shifting window +']=pd.to_timedelta('%s h' % app.SWp).round('1min')
L7.append(app.reactive)#['reactive']=app.reactive
L8.append(app.Zp)#['Zp']=app.Zp
L9.append(app.Ip)#['Ip']=app.Ip
L10.append(app.Pp)#['Pp']=app.Pp
L11.append(app.Zq)#['Zq']=app.Zq
L12.append(app.Iq)#['Iq']=app.Iq
L13.append(app.Pq)#['Pq']=app.Pq
L14.append(app.indeX)#['indeX']=app.indeX
sagra = pd.DataFrame({'start time': L1,
'duration': L2,
'power': L3,
'skedulable': L4,
'shifting window -': L5,
'shifting window +': L6,
'reactive': L7,
'Zp': L8,
'Ip': L9,
'Pp': L10,
'Zq': L11,
'Iq': L12,
'Pq': L13,
'indeX': L14
})
sagra = sagra[sagra['start time'] >= START_TIME_Q]
sagra = sagra.reset_index(drop=True)
sagra = sagra[sagra['start time'] <= END_TIME_Q]
sagra = sagra.reset_index(drop=True)
customer_loads_GL = customer_loads_GL[START_TIME_Q:END_TIME_Q]
customer_loads_GL_VAR = customer_loads_GL_VAR[START_TIME_Q:END_TIME_Q]
activeANDreactive = pd.DataFrame({'W':customer_loads_GL, 'VAR':customer_loads_GL_VAR})
save_HD5(sagra,activeANDreactive,x)
return x
###########################################
#MAKE QUEUE MODEL C <-- limited
###########################################
def solverZIPl_C(x):
""" Generate load with C <-- limited
Parameters
----------
x (str): string number of the individual home id
Returns
----------
x (str): string number of the individual home id
"""
START_TIME_Q = LG.START_TIME_Q
END_TIME_Q = LG.END_TIME_Q
P_U_B = LG.P_U_B
ref_load = LG.ref_load
TIME_DELT = LG.TIME_DELT
current_time = START_TIME_Q
customer_loads_GL = (ref_load*0.0).copy()
customer_loads_GL_VAR = (ref_load*0.0).copy()
    L1=[];L2=[];L3=[];L4=[];L5=[];L6=[];L7=[];L8=[];L9=[];L10=[];L11=[];L12=[];L13=[];L14=[]
W_TIME = pd.to_timedelta('0 days 22:00:00')
if LG.Queue_type == 1:
S_W = (ref_load*0.0 + 1) * ((ref_load[START_TIME_Q:END_TIME_Q].max())*P_U_B)
if LG.Queue_type == 2:
S_W = ref_load*P_U_B
APP_L_obj = makeAPP(LG.DF_A,LG.DF_ZIP_summer,LG.DF_ZIP_winter,LG.DF_ZIP_spring,LG.APP_parameter_list)
app_expected_load,app_expected_dur,appliance_set,t_delta_exp_dur,app_index = SeasonUPdate(APP_L_obj[season(current_time,'north')])
dates = ref_load.index
while current_time < END_TIME_Q:
m_t_plus_delta = ref_load.asof(where=current_time+t_delta_exp_dur)
lambda_t = m_t_plus_delta / (app_expected_load*app_expected_dur) #lam(t) = m(t + E[D])/(E[D]E[L])
delta_t = np.random.exponential(1.0/lambda_t) #lambda_t is the rate parameter, numpy requires the scale which is the reciprocal of rate. Alternatively can switch the calculation of lambda_t, but this way matches the derived equations.
if delta_t < 0.00000003:
delta_t = 0.00000003
current_time += pd.to_timedelta('%s s' % (delta_t*3600.0)) #converted to seconds as some delta_t in hours was too small for pandas to parse correctly
if current_time < END_TIME_Q: #check after time is updated we are still in sim time
###########################################
#Season
###########################################
app_expected_load,app_expected_dur,appliance_set,t_delta_exp_dur,app_index = SeasonUPdate(APP_L_obj[season(current_time,'north')])
app = appliance_set[np.random.choice(app_index,size=1,replace=True)[0]]
V_W = (customer_loads_GL[dates.asof(current_time):dates.asof(current_time+W_TIME)] + app.power) < (S_W[dates.asof(current_time):dates.asof(current_time+W_TIME)])
add_time = current_time
while add_time <= current_time + W_TIME:
VV_W = V_W[dates.asof(add_time):dates.asof(add_time + pd.to_timedelta('%s h' % app.duration))]
VV_W_L = VV_W.index[VV_W == True].tolist()
if len(VV_W_L) >= VV_W.size:
break
add_time += TIME_DELT
            this_app_endtime = add_time + pd.to_timedelta('%s h' % app.duration)
# import libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Load messages and categories from csv files into a pandas DF.
INPUT:
messages_filepath - path to location of messages csv file
categories_filepath - path to location of categories csv files
OUTPUT:
df - pandas DF with messages and categories
"""
# load messages dataset
messages = pd.read_csv(messages_filepath)
# load categories dataset
categories = pd.read_csv(categories_filepath)
# merge datasets
df = pd.merge(messages, categories, on='id')
return df
def clean_data(df):
"""
Clean data in a pandas DF.
Clean data in a pandas DF by renaming category columns, convert
category values to 0 or 1 and drop duplicates.
INPUT:
df - pandas dataframe with messages and categories in source format
OUTPUT:
df - cleaned pandas dataframe with messages and categories
"""
# create a dataframe of the 36 individual category columns
categories = df['categories'].str.split(';', expand=True)
# select the first row of the categories dataframe
row = categories.iloc[0]
# use this row to extract a list of new column names for categories.
# one way is to apply a lambda function that takes everything
# up to the second to last character of each string with slicing
category_colnames = list(map(lambda col: col[:-2], row))
# rename the columns of `categories`
categories.columns = category_colnames
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].str[-1:]
# convert column from string to numeric
categories[column] = categories[column].astype(int)
# drop the original categories column from `df`
df = df.drop(['categories'], axis=1)
# concatenate the original dataframe with the new `categories` dataframe
    df = pd.concat([df, categories], axis=1, sort=False)
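    # Sketch of the truncated remainder of this step, following the docstring
    # above: remove duplicate rows and return the cleaned frame.
    df = df.drop_duplicates()
    return df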
import os
import pandas as pd
import cv2
import scipy.stats as stat
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from .matplotlibstyle import *
import datetime
class Datahandler():
    'Matches EL image paths to IV data based on input columns'
def __init__(self,workdir,ELfolderpath=None,IVfile=None):
'initialize and create folder'
self.dataset_id = None
# Create directory for computation on this dataset
self.pathDic = {
'workdir': workdir,
'ELfolderpath': ELfolderpath,
'IVfile': IVfile,
'figures': workdir+"figures\\",
'models': workdir+"models\\",
'traces': workdir+"traces\\",
'outputs': workdir+"outputs\\",
'Matchfile': workdir+"match.csv",
}
for key, value in self.pathDic.items():
if key in ['ELfolderpath','IVfile','Matchfile']: continue
if not os.path.exists(value): os.mkdir(value)
if os.path.exists(self.pathDic['Matchfile']):
self.loadMatchData()
def readEL(self):
'Read images from ELfolderpath and store in dataframe'
if not self.pathDic['ELfolderpath']: raise ValueError('ELfolderpath not defined')
images = []
for subdir,dirs,files in os.walk(self.pathDic['ELfolderpath']):
for file in files:
ext = os.path.splitext(file)[1]
if ext == ".db": continue
name = os.path.splitext(file)[0]
size = os.path.getsize(subdir+"\\"+file)
location = subdir+"\\"+file
line = size,ext,name,location
images.append(line)
self.ELdf = pd.DataFrame(images)
self.ELdf.columns=['size','extension','filename','path']
def readIV(self,sep=","):
'Read IV data from IVfile csv'
if not self.pathDic['IVfile']: raise ValueError('IVfile not defined')
self.IVdf = pd.read_csv(self.pathDic['IVfile'], sep=sep)
def matchData(self,matchIVcol, matchELcol,keepcol=None):
'Join both EL and IV dataframe and save it in Matchfile as a csv'
# Inner join of both dataframes
self.matchDf= pd.merge(self.ELdf,self.IVdf,left_on=matchELcol,right_on=matchIVcol, how='inner')
self.matchDf.fillna(0, inplace=True)
if keepcol: self.matchDf = self.matchDf[keepcol]
self.matchDf.to_csv(self.pathDic['Matchfile'],encoding='utf-8', index=False)
def loadMatchData(self):
'Load the data if available'
if os.path.exists(self.pathDic['Matchfile']):
self.matchDf = pd.read_csv(self.pathDic['Matchfile'])
self.matchDf.fillna(0, inplace=True)
def computeStatParameters(self,threshold=20):
'Load images stored in path and compute the EL parameters'
stat_feature = {'mu':[],'ICA':[],'kur':[],'skew':[],'en':[],'sp':[],'fw':[], 'md':[], 'sd':[], 'kstat':[], 'var':[], 'mu_img':[], 'med_img':[], 'sum_img':[], 'sd_img':[]}
for file in self.matchDf['path'].values:
img = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
hist2,bins=np.histogram(img,256,[0,256])
threshold=min(threshold,len(hist2)-1)
hist = [h if b>threshold else 0 for b,h in zip(bins,hist2)]
PEL=hist/np.sum(hist)
np.sum(PEL)
len(hist)
stat_feature['mu'].append(np.mean(hist))
stat_feature['md'].append(np.median(PEL))
stat_feature['ICA'].append(100/np.sum(hist2)*np.sum([hist2[i] for i in range(threshold+1)]))
stat_feature['sd'].append(np.std(PEL))
stat_feature['kur'].append(stat.kurtosis(PEL))
stat_feature['skew'].append(stat.skew(PEL))
stat_feature['kstat'].append(stat.kstat(PEL))
stat_feature['var'].append(stat.variation(PEL))
stat_feature['en'].append(stat.entropy(PEL))
stat_feature['sp'].append(np.ptp(PEL))
stat_feature['fw'].append(((5/100)*(np.max(PEL)))-((5/100)*(np.min(PEL))))
stat_feature['mu_img'].append(np.mean(img))
stat_feature['med_img'].append(np.median(img))
stat_feature['sum_img'].append(np.sum(img))
stat_feature['sd_img'].append(np.std(img))
for key,value in stat_feature.items():
self.matchDf[key]=value
self.matchDf.fillna(0, inplace=True)
self.matchDf.to_csv(self.pathDic['Matchfile'],encoding='utf-8', index=False)
def computerDatasetStats(self,targetCol,fMean=20, fStd=0.3):
'Compute the normalized and mmad statistics of the target column'
self.normCol(targetCol=targetCol,fMean=fMean,fStd=fStd)
self.mmadCol(targetCol=targetCol,fMean=fMean,fStd=fStd)
self.matchDf[targetCol+'_std_norm']=[eff*fStd+fMean for eff in self.matchDf[targetCol+'_norm']]
self.matchDf[targetCol+'_std_mmad']=[eff*fStd+fMean for eff in self.matchDf[targetCol+'_mmad']]
df = self.matchDf
line=[
self.dataset_id,
df[targetCol].min(),
df[targetCol].max(),
df[targetCol].mean(),
df[targetCol].median(),
df[targetCol].std(),
df[targetCol].mad(),
df[targetCol+'_std_norm'].min(),
df[targetCol+'_std_norm'].max(),
df[targetCol+'_std_norm'].mean(),
df[targetCol+'_std_norm'].median(),
df[targetCol+'_std_norm'].std(),
df[targetCol+'_std_norm'].mad(),
df[targetCol+'_std_mmad'].min(),
df[targetCol+'_std_mmad'].max(),
df[targetCol+'_std_mmad'].mean(),
df[targetCol+'_std_mmad'].median(),
df[targetCol+'_std_mmad'].std(),
df[targetCol+'_std_mmad'].mad(),
]
return line
def addLabels(self,binCol,binTab):
        'Bin the binCol column of the match dataset based on binTab and change matchDf in place'
self.binCol = binCol
self.binTab = binTab
ohe = OneHotEncoder(sparse=False,categories='auto')
        self.matchDf['Bins'] = pd.cut(self.matchDf[binCol], binTab, include_lowest=True)
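        # Plausible next step (a sketch, not from the original code): one-hot
        # encode the binned labels with the `ohe` encoder defined above, e.g.
        # labels = ohe.fit_transform(self.matchDf[['Bins']].astype(str))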
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_hydrofunctions
----------------------------------
Tests for `hydrofunctions` module.
"""
from __future__ import (
absolute_import,
print_function,
division,
unicode_literals,
)
from unittest import mock
import unittest
import warnings
from pandas.testing import assert_frame_equal
import pandas as pd
import numpy as np
import pyarrow as pa
import json
import hydrofunctions as hf
from .fixtures import (
fakeResponse,
daily_dupe,
daily_dupe_altered,
tzfail,
JSON15min2day,
two_sites_two_params_iv,
nothing_avail,
mult_flags,
diff_freq,
startDST,
endDST,
recent_only,
)
class TestHydrofunctionsParsing(unittest.TestCase):
"""Test the parsing of hf.extract_nwis_df()
test the following:
Can it handle multiple qualifier flags?
how does it encode mult params & mult sites?
Does it raise HydroNoDataError if nothing returned?
"""
def test_hf_extract_nwis_df_accepts_response_obj(self):
fake_response = fakeResponse()
actual_df, actual_dict = hf.extract_nwis_df(fake_response, interpolate=False)
self.assertIsInstance(
actual_df, pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertIsInstance(actual_dict, dict, msg="Did not return a dict.")
def test_hf_extract_nwis_df_parse_multiple_flags(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
self.assertIsInstance(
actual_df, pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertIsInstance(actual_dict, dict, msg="Did not return a dict.")
    def test_hf_extract_nwis_df_parse_two_sites_two_params_iv_returns_correct_types(self):
actual_df, actual_dict = hf.extract_nwis_df(
two_sites_two_params_iv, interpolate=False
)
self.assertIs(
type(actual_df), pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertIs(type(actual_dict), dict, msg="Did not return a dict.")
# TODO: test that data is organized correctly
def test_hf_extract_nwis_df_parse_two_sites_two_params_iv_return_df(self):
actual_df, actual_dict = hf.extract_nwis_df(
two_sites_two_params_iv, interpolate=False
)
actual_len, actual_width = actual_df.shape
self.assertIs(
type(actual_df), pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertEqual(actual_len, 93, "Wrong length for dataframe")
self.assertEqual(actual_width, 8, "Wrong width for dataframe")
expected_columns = [
"USGS:01541000:00060:00000",
"USGS:01541000:00060:00000_qualifiers",
"USGS:01541000:00065:00000",
"USGS:01541000:00065:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541200:00065:00000_qualifiers",
]
actual_columns = actual_df.columns.values
self.assertCountEqual(
actual_columns, expected_columns, "column names don't match expected"
)
self.assertTrue(actual_df.index.is_unique, "index has repeated values.")
self.assertTrue(actual_df.index.is_monotonic, "index is not monotonic.")
def test_hf_extract_nwis_df_parse_JSON15min2day_return_df(self):
actual_df, actual_dict = hf.extract_nwis_df(JSON15min2day, interpolate=False)
actual_len, actual_width = actual_df.shape
self.assertIs(
type(actual_df), pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertEqual(actual_len, 192, "Wrong length for dataframe")
self.assertEqual(actual_width, 2, "Wrong width for dataframe")
expected_columns = [
"USGS:03213700:00060:00000",
"USGS:03213700:00060:00000_qualifiers",
]
actual_columns = actual_df.columns.values
self.assertCountEqual(
actual_columns, expected_columns, "column names don't match expected"
)
self.assertTrue(actual_df.index.is_unique, "index has repeated values.")
self.assertTrue(actual_df.index.is_monotonic, "index is not monotonic.")
def test_hf_extract_nwis_df_parse_mult_flags_return_df(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
actual_len, actual_width = actual_df.shape
self.assertIs(
type(actual_df), pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertEqual(actual_len, 480, "Wrong length for dataframe")
self.assertEqual(actual_width, 2, "Wrong width for dataframe")
expected_columns = [
"USGS:01542500:00060:00000",
"USGS:01542500:00060:00000_qualifiers",
]
actual_columns = actual_df.columns.values
self.assertCountEqual(
actual_columns, expected_columns, "column names don't match expected"
)
self.assertTrue(actual_df.index.is_unique, "index has repeated values.")
self.assertTrue(actual_df.index.is_monotonic, "index is not monotonic.")
def test_hf_extract_nwis_raises_exception_when_df_is_empty(self):
empty_response = {"value": {"timeSeries": []}}
with self.assertRaises(hf.HydroNoDataError):
hf.extract_nwis_df(empty_response, interpolate=False)
def test_hf_extract_nwis_raises_exception_when_df_is_empty_nothing_avail(self):
with self.assertRaises(hf.HydroNoDataError):
hf.extract_nwis_df(nothing_avail, interpolate=False)
@unittest.skip(
"assertWarns errors on Linux. See https://bugs.python.org/issue29620"
)
def test_hf_extract_nwis_warns_when_diff_series_have_diff_freq(self):
with self.assertWarns(hf.HydroUserWarning):
hf.extract_nwis_df(diff_freq, interpolate=False)
def test_hf_extract_nwis_accepts_no_startdate_no_period_no_interpolate(self):
actual_df, actual_dict = hf.extract_nwis_df(recent_only, interpolate=False)
expected_shape = (
2,
4,
) # only the most recent data for two parameters, plus qualifiers = 4 columns; 2 rows: different dates.
self.assertEqual(
actual_df.shape,
expected_shape,
"The dataframe should have four columns and two rows.",
)
def test_hf_extract_nwis_accepts_no_startdate_no_period_interpolate(self):
actual_df, actual_dict = hf.extract_nwis_df(recent_only, interpolate=True)
expected_shape = (
2,
4,
) # only the most recent data for two parameters, plus qualifiers = 4 columns; 2 rows: different dates.
self.assertEqual(
actual_df.shape,
expected_shape,
"The dataframe should have four columns and two rows.",
)
def test_hf_extract_nwis_returns_comma_separated_qualifiers_1(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
actual_flags_1 = actual_df.loc[
"2019-01-24T10:30:00.000-05:00", "USGS:01542500:00060:00000_qualifiers"
]
expected_flags_1 = "P,e"
self.assertEqual(
actual_flags_1,
expected_flags_1,
"The data qualifier flags were not parsed correctly.",
)
def test_hf_extract_nwis_returns_comma_separated_qualifiers_2(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
actual_flags_2 = actual_df.loc[
"2019-01-28T16:00:00.000-05:00", "USGS:01542500:00060:00000_qualifiers"
]
expected_flags_2 = "P,Ice"
self.assertEqual(
actual_flags_2,
expected_flags_2,
"The data qualifier flags were not parsed correctly.",
)
def test_hf_extract_nwis_replaces_NWIS_noDataValue_with_npNan(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
actual_nodata = actual_df.loc[
"2019-01-28T16:00:00.000-05:00", "USGS:01542500:00060:00000"
]
self.assertTrue(
np.isnan(actual_nodata),
"The NWIS no data value was not replaced with np.nan. ",
)
def test_hf_extract_nwis_adds_missing_tags(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
actual_missing = actual_df.loc[
"2019-01-24 17:00:00-05:00", "USGS:01542500:00060:00000_qualifiers"
]
self.assertEqual(
actual_missing,
"hf.missing",
"Missing records should be given 'hf.missing' _qualifier tags.",
)
def test_hf_extract_nwis_adds_upsample_tags(self):
actual_df, actual_dict = hf.extract_nwis_df(diff_freq, interpolate=False)
actual_upsample = actual_df.loc[
"2018-06-01 00:15:00-04:00", "USGS:01570500:00060:00000_qualifiers"
]
self.assertEqual(
actual_upsample,
"hf.upsampled",
"New records created by upsampling should be given 'hf.upsample' _qualifier tags.",
)
def test_hf_extract_nwis_interpolates(self):
actual_df, actual_dict = hf.extract_nwis_df(diff_freq, interpolate=True)
actual_upsample_interpolate = actual_df.loc[
"2018-06-01 00:15:00-04:00", "USGS:01570500:00060:00000"
]
self.assertEqual(
actual_upsample_interpolate,
42200.0,
"New records created by upsampling should have NaNs replaced with interpolated values.",
)
@unittest.skip("This feature is not implemented yet.")
def test_hf_extract_nwis_interpolates_and_adds_tags(self):
# Ideally, every data value that was interpolated should have a tag
# added to the qualifiers that says it was interpolated.
actual_df, actual_dict = hf.extract_nwis_df(diff_freq, interpolate=True)
actual_upsample_interpolate_flag = actual_df.loc[
"2018-06-01 00:15:00-04:00", "USGS:01570500:00060:00000_qualifiers"
]
expected_flag = "hf.interpolated"
self.assertEqual(
actual_upsample_interpolate_flag,
expected_flag,
"Interpolated values should be marked with a flag.",
)
def test_hf_extract_nwis_corrects_for_start_of_DST(self):
actual_df, actual_dict = hf.extract_nwis_df(startDST, interpolate=False)
actual_len, width = actual_df.shape
expected = 284
self.assertEqual(
actual_len,
expected,
"Three days including the start of DST should have 3 * 24 * 4 = 288 observations, minus 4 = 284",
)
def test_hf_extract_nwis_corrects_for_end_of_DST(self):
actual_df, actual_dict = hf.extract_nwis_df(endDST, interpolate=False)
actual_len, width = actual_df.shape
expected = 292
self.assertEqual(
actual_len,
expected,
"Three days including the end of DST should have 3 * 24 * 4 = 288 observations, plus 4 = 292",
)
def test_hf_extract_nwis_can_find_tz_in_tzfail(self):
actualDF = hf.extract_nwis_df(tzfail, interpolate=False)
def test_hf_extract_nwis_can_deal_with_duplicated_records_as_input(self):
actualDF = hf.extract_nwis_df(daily_dupe, interpolate=False)
def test_hf_extract_nwis_can_deal_with_duplicated_records_that_have_been_altered_as_input(
self,
):
# What happens if a scientist replaces an empty record with new
# estimated data, and forgets to discard the old data?
actualDF = hf.extract_nwis_df(daily_dupe_altered, interpolate=False)
def test_hf_get_nwis_property(self):
sites = None
bBox = (-105.430, 39.655, -104, 39.863)
# TODO: test should be the json for a multiple site request.
names = hf.get_nwis_property(JSON15min2day, key="name")
self.assertIs(type(names), list, msg="Did not return a list")
class TestHydrofunctions(unittest.TestCase):
@mock.patch("requests.get")
def test_hf_get_nwis_calls_correct_url(self, mock_get):
"""
Thanks to
http://engineroom.trackmaven.com/blog/making-a-mockery-of-python/
"""
site = "A"
service = "iv"
start = "C"
end = "D"
expected_url = "https://waterservices.usgs.gov/nwis/" + service + "/?"
expected_headers = {"Accept-encoding": "gzip", "max-age": "120"}
expected_params = {
"format": "json,1.1",
"sites": "A",
"stateCd": None,
"countyCd": None,
"bBox": None,
"parameterCd": None,
"period": None,
"startDT": "C",
"endDT": "D",
}
expected = fakeResponse()
expected.status_code = 200
expected.reason = "any text"
mock_get.return_value = expected
actual = hf.get_nwis(site, service, start, end)
mock_get.assert_called_once_with(
expected_url, params=expected_params, headers=expected_headers
)
self.assertEqual(actual, expected)
@mock.patch("requests.get")
def test_hf_get_nwis_calls_correct_url_multiple_sites(self, mock_get):
site = ["site1", "site2"]
parsed_site = hf.check_parameter_string(site, "site")
service = "iv"
start = "C"
end = "D"
expected_url = "https://waterservices.usgs.gov/nwis/" + service + "/?"
expected_headers = {"max-age": "120", "Accept-encoding": "gzip"}
expected_params = {
"format": "json,1.1",
"sites": parsed_site,
"stateCd": None,
"countyCd": None,
"bBox": None,
"parameterCd": None,
"period": None,
"startDT": "C",
"endDT": "D",
}
expected = fakeResponse()
expected.status_code = 200
expected.reason = "any text"
mock_get.return_value = expected
actual = hf.get_nwis(site, service, start, end)
mock_get.assert_called_once_with(
expected_url, params=expected_params, headers=expected_headers
)
self.assertEqual(actual, expected)
@mock.patch("requests.get")
def test_hf_get_nwis_service_defaults_dv(self, mock_get):
site = "01541200"
expected_service = "dv"
expected_url = "https://waterservices.usgs.gov/nwis/" + expected_service + "/?"
expected_headers = {"max-age": "120", "Accept-encoding": "gzip"}
expected_params = {
"format": "json,1.1",
"sites": site,
"stateCd": None,
"countyCd": None,
"bBox": None,
"parameterCd": None,
"period": None,
"startDT": None,
"endDT": None,
}
expected = fakeResponse()
expected.status_code = 200
expected.reason = "any text"
mock_get.return_value = expected
actual = hf.get_nwis(site)
mock_get.assert_called_once_with(
expected_url, params=expected_params, headers=expected_headers
)
self.assertEqual(actual, expected)
@mock.patch("requests.get")
def test_hf_get_nwis_converts_parameterCd_all_to_None(self, mock_get):
site = "01541200"
service = "iv"
parameterCd = "all"
expected_parameterCd = None
expected_url = "https://waterservices.usgs.gov/nwis/" + service + "/?"
expected_headers = {"max-age": "120", "Accept-encoding": "gzip"}
expected_params = {
"format": "json,1.1",
"sites": site,
"stateCd": None,
"countyCd": None,
"bBox": None,
"parameterCd": None,
"period": None,
"startDT": None,
"endDT": None,
}
expected = fakeResponse()
expected.status_code = 200
expected.reason = "any text"
mock_get.return_value = expected
actual = hf.get_nwis(site, service, parameterCd=parameterCd)
mock_get.assert_called_once_with(
expected_url, params=expected_params, headers=expected_headers
)
self.assertEqual(actual, expected)
def test_hf_get_nwis_raises_ValueError_too_many_locations(self):
with self.assertRaises(ValueError):
hf.get_nwis("01541000", stateCd="MD")
def test_hf_get_nwis_raises_ValueError_start_and_period(self):
with self.assertRaises(ValueError):
hf.get_nwis("01541000", start_date="2014-01-01", period="P1D")
def test_hf_nwis_custom_status_codes_returns_None_for_200(self):
fake = fakeResponse()
fake.status_code = 200
fake.reason = "any text"
fake.url = "any text"
self.assertIsNone(hf.nwis_custom_status_codes(fake))
@unittest.skip(
"assertWarns errors on Linux. See https://bugs.python.org/issue29620"
)
def test_hf_nwis_custom_status_codes_raises_warning_for_non200(self):
expected_status_code = 400
bad_response = fakeResponse(code=expected_status_code)
with self.assertWarns(SyntaxWarning) as cm:
hf.nwis_custom_status_codes(bad_response)
def test_hf_nwis_custom_status_codes_returns_status_for_non200(self):
expected_status_code = 400
bad_response = fakeResponse(code=expected_status_code)
actual = hf.nwis_custom_status_codes(bad_response)
self.assertEqual(actual, expected_status_code)
def test_hf_calc_freq_returns_Timedelta_and_60min(self):
test_index = pd.date_range("2014-12-29", "2015-01-03", freq="60T")
actual = hf.calc_freq(test_index)
self.assertIsInstance(
actual, pd.Timedelta, "Calc_freq() should return pd.Timedelta."
)
expected = pd.Timedelta("60 minutes")
self.assertEqual(
actual, expected, "Calc_freq() should have converted 60T to 60 minutes."
)
def test_hf_calc_freq_accepts_Day(self):
test_index = pd.date_range("2014-12-29", periods=3)
actual = hf.calc_freq(test_index)
self.assertIsInstance(
actual, pd.Timedelta, "Calc_freq() should return pd.Timedelta."
)
expected = pd.Timedelta("1 day")
self.assertEqual(
actual, expected, "Calc_freq() should have found a 1 day frequency."
)
def test_hf_calc_freq_accepts_hour(self):
test_index = pd.date_range("2014-12-29", freq="1H", periods=30)
actual = hf.calc_freq(test_index)
self.assertIsInstance(
actual, pd.Timedelta, "Calc_freq() should return pd.Timedelta."
)
expected = pd.Timedelta("1 hour")
self.assertEqual(
actual, expected, "Calc_freq() should have found a 1 hour frequency."
)
def test_hf_calc_freq_accepts_1Day_1hour(self):
test_index = pd.date_range("2014-12-29", freq="1D1H2T", periods=30)
actual = hf.calc_freq(test_index)
self.assertIsInstance(
actual, pd.Timedelta, "Calc_freq() should return pd.Timedelta."
)
expected = pd.Timedelta("1 day 1 hour 2 minutes")
self.assertEqual(
actual,
expected,
"Calc_freq() should have found a 1 day, 1 hour, 2 minutes frequency.",
)
def test_hf_calc_freq_accepts_freq_None(self):
dates = ["2014-12-20", "2014-12-22", "2014-12-24", "2014-12-26"]
        test_index = pd.DatetimeIndex(dates)
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
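# Illustrative sketch (not used by the tests below): the factories above build
# DataSet subclasses parameterized by how many announcements out a pipeline
# should look. ``QuartersEstimates(1)`` therefore yields a dataset whose
# ``num_announcements`` attribute is 1 and whose columns mirror ``Estimates``.
def _example_quarters_dataset():
    one_quarter_out = QuartersEstimates(1)
    assert one_quarter_out.num_announcements == 1
    assert {c.name for c in one_quarter_out.columns} >= {'estimate', 'event_date'}
    return one_quarter_out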
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
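# Illustrative helper (assumed toy inputs; not referenced by the test cases):
# shows the shape produced by ``create_expected_df_for_factor_compute`` -- one
# forward-filled row per calendar day, indexed by (at_date, knowledge_date),
# with one column of estimates per sid.
def _example_expected_factor_df():
    return create_expected_df_for_factor_compute(
        pd.Timestamp('2015-01-01'),
        [0, 1],
        [(0, 1.5, pd.Timestamp('2015-01-02')),
         (1, 2.5, pd.Timestamp('2015-01-03'))],
        pd.Timestamp('2015-01-05'),
    )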
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that the pipeline loads the expected estimate values for every
        requested column on a single day.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous quarter loader returns the most recently announced
    quarter's values when a single day of data is loaded.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next quarter loader returns the upcoming quarter's values
    when a single day of data is loaded.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
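# ``dummy_df`` below is a minimal single-row events frame: the
# wrong-loader-definition tests only need *some* events to construct a loader,
# not realistic estimate data.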
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
    Run the same tests as NextEstimate, but using a BlazeNextEstimatesLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
    Run the same tests as PreviousEstimate, but using a
    BlazePreviousEstimatesLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
            # We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
                pd.Timestamp('2015-01-15')),
        })
import numpy as np
import pandas as pd
color_file_path = "features/"+"DB"+"/"+"DB2"+"_"+"color"+".csv"
type_file_path = "features/"+"DB"+"/"+"DB2"+"_"+"type"+".csv"
color_f = np.loadtxt(color_file_path, delimiter=",",dtype="float32")
type_f = np.loadtxt(type_file_path, delimiter=",",dtype="float32")
black_color_f = color_f[:8]
blue_color_f = color_f[8:16]
white_color_f = color_f[16:24]
red_color_f = color_f[24:32]
black_type_f,blue_type_f,white_type_f,red_type_f = np.split(type_f[:32], 4)
x = np.array([np.concatenate([a,b]) for (a,b) in zip(color_f, type_f)])
black_x_f,blue_x_f,white_x_f,red_x_f = np.split(x[:32], 4)
output = [[0]*4 for i in range(7)]
for i, x_ in enumerate([black_x_f,blue_x_f,white_x_f,red_x_f]):
output[0][i] = np.average(np.std(x_,axis=0))
output[1][i] = np.average(np.std(x_[[0,2,3,4,5]],axis=0))
output[2][i] = np.average(np.std(x_[[1,2,3,6,7]],axis=0))
output[3][i] = np.average(np.std(x_[[0,1,3,5,7]],axis=0))
output[4][i] = np.average(np.std(x_[[0,1,2,4,6]],axis=0))
output[5][i] = np.average(np.std(x[:32],axis=0))
output[6][i] = np.average(np.std(x,axis=0))
output = np.array(output)
output = pd.DataFrame(output)
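# Possible next step (an assumption -- the original script stops after building
# the DataFrame): label the 7 x 4 summary for readability. Columns follow the
# loop order above (black, blue, white, red); rows are the average per-feature
# standard deviations for each sample subset.
labeled = pd.DataFrame(output.values,
                       columns=["black", "blue", "white", "red"],
                       index=["all_8_samples", "subset_02345", "subset_12367",
                              "subset_01357", "subset_01246",
                              "all_32_samples", "all_samples"])
print(labeled)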
import altair as alt
import altair_viewer as view
import pandas as pd
import numpy as np
import sklearn
import scipy.stats.stats
import data_cat
import mushroom_class_fix
import util_func
from altair import pipe, limit_rows, to_values
t = lambda data: pipe(data, limit_rows(max_rows=100000), to_values)
alt.data_transformers.register('custom', t)
alt.data_transformers.enable('custom')
def get_balance_chart(data, **kwargs):
"""
Parameters
----------
data: pandas.DataFrame
DataFrame with nominal or metrical columns
kwargs:
title: str, default="Balance plot",
altair.Chart title
count: bool, default=True,
adds the percentage values of the class values to the title
reindex: list of strs or False, default=False,
nothing if False, else reindexes the class values according to the given list
Returns
-------
var name=chart: altair.Chart,
bar plot of the class value occurences
"""
if 'title' not in kwargs:
kwargs['title'] = "Balance plot"
if 'count' not in kwargs:
kwargs['count'] = True
if 'reindex' not in kwargs:
kwargs['reindex'] = False
if kwargs['count']:
size = len(data)
val_counts = data['class'].value_counts()
if kwargs['reindex']:
val_counts = val_counts.reindex(kwargs['reindex'])
kwargs['title'] += " ("
for val in val_counts.index:
ratio = val_counts[val] / size
kwargs['title'] = "".join([kwargs['title'], val, ": %0.2f" % ratio, ", "])
kwargs['title'] = "".join([kwargs['title'][:-2], ")"])
chart = alt.Chart(data, title=kwargs['title']).mark_bar(size=150).encode(
alt.X('class:N', sort='descending'),
alt.Y('count():Q'),
color=alt.value('grey')
).properties(width=400)
return chart
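# Illustrative usage (assumed toy data -- any DataFrame with a nominal 'class'
# column works); wrapped in a function so no chart is built at import time.
def _example_balance_chart():
    toy = pd.DataFrame({'class': ['p'] * 3 + ['e'] * 7})
    return get_balance_chart(toy, title="Toy balance plot", reindex=['p', 'e'])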
from dython import nominal
def get_correlation_dataframe(data, **kwargs):
"""
Parameters
----------
data: pandas.DataFrame
DataFrame with nominal or metrical columns
kwargs:
show_progress: bool, default=False
Prints each row if True
Returns
-------
var name=data_corr: pandas.DataFrame,
with two column names and their correlation
"""
if 'show_progress' not in kwargs:
kwargs['show_progress'] = False
data_corr = pd.DataFrame(columns=['variable1', 'variable2', 'correlation', 'correlation_rounded'])
for variable1 in data:
for variable2 in data:
# nominal-nominal -> Theils U
if type(data[variable1][0]) == str and type(data[variable2][0]) == str:
corr = nominal.theils_u(data[variable1], data[variable2], nan_replace_value='f')
            # metrical-metrical -> Pearsons R
elif util_func.is_number(data[variable1][0]) and util_func.is_number(data[variable2][0]):
corr = scipy.stats.stats.pearsonr(data[variable1], data[variable2])[0]
# change range from [-1, 1] to [0, 1] as the other metrics
corr = (corr + 1) / 2
# metrical-nominal -> correlation ratio
elif type(data[variable1][0]) == str and util_func.is_number(data[variable2][0]):
corr = nominal.correlation_ratio(data[variable1], data[variable2], nan_replace_value='f')
elif type(data[variable2][0]) == str and util_func.is_number(data[variable1][0]):
corr = nominal.correlation_ratio(data[variable2], data[variable1], nan_replace_value='f')
else:
print('var1-type: ' + str(type(data[variable1][0])) + ', var2-type: ' + str(type(data[variable2][0])))
print('var1: ' + str(data[variable1][0]) + ', var2: ' + str(data[variable2][0]))
new_row = {'variable1': variable1, 'variable2': variable2,
'correlation': corr, 'correlation_rounded': round(corr, 2)}
data_corr = data_corr.append(new_row, ignore_index=True)
if kwargs['show_progress']:
print(new_row)
return data_corr
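# Illustrative usage (assumed toy data): one nominal and one metrical column,
# which exercises the Theil's U, Pearson's r and correlation-ratio branches
# above; wrapped in a function so nothing is computed at import time.
def _example_correlation_dataframe():
    toy = pd.DataFrame({
        'cap_color': ['n', 'y', 'n', 'w', 'n', 'y'],
        'cap_diameter': [5.1, 3.2, 4.8, 6.0, 5.5, 3.0],
    })
    return get_correlation_dataframe(toy, show_progress=False)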
def get_correlation_chart(data, **kwargs):
"""
Parameters
----------
data: pandas.DataFrame
data with nominal or metrical columns
kwargs:
show_progress: bool, default=False,
prints each row if True
Returns
-------
altair.Chart,
correlation heatmap of the data columns based on get_correlation_dataframe
"""
if 'show_progress' not in kwargs:
kwargs['show_progress'] = False
data_corr = get_correlation_dataframe(data, show_progress=kwargs['show_progress'])
base_chart = alt.Chart(data_corr).encode(
alt.X('variable1:N', sort=data.columns.values),
alt.Y('variable2:N', sort=data.columns.values)
)
corr_chart = base_chart.mark_rect().encode(
alt.Color('correlation:Q', scale=alt.Scale(scheme='greys')),
)
text_chart = base_chart.mark_text().encode(
alt.Text('correlation_rounded:Q'),
color = (alt.condition(
alt.datum.correlation > 0.5,
alt.value('white'),
alt.value('black')
))
)
return corr_chart + text_chart
def get_score_threshold_dataframe(X_train, X_test, y_train, y_test, mode, score):
"""
Parameters
----------
X_train: pandas.DataFrame, attributes without class of the training set
X_test: pandas.DataFrame, attributes without class of the test set
y_train: numpy.ndarray, class of the training set
y_test: numpy.ndarray, class of the test set
mode: str, used classifier, look at mushroom_class_fix.train_model for details
score: str, used scoring method, look at mushroom_class_fix.get_evaluation_scores_dict for details
Returns
-------
var name=data: pandas.DataFrame,
        with a threshold column from [0; 1] in 0.01 steps
and a score column with the calculated score for each threshold using mushroom_class_fix.get_y_prob_pred
"""
data = pd.DataFrame(columns=['scores', 'thresholds'], dtype=np.float64)
data.thresholds = [t / 1000 for t in range(0, 1001, 10)]
model = mushroom_class_fix.train_model(X_train, y_train, mode)
scores = []
for threshold in data.thresholds:
y_prob, y_pred = mushroom_class_fix.get_y_prob_pred(X_test, model, threshold=threshold)
scores.append(mushroom_class_fix.get_evaluation_scores_dict(y_test, y_pred, print=False)[score])
data.scores = scores
return data
def get_score_threshold_chart(X_train, X_test, y_train, y_test, mode, score):
"""
Parameters
----------
explained in get_score_threshold_dataframe
Returns
-------
altair.Chart,
threshold scoring plot (to choose the threshold for the best scoring) using get_score_threshold_dataframe
"""
data = get_score_threshold_dataframe(X_train, X_test, y_train, y_test, mode, score)
title = ''.join(['Score-threshold-plot ', mode, ' ', score])
chart = alt.Chart(data, title=title).mark_line().encode(
alt.X('thresholds:Q'),
alt.Y('scores:Q'),
color=alt.value('black')
)
return chart
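# Illustrative call sketch. Assumptions: 'rf' is a mode accepted by
# mushroom_class_fix.train_model and 'recall' is a key returned by
# mushroom_class_fix.get_evaluation_scores_dict -- substitute whatever values
# that module actually supports; wrapped in a function so nothing trains at
# import time.
def _example_score_threshold_chart(X_train, X_test, y_train, y_test):
    return get_score_threshold_chart(X_train, X_test, y_train, y_test,
                                     mode='rf', score='recall')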
def get_roc_dataframe(X_train, X_test, y_train, y_test, mode):
"""
Parameters
----------
X_train: pandas.DataFrame, attributes without class of the training set
X_test: pandas.DataFrame, attributes without class of the test set
y_train: numpy.ndarray, class of the training set
y_test: numpy.ndarray, class of the test set
mode: str, used classifier, look at mushroom_class_fix.train_model for details
Returns
-------
var name=data_roc: pandas.DataFrame,
contains the necessary columns for a ROC plot TPR, FPR and threshold
"""
    data_roc = pd.DataFrame(columns=['tpr', 'fpr', 'threshold'], dtype=np.float64)
import json
from typing import Tuple, Union
import pandas as pd
import numpy as np
import re
import os
from tableone import TableOne
from collections import defaultdict
from io import StringIO
from .gene_patterns import *
import plotly.express as px
import pypeta
from pypeta import Peta
from pypeta import filter_description
class SampleIdError(RuntimeError):
def __init__(self, sample_id: str, message: str):
self.sample_id = sample_id
self.message = message
class NotNumericSeriesError(RuntimeError):
def __init__(self, message: str):
self.message = message
class UnknowSelectionTypeError(RuntimeError):
def __init__(self, message: str):
self.message = message
class NotInColumnError(RuntimeError):
def __init__(self, message: str):
self.message = message
class GenesRelationError(RuntimeError):
def __init__(self, message: str):
self.message = message
class VariantUndefinedError(RuntimeError):
def __init__(self, message: str):
self.message = message
class ListsUnEqualLengthError(RuntimeError):
def __init__(self, message: str):
self.message = message
class DatetimeFormatError(RuntimeError):
def __init__(self, message: str):
self.message = message
class CDx_Data():
"""[summary]
"""
def __init__(self,
mut_df: pd.DataFrame = None,
cli_df: pd.DataFrame = None,
cnv_df: pd.DataFrame = None,
sv_df: pd.DataFrame = None,
json_str: str = None):
"""Constructor method with DataFrames
Args:
mut_df (pd.DataFrame, optional): SNV and InDel info. Defaults to None.
cli_df (pd.DataFrame, optional): Clinical info. Defaults to None.
cnv_df (pd.DataFrame, optional): CNV info. Defaults to None.
sv_df (pd.DataFrame, optional): SV info. Defaults to None.
"""
self.json_str = json_str
self.mut = mut_df
self.cnv = cnv_df
self.sv = sv_df
if not cli_df is None:
self.cli = cli_df
self.cli = self._infer_datetime_columns()
else:
self._set_cli()
self.crosstab = self.get_crosstab()
def __len__(self):
return 0 if self.cli is None else len(self.cli)
def __getitem__(self, n):
return self.select_by_sample_ids([self.cli.sampleId.iloc[n]])
def __sub__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = None if self.cli is None and cdx.cli is None else pd.concat(
[self.cli, cdx.cli]).drop_duplicates(keep=False)
mut = None if self.mut is None and cdx.mut is None else pd.concat(
[self.mut, cdx.mut]).drop_duplicates(keep=False)
cnv = None if self.cnv is None and cdx.cnv is None else pd.concat(
[self.cnv, cdx.cnv]).drop_duplicates(keep=False)
sv = None if self.sv is None and cdx.sv is None else pd.concat(
[self.sv, cdx.sv]).drop_duplicates(keep=False)
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
def __add__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = pd.concat([self.cli, cdx.cli]).drop_duplicates()
mut = pd.concat([self.mut, cdx.mut]).drop_duplicates()
cnv = pd.concat([self.cnv, cdx.cnv]).drop_duplicates()
sv = pd.concat([self.sv, cdx.sv]).drop_duplicates()
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
def from_PETA(self,
token: str,
json_str: str,
host='https://peta.bgi.com/api'):
"""Retrieve CDx data from BGI-PETA database.
Args:
token (str): Effective token for BGI-PETA database
json_str (str): The json format restrictions communicating to the database
"""
self.json_str = json_str
peta = Peta(token=token, host=host)
peta.set_data_restriction_from_json_string(json_str)
# peta.fetch_clinical_data() does not infer dtypes correctly, so do it manually.
#self.cli = peta.fetch_clinical_data()
self.cli = pd.read_csv(
StringIO(peta.fetch_clinical_data().to_csv(None, index=False)))
self.mut = peta.fetch_mutation_data()
self.cnv = peta.fetch_cnv_data()
self.sv = peta.fetch_sv_data()
# dedup for the same sampleId in different studyIds, discard the duplicated ones from all tables
cli_original = self.cli
self.cli = self.cli.drop_duplicates('sampleId')
if (len(self.cli) < len(cli_original)):
print('Duplicated sampleId exists, drop duplicates and go on')
undup_tuple = [(x, y)
for x, y in zip(self.cli.sampleId, self.cli.studyId)]
self.sv = self.sv[self.sv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.cnv = self.cnv[self.cnv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.mut = self.mut[self.mut.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
# time series
self.cli = self._infer_datetime_columns()
self.crosstab = self.get_crosstab()
return filter_description(json_str)
def filter_description(self):
"""return filter description when data is loaded from PETA
Returns:
str: description
"""
return filter_description(self.json_str) if self.json_str else None
def from_file(self,
mut_f: str = None,
cli_f: str = None,
cnv_f: str = None,
sv_f: str = None):
"""Get CDx data from files.
Args:
mut_f (str, optional): File in NCBI MAF format containing SNVs and InDels. Defaults to None.
cli_f (str, optional): File name containing clinical info. Defaults to None.
cnv_f (str, optional): File name containing CNV info. Defaults to None.
sv_f (str, optional): File name containing SV info. Defaults to None.
"""
if not mut_f is None:
self.mut = pd.read_csv(mut_f, sep='\t')
if not cnv_f is None:
self.cnv = pd.read_csv(cnv_f, sep='\t')
if not sv_f is None:
self.sv = pd.read_csv(sv_f, sep='\t')
if not cli_f is None:
self.cli = pd.read_csv(cli_f, sep='\t')
else:
self._set_cli()
self.cli = self._infer_datetime_columns()
self.crosstab = self.get_crosstab()
def to_tsvs(self, path: str = './'):
"""Write CDx_Data properties to 4 separate files
Args:
path (str, optional): Path to write files. Defaults to './'.
"""
if not self.cli is None:
self.cli.to_csv(os.path.join(path, 'sample_info.txt'),
index=None,
sep='\t')
if not self.mut is None:
self.mut.to_csv(os.path.join(path, 'mut_info.txt'),
index=None,
sep='\t')
if not self.cnv is None:
self.cnv.to_csv(os.path.join(path, 'cnv_info.txt'),
index=None,
sep='\t')
if not self.sv is None:
self.sv.to_csv(os.path.join(path, 'fusion_info.txt'),
index=None,
sep='\t')
def to_excel(self, filename: str = './output.xlsx'):
"""Write CDx_Data properties to excel file
Args:
filename (str, optional): target filename. Defaults to './output.xlsx'.
"""
if not filename.endswith('xlsx'):
filename = filename + '.xlsx'
with pd.ExcelWriter(filename) as ew:
if not self.cli is None:
self.cli.to_excel(ew, sheet_name='clinical', index=None)
if not self.mut is None:
self.mut.to_excel(ew, sheet_name='mutations', index=None)
if not self.cnv is None:
self.cnv.to_excel(ew, sheet_name='cnv', index=None)
if not self.sv is None:
self.sv.to_excel(ew, sheet_name='sv', index=None)
def _set_cli(self):
"""Set the cli attribute, generate a void DataFrame when it is not specified.
"""
sample_id_series = []
if not self.mut is None:
sample_id_series.append(
self.mut['Tumor_Sample_Barcode'].drop_duplicates())
if not self.cnv is None:
sample_id_series.append(
self.cnv['Tumor_Sample_Barcode'].drop_duplicates())
if not self.sv is None:
sample_id_series.append(
self.sv['Tumor_Sample_Barcode'].drop_duplicates())
if len(sample_id_series) > 0:
self.cli = pd.DataFrame({
'sampleId': pd.concat(sample_id_series)
}).drop_duplicates()
else:
self.cli = None
def _infer_datetime_columns(self) -> pd.DataFrame:
"""To infer the datetime_columns and astype to datetime64 format
Returns:
pd.DataFrame: CDx.cli dataframe
"""
cli = self.cli
for column in cli.columns:
if column.endswith('DATE'):
try:
cli[column] = pd.to_datetime(cli[column])
except Exception as e:
raise DatetimeFormatError(
f'{column} column end with "DATE" can not be transformed to datetime format'
)
return cli
def get_crosstab(self) -> pd.DataFrame:
"""Generate a Gene vs. Sample_id cross table.
Raises:
SampleIdError: A sample id from the mut, cnv or sv table does not exist in the cli table.
Returns:
pd.DataFrame: the gene vs. sample crosstab.
"""
# The cli table must not contain duplicate sample ids; duplicates would create repeated columns in the crosstab and break boolean Series handling
if (self.cli is None) or (len(self.cli) == 0):
return pd.DataFrame([])
sub_dfs = []
# cli
cli_crosstab = self.cli.copy().set_index('sampleId').T
cli_crosstab['track_type'] = 'CLINICAL'
sub_dfs.append(cli_crosstab)
# mut: represented by the protein change (HGVSp_Short), joined by '|' for multiple hits
if (not self.mut is None) and (len(self.mut) != 0):
mut_undup = self.mut[[
'Hugo_Symbol', 'Tumor_Sample_Barcode', 'HGVSp_Short'
]].groupby([
'Hugo_Symbol', 'Tumor_Sample_Barcode'
])['HGVSp_Short'].apply(lambda x: '|'.join(x)).reset_index()
mut_crosstab = mut_undup.pivot('Hugo_Symbol',
'Tumor_Sample_Barcode',
'HGVSp_Short')
mut_crosstab['track_type'] = 'MUTATIONS'
sub_dfs.append(mut_crosstab)
# cnv: represented by gain or loss; at first use the virtual column "copy_Num"
if (not self.cnv is None) and (len(self.cnv) != 0):
cnv_undup = self.cnv[[
'Hugo_Symbol', 'Tumor_Sample_Barcode', 'status'
]].groupby([
'Hugo_Symbol', 'Tumor_Sample_Barcode'
])['status'].apply(lambda x: '|'.join(x)).reset_index()
cnv_crosstab = cnv_undup.pivot('Hugo_Symbol',
'Tumor_Sample_Barcode', 'status')
cnv_crosstab['track_type'] = 'CNV'
sub_dfs.append(cnv_crosstab)
# sv: represented by the gene1 and gene2 combination; each record is exploded into 2 rows
if (not self.sv is None) and (len(self.sv) != 0):
sv_undup = pd.concat([
self.sv,
self.sv.rename(columns={
'gene1': 'gene2',
'gene2': 'gene1'
})
])[['gene1', 'Tumor_Sample_Barcode', 'gene2']].groupby([
'gene1', 'Tumor_Sample_Barcode'
])['gene2'].apply(lambda x: '|'.join(x)).reset_index()
sv_crosstab = sv_undup.pivot('gene1', 'Tumor_Sample_Barcode',
'gene2')
sv_crosstab['track_type'] = 'FUSION'
sub_dfs.append(sv_crosstab)
# pandas does not support reindex with duplicated index, so turn into multiIndex
crosstab = pd.concat(sub_dfs)
crosstab = crosstab.set_index('track_type', append=True)
crosstab = crosstab.swaplevel()
return crosstab
# How to build a universal selection interface that selects by variant, gene, cancer type, etc., and supports "or" and "and" logic
# This interface is critical: the variant inclusion criteria affect the crosstab
# Selection returns a new CDx_Data object
def select(self, conditions: dict = {}, update=True):
"""A universe interface to select data via different conditions.
Args:
conditions (dict, optional): Each key represent one column`s name of the CDx_Data attributes. Defaults to {}.
update (bool, optional): [description]. Defaults to True.
"""
return self
# Helper functions for data selection
def _numeric_selector(self, ser: pd.Series, range: str) -> pd.Series:
"""Compute a comparison expression on a numeric Series
Args:
ser (pd.Series): Numeric Series.
range (str): comparison expression like 'x>5'; 'x' is mandatory and represents the input.
Raises:
NotNumericSeriesError: Input Series's dtype is not a numeric type.
Returns:
pd.Series: Series with boolean values.
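Example (illustrative; 'x' is replaced by the Series and the expression is evaluated):
>>> self._numeric_selector(pd.Series([3, 7, 10], name='Age'), 'x>5')
0    False
1     True
2     True
Name: Age, dtype: bool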
"""
if ser.dtype == 'object':
raise NotNumericSeriesError(f'{ser.name} is not numeric')
#return ser.map(lambda x: eval(re.sub(r'x', str(x), range)))
return eval(re.sub(r'x', 'ser', range))
def _catagory_selector(self, ser: pd.Series, range: list) -> pd.Series:
"""Return True where the Series values are in the input range list.
Args:
ser (pd.Series): Category Series.
range (list): List of target options.
Returns:
pd.Series: Series with boolean values
"""
return ser.isin(range)
def _selector(self, df: pd.DataFrame, selections: dict) -> pd.DataFrame:
"""Filter the input DataFrame via the dict of conditions.
Args:
df (pd.DataFrame): Input.
selections (dict): Dict format of conditions like "{'Cancer_type':['lung','CRC'],'Age':'x>5'}".
The keys represent a column in the input DataFrame.
The list values represent a category target and str values represent a numeric target.
Raises:
NotInColumnError: A key in the dict is not in the df's columns.
UnknowSelectionTypeError: The type of a value in the dict is neither str nor list.
Returns:
pd.DataFrame: Filtered DataFrame
"""
columns = df.columns
for key, value in selections.items():
if key not in columns:
raise NotInColumnError(f'{key} is not in the columns')
if isinstance(value, str):
df = df[self._numeric_selector(df[key], value)]
elif isinstance(value, list):
df = df[self._catagory_selector(df[key], value)]
else:
raise UnknowSelectionTypeError(
f'{selections} have values not str nor list')
return df
def _fuzzy_id(self, regex: re.Pattern, text: str) -> str:
"""Transform a sample id into fuzzy form according to the regex pattern
Args:
regex (re.Pattern): The info retained is in the capture groups
text (str): input sample id
Returns:
str: fuzzy mode sample id
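Example (illustrative, using the default pattern r'(\d+)[A-Z](\d+)'):
>>> self._fuzzy_id(re.compile(r'(\d+)[A-Z](\d+)'), '19B12345')
'19_12345'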
"""
matches = regex.findall(text)
if matches:
text = '_'.join(matches[0])
return text
def select_by_sample_ids(self,
sample_ids: list,
fuzzy: bool = False,
regex_str: str = r'(\d+)[A-Z](\d+)',
study_ids: list = []):
"""Select samples via a list of sample IDs.
Args:
sample_ids (list): sample ids list.
fuzzy (bool): fuzzy mode.
regex_str (str): The matching principle for fuzzy match. The info in the regex capture groups must match for a record to qualify. Defaults to r'(\d+)[A-Z](\d+)'.
study_ids: (list): The corresponding study id of each sample id. Length of sample_ids and study_ids must be the same.
Raises:
ListsUnEqualLengthError: Length of sample_ids and study_ids are not equal.
Returns:
CDx: CDx object of selected samples.
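Example (illustrative sample ids):
cdx.select_by_sample_ids(['S001', 'S002'])                # exact match on sampleId
cdx.select_by_sample_ids(['19B12345'], fuzzy=True)        # match on the digits captured by regex_str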
"""
if fuzzy:
regex = re.compile(regex_str)
# fuzzy the input ids
target_ids = []
fuzzy_to_origin = defaultdict(list)
transform = lambda x: self._fuzzy_id(regex, x)
for sample_id in sample_ids:
fuzzy_sample_id = self._fuzzy_id(regex, sample_id)
fuzzy_to_origin[fuzzy_sample_id].append(sample_id)
target_ids.append(fuzzy_sample_id)
else:
target_ids = sample_ids
transform = lambda x: x
# match
sample_id_bool = self.cli['sampleId'].map(transform).isin(target_ids)
# no match, return immediately
if not sample_id_bool.any():
return CDx_Data()
# with study ids
if len(study_ids):
if len(study_ids) != len(sample_ids):
raise ListsUnEqualLengthError('Length of sample_ids and study_ids must be equal')
sub_cli_df = self.cli[sample_id_bool]
study_id_bool = sub_cli_df.apply(
lambda x: x['studyId'] == study_ids[target_ids.index(
transform(x['sampleId']))],
axis=1)
sample_id_bool = sample_id_bool & study_id_bool
# construct new CDx_Data object
# CDx_Data always have a cli
cli_df = self.cli[sample_id_bool].copy()
# add a column of query ids for fuzzy match
# multi hit represent as a string
if fuzzy:
cli_df['queryId'] = cli_df['sampleId'].map(
lambda x: ','.join(fuzzy_to_origin[transform(x)])).copy()
if not self.mut is None and len(self.mut) != 0:
mut_df = self.mut[self.mut['Tumor_Sample_Barcode'].isin(
cli_df['sampleId'])].copy()
else:
mut_df = None
if not self.cnv is None and len(self.cnv) != 0:
cnv_df = self.cnv[self.cnv['Tumor_Sample_Barcode'].isin(
cli_df['sampleId'])].copy()
else:
cnv_df = None
if not self.sv is None and len(self.sv) != 0:
sv_df = self.sv[self.sv['Tumor_Sample_Barcode'].isin(
cli_df['sampleId'])].copy()
else:
sv_df = None
return CDx_Data(cli_df=cli_df,
mut_df=mut_df,
cnv_df=cnv_df,
sv_df=sv_df)
#
def set_mut_eligibility(self, **kwargs):
"""Set threshold for SNV/InDels to regard as a positive sample
Raises:
VariantUndefinedError: mut info not provided by user.
Returns:
CDx_Data: CDx_Data object
"""
if self.mut is None or len(self.mut) == 0:
mut = None
else:
mut = self._selector(self.mut, kwargs)
return CDx_Data(cli_df=self.cli,
mut_df=mut,
cnv_df=self.cnv,
sv_df=self.sv)
def set_cnv_eligibility(self, **kwargs):
"""Set threshold for CNV to regard as a positive sample.
Raises:
VariantUndefinedError: cnv info not provided by user.
Returns:
CDx_Data: CDx_Data object.
"""
if self.cnv is None or len(self.cnv) == 0:
cnv = None
else:
cnv = self._selector(self.cnv, kwargs)
return CDx_Data(cli_df=self.cli,
mut_df=self.mut,
cnv_df=cnv,
sv_df=self.sv)
def set_sv_eligibility(self, **kwargs):
"""Set threshold for SV to regard as a positive sample.
Raises:
VariantUndefinedError: SV info not provided by user.
Returns:
CDx_Data: CDx_Data object.
"""
if self.sv is None or len(self.sv) == 0:
sv = None
else:
sv = self._selector(self.sv, kwargs)
return CDx_Data(cli_df=self.cli,
mut_df=self.mut,
cnv_df=self.cnv,
sv_df=sv)
# Specify a column name, then a range: a list for categorical values, an expression string for numeric values
# attrdict={'Cancer_type':['lung','CRC'],'Age':'x>5'}
def select_samples_by_clinical_attributes2(self, attr_dict: dict):
"""Select samples via a set of conditions corresponding to the columns in the cli DataFrame.
Args:
attr_dict (dict): Dict format of conditions like "{'Cancer_type':['lung','CRC'],'Age':'x>5'}".
The keys represent a column in the input DataFrame.
The list values represent a category target and str values represent a numeric target.
Returns:
CDx: CDx object of selected samples.
"""
cli_df = self._selector(self.cli, attr_dict)
return self.select_by_sample_ids(cli_df['sampleId'])
def select_samples_by_clinical_attributes(self, **kwargs):
"""Select samples via a set of conditions corresponding to the columns in the cli DataFrame.
Args:
Keywords arguments with each key represent a column in the input DataFrame.
like "Cancer_type=['lung','CRC'], Age='x>5'"
The list values represent a category target and str values represent a numeric target.
Returns:
CDx: CDx object of selected samples.
"""
cli_df = self._selector(self.cli, kwargs)
return self.select_by_sample_ids(cli_df['sampleId'])
def select_samples_by_date_attributes(
self,
column_name: str = 'SAMPLE_RECEIVED_DATE',
start='',
end: str = '',
days: int = 0,
period: str = '',
):
"""Select samples using a datetime attribute in the cli dataframe
Args:
column_name (str, optional): Column used in the cli dataframe. Defaults to 'SAMPLE_RECEIVED_DATE'.
start (str, optional): Time start point, e.g. '2020-01-01'. Defaults to ''.
end (str, optional): Time end point. Defaults to ''.
days (int, optional): Length of the window in days. Defaults to 0.
period (str, optional): Exact range, e.g. '202005' for May 2020 or '2021' for the whole year. Defaults to ''.
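Example (illustrative):
cdx.select_samples_by_date_attributes(period='202005')                      # all samples received in May 2020
cdx.select_samples_by_date_attributes(start='2020-01-01', end='2020-03-31')
cdx.select_samples_by_date_attributes(start='2020-01-01', days=90)          # a 90-day window from the start date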
"""
date_ser = self.cli.set_index(column_name)['sampleId']
if period:
cdx = self.select_by_sample_ids(date_ser[period])
elif start and end:
cdx = self.select_by_sample_ids(date_ser[start:end])
elif start and days:
cdx = self.select_by_sample_ids(date_ser[start:(
pd.to_datetime(start) +
pd.to_timedelta(days, 'D')).strftime("%Y-%m-%d")])
elif end and days:
cdx = self.select_by_sample_ids(date_ser[(
pd.to_datetime(end) -
pd.to_timedelta(days, 'D')).strftime("%Y-%m-%d"):end])
return cdx
# Select positive samples: gene combinations with and/or logic, cHGVS and gHGVS, gene panels such as MMR, HR, etc.
# Gene combinations can be passed in as a list argument
def select_samples_by_mutate_genes(
self,
genes: list = [],
variant_type: list = ['MUTATIONS', 'CNV', 'FUSION'],
how='or'):
"""Select samples via positive variant genes.
Args:
genes (list): Gene Hugo names. Defaults to [] for all mutated genes
variant_type (list, optional): Combination of MUTATIONS, CNV and FUSION. Defaults to ['MUTATIONS', 'CNV', 'FUSION'].
how (str, optional): 'and' for variants in all genes, 'or' for a variant in any of the genes. Defaults to 'or'.
Raises:
GenesRelationError: Value of how is not 'and' nor 'or'.
Returns:
CDx: CDx object of selected samples.
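Example (illustrative gene names):
cdx.select_samples_by_mutate_genes(genes=['EGFR', 'KRAS'], how='or')      # a variant in either gene
cdx.select_samples_by_mutate_genes(genes=['TP53'], variant_type=['CNV'])  # CNV-positive samples only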
"""
variant_crosstab = self.crosstab.reindex(index=variant_type, level=0)
if len(genes) != 0:
variant_crosstab = variant_crosstab.reindex(index=genes, level=1)
# Certain variant_types or genes yield an empty table (all() bug)
if len(variant_crosstab) == 0:
return CDx_Data()
gene_num = len(
pd.DataFrame(list(
variant_crosstab.index)).iloc[:, 1].drop_duplicates())
if how == 'or':
is_posi_sample = variant_crosstab.apply(
lambda x: any(pd.notnull(x)))
elif how == 'and':
# reindex multiindex bug
if len(genes) != 0 and len(genes) != gene_num:
return CDx_Data()
is_posi_sample = variant_crosstab.apply(
lambda x: all(pd.notnull(x)))
else:
raise GenesRelationError(
f'value of "how" must be "or" or "and", here comes "{how}"')
# the last column is "track_type"
sample_ids = is_posi_sample[is_posi_sample].index
return self.select_by_sample_ids(sample_ids)
# Analysis
def tableone(self, **kwargs) -> TableOne:
"""Generate summary table1 using tableone library. Please refer to https://github.com/tompollard/tableone
Args:
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
min_max: list, optional
List of variables that should report minimum and maximum, instead of
standard deviation (for normal) or Q1-Q3 (for non-normal).
pval : bool, optional
Display computed P-Values (default: False).
pval_adjust : str, optional
Method used to adjust P-Values for multiple testing.
The P-values from the unadjusted table (default when pval=True)
are adjusted to account for the number of total tests that were performed.
These adjustments would be useful when many variables are being screened
to assess if their distribution varies by the variable in the groupby argument.
For a complete list of methods, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
htest_name : bool, optional
Display a column with the names of hypothesis tests (default: False).
htest : dict, optional
Dictionary of custom hypothesis tests. Keys are variable names and
values are functions. Functions must take a list of Numpy Arrays as
the input argument and must return a test result.
e.g. htest = {'age': myfunc}
missing : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
rename : dict, optional
Dictionary of alternative names for variables.
e.g. `rename = {'sex':'gender', 'trt':'treatment'}`
sort : bool or str, optional
If `True`, sort the variables alphabetically. If a string
(e.g. `'P-Value'`), sort by the specified column in ascending order.
Default (`False`) retains the sequence specified in the `columns`
argument. Currently the only columns supported are: `'Missing'`,
`'P-Value'`, `'P-Value (adjusted)'`, and `'Test'`.
limit : int or dict, optional
Limit to the top N most frequent categories. If int, apply to all
categorical variables. If dict, apply to the key (e.g. {'sex': 1}).
order : dict, optional
Specify an order for categorical variables. Key is the variable, value
is a list of values in order. {e.g. 'sex': ['f', 'm', 'other']}
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: True).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`)
applies the rule per variable, defaulting to 1 place for unspecified
variables. For continuous variables, applies to all summary statistics
(e.g. mean and standard deviation). For categorical variables, applies
to percentage only.
overall : bool, optional
If True, add an "overall" column to the table. Smd and p-value
calculations are performed only using stratified columns.
display_all : bool, optional
If True, set pd. display_options to display all columns and rows.
(default: False)
dip_test : bool, optional
Run Hartigan's Dip Test for multimodality. If variables are found to
have multimodal distributions, a remark will be added below the Table 1.
(default: False)
normal_test : bool, optional
Test the null hypothesis that a sample come from a normal distribution.
Uses scipy.stats.normaltest. If variables are found to have non-normal
distributions, a remark will be added below the Table 1.
(default: False)
tukey_test : bool, optional
Run Tukey's test for far outliers. If variables are found to
have far outliers, a remark will be added below the Table 1.
(default: False)
Returns:
pd.DataFrame: Summary of the Data
"""
table1 = TableOne(self.cli, **kwargs)
return table1
def pathway(self):
pass
def pinpoint(self):
pass
def oncoprint(self):
pass
def survival(self):
pass
def plot_gene_variant_rate(self, genes=genes_688):
freq_mut_s = self.test_positive_rate(
genes_to_observe=genes,
groupby_genes=True,
variant_type_to_observe=['MUTATIONS']) * 100
freq_cnv_s = self.test_positive_rate(genes_to_observe=genes,
groupby_genes=True,
variant_type_to_observe=['CNV'
]) * 100
freq_sv_s = self.test_positive_rate(genes_to_observe=genes,
groupby_genes=True,
variant_type_to_observe=['FUSION'
]) * 100
# Check whether any of the three variant types is empty and handle it
avalible_s = []
variantlist = [freq_mut_s, freq_cnv_s, freq_sv_s]
for i, x in enumerate(variantlist):
if len(x) != 0:
avalible_s.append(x)
if len(avalible_s) == 0:
return 'no data'
if len(freq_mut_s) == 0:
freq_mut_s = pd.Series([0] * len(avalible_s[0]),
index=avalible_s[0].index)
if len(freq_cnv_s) == 0:
freq_cnv_s = pd.Series([0] * len(avalible_s[0]),
index=avalible_s[0].index)
if len(freq_sv_s) == 0:
freq_sv_s = pd.Series([0] * len(avalible_s[0]),
index=avalible_s[0].index)
freq_s = pd.DataFrame({
'Mutations': freq_mut_s,
'CNV': freq_cnv_s,
'SV': freq_sv_s
}).fillna(0)
freq_s['total'] = freq_s.sum(axis=1)
freq_s = freq_s.sort_values(by='total', ascending=False)
fig = px.bar(
freq_s,
x=freq_s.index,
y=['Mutations', 'CNV', 'SV'],
)
#fig.update_traces(texttemplate='%{text:.2%}', textposition='outside',)
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
fig.layout.xaxis.title.text = None
fig.layout.yaxis.title.text = '检出率(%)'
fig.layout.legend.title.text = None
return fig
# Should the plotting routine be built in?
def test_positive_rate(
self,
groupby='',
groupby_genes=False,
groupby_variant_type=False,
genes_to_observe=[],
variant_type_to_observe=['MUTATIONS', 'CNV', 'FUSION']):
"""Calculate the positive rate for the CDx object in a user-defined way
Args:
groupby (str, optional): Column name in the CDx_Data.cli DataFrame. Defaults to ''.
groupby_genes (bool, optional): Group by mutated genes. Defaults to False.
groupby_variant_type (bool, optional): Group by variant type, including MUTATIONS, CNV and FUSION. Defaults to False.
genes_to_observe (list, optional): Genes list that should be considered. Defaults to [].
variant_type_to_observe (list, optional): Variant types that should be considered. Defaults to ['MUTATIONS', 'CNV', 'FUSION'].
Returns:
Union[float, pd.Series]: A pd.Series when a groupby option is passed, a float value when not.
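Example (illustrative; 'CANCER_TYPE' stands for any clinical column present in cli):
cdx.test_positive_rate()                          # overall positive rate as a float
cdx.test_positive_rate(groupby_genes=True)        # pd.Series of positive rates indexed by gene
cdx.test_positive_rate(groupby='CANCER_TYPE')     # pd.Series indexed by the values of that clinical column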
"""
# empty CDx
if len(self) == 0:
return pd.Series([], dtype='float64')
crosstab = self.crosstab.reindex(index=variant_type_to_observe,
level=0)
if genes_to_observe:
crosstab = crosstab.reindex(index=genes_to_observe, level=1)
test_posi_rate = None
# skip the last track_type column
if groupby:
test_posi_rate = crosstab.groupby(
self.crosstab.loc['CLINICAL', groupby],
axis=1).apply(self._crosstab_to_positive_rate)
elif groupby_genes:
test_posi_rate = crosstab.groupby(level=1, sort=False).apply(
self._crosstab_to_positive_rate)
elif groupby_variant_type:
test_posi_rate = crosstab.groupby(level=0, sort=False).apply(
self._crosstab_to_positive_rate)
else:
test_posi_rate = self._crosstab_to_positive_rate(crosstab)
# groupby defaults to returning an empty DataFrame, which can cause problems
if (not isinstance(test_posi_rate,
float)) and len(test_posi_rate) == 0:
test_posi_rate = pd.Series([], dtype='float64')
return test_posi_rate
def _crosstab_to_positive_rate(self, df: pd.DataFrame):
"""Calculate a positive rate from a crosstab, counting samples with any non-null cell as positive
Args:
df (pd.DataFrame): CDx's crosstab property
Returns:
float: positive rate
"""
posi_rate = self._positive_rate(df.apply(lambda x: any(pd.notnull(x))),
[True])[-1]
return posi_rate
def _positive_rate(self, values: list,
positive_tags: list) -> Tuple[int, int, float]:
"""Calculate the percentage of positive-tagged values among the effective (non-missing) values
Args:
values (list): the total values
positive_tags (list): values that are regarded as positive values
Returns:
tuple: total number of values, number of effective (non-missing) values, and fraction of positive values
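Example (illustrative):
>>> self._positive_rate(['MUT', np.nan, 'WT', 'MUT'], ['MUT'])
(4, 3, 0.6666666666666666)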
"""
values = list(values)
total_value_num = len(values)
missing_value_num = values.count(np.nan)
effective_value_num = total_value_num - missing_value_num
positive_event_num = sum([values.count(tag) for tag in positive_tags])
positive_rate = 0 if effective_value_num == 0 else positive_event_num / effective_value_num
return (total_value_num, effective_value_num, positive_rate)
def sample_size_by_time(self):
pass
def sample_size(self, groupby=''):
"""Return the sample size, grouped in the way the user defines
Args:
groupby (str, optional): Column name in the CDx_Data DataFrame. Defaults to ''.
Returns:
Union[int, pd.Series]: Sample size. a pd.Series when groupby options passed.
"""
if groupby:
if len(self) == 0:
return | pd.Series([], dtype=float) | pandas.Series |
# Import general libraries
import pymysql
import pandas as pd
import numpy as np
import copy
import ast
from sqlalchemy import create_engine
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# Import dash
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
app = dash.Dash(external_stylesheets=[dbc.themes.FLATLY])
app.title = "UFlix"
app_name = "UFlix"
server = app.server
LOGO = "./assets/logo.png"
""" Start of config """
""" End of config """
""" Start of helper functions """
def get_reco(title, cs_matrix, indices_dict, df):
"""
Given a movie title and a pre-trained similarity matrix, return the top 10 most similar movies
Input:
1. title in string
2. cosine similarity matrix (Fitted)
3. dictionary of mapping of title to index
4. dataframe to retrieve title given index
Output:
1. list of top 10 movie titles
"""
if title == None:
return ["Generating results..."]
# Get the index of the movie that matches the title
idx = indices_dict[title]
# Get the similarity scores of all 10K movies that are related to this movie & sort it & return top 10
sim_scores = sorted(
list(enumerate(cs_matrix[idx])), key=lambda x: x[1], reverse=True
)[1:11]
# top 10 movie indices
movie_indices = [i[0] for i in sim_scores]
# top 10 movie titles
return df["title"].iloc[movie_indices]
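# Minimal sketch of how the inputs to get_reco are typically built (illustrative only;
# assumes rec_df_small has a 'title' column and a hypothetical text-feature column 'soup'):
# cv = CountVectorizer(stop_words="english")
# count_matrix = cv.fit_transform(rec_df_small["soup"])
# cs_matrix = cosine_similarity(count_matrix, count_matrix)
# indices_dict = {title: i for i, title in enumerate(rec_df_small["title"])}
# top10 = get_reco("barfly", cs_matrix, indices_dict, rec_df_small)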
""" End of helper functions """
# Navigation bar
navbar = dbc.Navbar(
[
html.A(
# Use row and col to control vertical alignment of logo / brand
dbc.Row(
[
dbc.Col(html.Img(src=LOGO, height="50px")),
dbc.Col(
dbc.NavbarBrand(
"UFlix",
className="ml-auto",
style={"font-size": 30},
)
),
],
align="left",
no_gutters=True,
),
),
dbc.NavbarToggler(id="navbar-toggler"),
],
color="dark",
dark=True,
style={"width": "100%"},
)
recoTab = html.Div(
children=[
html.H2("Recommendation System", style={"textAlign": "center"}),
# List of random movies with refresh button
html.H3(
"List of random movie names: Click the refresh button to generate new list"
),
html.Div(id="rec-list"),
html.Button(
"Refresh", id="button_refresh", className="btn btn-success btn-lg btn-block"
),
html.Br(),
html.Div(
"Key in a movie title and you will be returned with the top 10 most related movie."
),
dbc.Textarea(
id="movie-input",
className="mb-3",
placeholder="Input in lower caps e.g. barfly",
),
html.Button(
"Generate",
id="button_generate",
className="btn btn-success btn-lg btn-block",
),
html.Br(),
html.Div(id="rec-table"),
],
style={"padding": "20px"},
)
##### For recoTab recommender
@app.callback(
Output("rec-table", "children"),
[
Input("button_generate", "n_clicks"),
Input("movie-input", "value"),
], # upon clicking search button
)
def rec_table(n, input_val):
# read data from cloud sql
con = pymysql.connect(host="172.16.17.32", user="bensjyy", passwd="", db="movies")
query = "SELECT * FROM reco"
rec_df_small = | pd.read_sql(query, con) | pandas.read_sql |
import os
from flask import jsonify, request
from server import app
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from aif360.sklearn.metrics import disparate_impact_ratio, base_rate, consistency_score
def bias_table(Y, prot_attr=None, instance_type=None):
groups = Y.index.unique(prot_attr)
with np.errstate(divide='ignore', invalid='ignore'):
pct = [Y.xs(g, level=prot_attr).shape[0]/Y.shape[0] for g in groups]
data = [[np.divide(1, disparate_impact_ratio(Y[stage].dropna() == outcome, prot_attr=prot_attr, priv_group=g))
for stage in Y.columns for outcome in Y[stage].unique() if not pd.isna(outcome)]
for g in groups]
pct_name = 'proportion at first stage' if instance_type is None else f'proportion of {instance_type}'
num_stages = len(data[0])
col = pd.MultiIndex.from_tuples([(pct_name, '')]
+ list(zip(['disparate impact']*num_stages, [f'{stage} -> {outcome}' for stage in Y.columns for outcome in Y[stage].unique() if not pd.isna(outcome)])))
table = pd.DataFrame(np.c_[pct, data], columns=col, index=groups).sort_index()
table = filter_bias(table)
def colorize(v):
if v < 0.8:
return 'color: red'
elif v > 1.25:
return 'color: blue'
return ''
return table.style.format('{:.3f}').format({(pct_name, ''): '{:.1%}'}
).bar(subset=pct_name, align='left', vmin=0, vmax=1, color='#5fba7d'
).applymap(colorize, subset='disparate impact')
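# Usage sketch (hypothetical data; Y is expected to be indexed by the protected attribute(s),
# with one boolean/categorical column per decision stage):
# idx = pd.MultiIndex.from_tuples([('black', 'M'), ('white', 'F')], names=['race', 'gender'])
# Y = pd.DataFrame({'arrested': [True, False], 'convicted': [True, False]}, index=idx)
# bias_table(Y, prot_attr='race')   # styled table of disparate impact ratios per stage/outcome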
def consistency_table(X, Y):
data = [consistency_score(X.loc[Y[stage].notna()], Y[stage].dropna() == outcome)
for stage in Y.columns for outcome in Y[stage].unique() if not pd.isna(outcome)]
num_stages = len(data)
col = pd.MultiIndex.from_tuples(list(zip(['consistency']*num_stages, [f'{stage} -> {outcome}' for stage in Y.columns for outcome in Y[stage].unique() if not pd.isna(outcome)])))
table = pd.DataFrame([data], columns=col, index=['All Defendants'])
table = filter_bias(table)
def colorize(v):
if v < 0.8:
return 'color: red'
return ''
return table.style.format('{:.3f}').applymap(colorize)
def bias_grid(Y):
num_stages = Y.columns.size - 1
f, axes = plt.subplots(1, num_stages, figsize=(2+4*num_stages, 12), squeeze=True, sharey=True)
for ax, stage, prev in zip(axes, Y.columns[1:], Y.columns):
rates = Y[Y[prev]][stage].groupby(level=['race', 'gender']).apply(base_rate)
sns.heatmap(rates.unstack(), annot=True, fmt='.1%', cmap='RdBu',
center=base_rate(Y[Y[prev]][stage]), robust=True,
cbar=False, square=True, ax=ax);
ax.set_title(f'{prev} -> {stage}')
plt.close()
return f
def filter_bias(df):
return df.loc[:, ((df < 0.8) | (df > 1.25)).any()]
def mean_difference(y_true, y_pred, prot_attr=None):
unique = y_true.index.unique(prot_attr)
groups = y_pred.index.get_level_values(prot_attr)
data = [np.mean(y_pred[groups == g] - y_true[groups == g]) for g in unique]
table = | pd.DataFrame(data, columns=['Sentencing bias'], index=unique) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import os, logging, argparse
import pandas as pd
import numpy as np
from time import time
import pickle
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from yellowbrick.regressor import AlphaSelection
def main(processed_path = "data/processed",
models_path = "models",
visualizations_path = "visualizations"):
"""Creates visualizations."""
# logging
logger = logging.getLogger(__name__)
# normalize paths
processed_path = os.path.normpath(processed_path)
logger.debug("Path to processed data normalized: {}"
.format(processed_path))
models_path = os.path.normpath(models_path)
logger.debug("Path to models normalized: {}"
.format(models_path))
visualizations_path = os.path.normpath(visualizations_path)
logger.debug("Path to visualizations normalized: {}"
.format(visualizations_path))
#%% load selected_df
selected_df = pd.read_pickle(os.path.join(processed_path,
'selected_df.pkl'))
logger.info("Loaded selected_df. Shape of df: {}"
.format(selected_df.shape))
# load models
mod = pickle.load(open(
os.path.join(models_path, 'sklearn_ElasticNetCV.pkl'), 'rb'))
logger.info("Loaded sklearn_ElasticNetCV.pkl.")
mod_sm = pickle.load(open(
os.path.join(models_path, 'sm_OLS_fit_regularized.pkl'), 'rb'))
logger.info("Loaded sm_OLS_fit_regularized.")
#%% split selected_df into dependent and independent variables
teams_df = selected_df.iloc[:, :9]
y = selected_df.iloc[:, 9:10]
X = selected_df.iloc[:, 10:]
yX = | pd.concat([y, X], axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# #### Introduction:
# #### Strokes are the second leading cause of death and the third leading cause of disability globally. A stroke is the sudden death of brain cells due to lack of oxygen when blood flow to the brain is cut off by blockage or rupture of an artery.
# #### Problem statement:
# #### This dataset is used to predict whether a patient is likely to get a stroke based on input parameters such as gender, age and various diseases
# #### Objective:
# #### Construct a predictive model for predicting stroke and assess the accuracy of the models. We will apply and explore 7 algorithms to see which produces reliable and repeatable results. They are: Decision Tree, Logistic Regression, Random Forest, SVM, KNN, Naive Bayes, KMeans Clustering.
# #### Data Source:
# ##### A population of 5110 people is involved in this study, with 2995 females and 2115 males. The dataset for this study is extracted from the Kaggle data repositories.
# In[1]:
import numpy as np
import pandas as pd
from IPython import get_ipython
import matplotlib.pyplot as plt
import seaborn as sns
#get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
plt.rcParams['figure.figsize'] = (5,5)
from sklearn.metrics import accuracy_score, f1_score,classification_report,precision_score,recall_score
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.naive_bayes import GaussianNB
from sklearn.cluster import KMeans
import pickle
# In[2]:
#get_ipython().system('pip install imblearn')
# In[3]:
from imblearn.over_sampling import SMOTE
# data importing
data = | pd.read_csv('healthcare-dataset-stroke-data.csv') | pandas.read_csv |
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
with pytest.raises(TypeError):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types"
r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
with pytest.raises(TypeError):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
with pytest.raises(TypeError):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5.0
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
td = Timedelta(hours=3, minutes=4)
assert td // offsets.Hour(1) == 3
assert td // offsets.Minute(2) == 92
def test_td_floordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
with pytest.raises(TypeError):
td // np.datetime64("2016-01-01", dtype="datetime64[us]")
def test_td_floordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
expected = Timedelta(hours=1, minutes=32)
assert td // 2 == expected
assert td // 2.0 == expected
assert td // np.float64(2.0) == expected
assert td // np.int32(2.0) == expected
assert td // np.uint8(2.0) == expected
def test_td_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
# Array-like others
assert td // np.array(scalar.to_timedelta64()) == 1
res = (3 * td) // np.array([scalar.to_timedelta64()])
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")])
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
ser = pd.Series([1], dtype=np.int64)
res = td // ser
assert res.dtype.kind == "m"
# ---------------------------------------------------------------
# Timedelta.__rfloordiv__
def test_td_rfloordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# scalar others
# x // Timedelta is defined only for timedelta-like x. int-like,
# float-like, and date-like, in particular, should all either
# a) raise TypeError directly or
# b) return NotImplemented, following which the reversed
# operation will raise TypeError.
assert td.__rfloordiv__(scalar) == 1
assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2
assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0
def test_td_rfloordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert np.isnan(td.__rfloordiv__(NaT))
assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT")))
def test_td_rfloordiv_offsets(self):
# GH#19738
assert offsets.Hour(1) // Timedelta(minutes=25) == 2
def test_td_rfloordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
dt64 = np.datetime64("2016-01-01", "us")
with pytest.raises(TypeError):
td.__rfloordiv__(dt64)
def test_td_rfloordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert td.__rfloordiv__(np.nan) is NotImplemented
assert td.__rfloordiv__(3.5) is NotImplemented
assert td.__rfloordiv__(2) is NotImplemented
with pytest.raises(TypeError):
td.__rfloordiv__(np.float64(2.0))
with pytest.raises(TypeError):
td.__rfloordiv__(np.uint8(9))
with pytest.raises(TypeError, match="Invalid dtype"):
# deprecated GH#19761, enforced GH#29797
td.__rfloordiv__(np.int32(2.0))
def test_td_rfloordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = | Timedelta(hours=3, minutes=4) | pandas.Timedelta |
# Graph of positive tests by age over time, per age category
# <NAME>, (@rcsmit) - MIT Licence
# IN: table with the number of positive tests and the total number of tests per week, categorized by age
# manually copied from Table 14 of the weekly RIVM report
# Wekelijkse update epidemiologische situatie COVID-19 in Nederland
# https://www.rivm.nl/coronavirus-covid-19/actueel/wekelijkse-update-epidemiologische-situatie-covid-19-in-nederland
# Challenges: copying and editing Table 14; 3 different age classifications; everything cumulative
# until Dec. 2020; the x-axis of the graph
# TODO : - Add a few more weeks before the current start of the data file (these were weeks with few infections).
# - integrate into the dashboard
# - add 'Total reported'
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def save_df(df,name):
""" _ _ _ """
OUTPUT_DIR = 'C:\\Users\\rcxsm\\Documents\\phyton_scripts\\covid19_seir_models\\output\\'
name_ = OUTPUT_DIR + name+'.csv'
compression_opts = dict(method=None,
archive_name=name_)
df.to_csv(name_, index=False,
compression=compression_opts)
print ("--- Saving "+ name_ + " ---" )
def main():
#url = "C:\\Users\\rcxsm\\Documents\\phyton_scripts\\covid19_seir_models\\input\\covid19_seir_models\input\pos_test_leeftijdscat_wekelijks.csv"
url= "https://raw.githubusercontent.com/rcsmit/COVIDcases/main/pos_test_leeftijdscat_wekelijks.csv"
to_show_in_graph = [ "18-24", "25-29", "30-39", "40-49", "50-59", "60-69", "70+"]
#id;datum;leeftijdscat;methode;mannen_pos;mannen_getest;vrouwen_pos ;vrouwen_getest ;
# totaal_pos;totaal_getest;weeknr2021;van2021;tot2021
df = pd.read_csv(url,
delimiter=";",
low_memory=False)
df["datum"]=pd.to_datetime(df["datum"], format='%d-%m-%Y')
list_dates = df["datum"].unique()
cat_oud = [ "0-4", "05-09", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39",
"40-44", "45-49", "50-54", "55-59", "60-64", "65-69", "70-74", "75-79", "80-84", "85-89", "90-94", "95+" ]
cat_vervanging = [ "0-4", "05-09", "10-14", "15-19", "20-29", "20-29", "30-39", "30-39",
"40-49", "40-49", "50-59", "50-59", "60-69", "60-69", "70+", "70+", "70+", "70+", "70+", "70+" ]
cat_nieuw = [ "0-12", "13-17", "18-24", "25-29", "30-39", "40-49", "50-59", "60-69", "70+", "Niet vermeld"]
cat_nieuwst_code =["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]
cat_nieuwst= ["0-3", "04-12", "13-17", "18-24", "25-29", "30-39", "40-49", "50-59", "60-69", "70+", "Niet vermeld"]
# These categories ultimately appear in the graph
cat_nieuwstx= ["0-12", "0-03", "04-12", "13-17", "18-24", "25-29", "30-39", "40-49", "50-59", "60-69", "70+", "Niet vermeld"]
#####################################################
df_new= pd.DataFrame({'date': [],'cat_oud': [],
'cat_nieuw': [], "positief_testen": [],"totaal_testen": [], "methode":[]})
for i in range(len(df)):
d = df.loc[i, "datum"]
for x in range(len(cat_oud)-1):
c_o,c,p,t,m = None,None,None,None,None
if df.loc[i, "methode"] == "oud":
# print (df.loc[i, "leeftijdscat"])
# print (f"----{df.loc[i, 'leeftijdscat']}----{cat_oud[x]}----")
if df.loc[i, "leeftijdscat"] == cat_oud[x]:
c_o = cat_oud[x]
c = cat_vervanging[x]
# print (f"{c} - {i} - {x} ")
# print (f"----{df.loc[i, 'leeftijdscat']}----{cat_oud[x]}----")
p =df.loc[i, "totaal_pos"]
t = df.loc[i, "totaal_getest"]
m = df.loc[i, "methode"] == "oud"
df_new = df_new.append({ 'date': d, 'cat_oud': c_o, 'cat_nieuw': c, "positief_testen": p,"totaal_testen":t, "methode": m}, ignore_index= True)
c_o,c,p,t,m = None,None,None,None,None
elif (
x <= len(cat_nieuwstx) - 1
and df.loc[i, "leeftijdscat"] == cat_nieuwstx[x]
):
c_o = df.loc[i, "leeftijdscat"]
c = df.loc[i, "leeftijdscat"]
p =df.loc[i, "totaal_pos"]
t = df.loc[i, "totaal_getest"]
m = df.loc[i, "methode"]
df_new = df_new.append({ 'date': d, 'cat_oud': c_o, 'cat_nieuw': c, "positief_testen": p,"totaal_testen":t, "methode": m}, ignore_index= True)
c_o,c,p,t,m = None,None,None,None,None
df_new = df_new.groupby(['date','cat_nieuw'], sort=True).sum().reset_index()
df_new['percentage'] = round((df_new['positief_testen']/df_new['totaal_testen']*100),1)
show_from = "2020-1-1"
show_until = "2030-1-1"
startdate = pd.to_datetime(show_from).date()
enddate = | pd.to_datetime(show_until) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""Cleaning US Census Data.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1HK9UVhluQDGY9TG6OjMq6uRGYD4nlkOz
#Cleaning US Census Data
"""
#Importing datasets
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import glob
"""Using glob, loop through the census files available and load them into DataFrames. Then, concatenate all of those DataFrames together into one DataFrame"""
us_census = glob.glob('states*.csv')
df_list = []
for filename in us_census:
data = pd.read_csv(filename)
df_list.append(data)
df = | pd.concat(df_list) | pandas.concat |
from copy import copy
import numpy as np
from pandas import DataFrame, Series
def variability(model_variability, variable_list=None):
"""Perform thermodynamic variability analysis.
Determine the minimum and maximum values for the input variables (Flux, Gibbs free
energies and metabolite concentrations). If the min_growth constraint is applied, then
growth is maintained at a given percentage of the optimum.
Parameters
----------
model_variability : multitfa.core.tmodel
multitfa model after thermodynamic constraints are added
variable_list : List, optional
List of variables to perform TVA on, by default None
Returns
-------
pd.DataFrame
Dataframe of min max ranges of variables
Raises
------
ValueError
If the model is infeasible with the initial constraints
"""
model = copy(model_variability)
if variable_list == None:
variables = [var.name for var in model.solver.variables]
else:
variables = [var for var in variable_list]
if np.isnan(model.slim_optimize()):
raise ValueError("model infeasible with given constraints")
fluxes_min = np.empty(len(variables))
fluxes_max = np.empty(len(variables))
rxn_name = list()
rxn_ids = [rxn.id for rxn in model.reactions]
for i in range(len(variables)):
# For reaction flux, objective is forward - reverse variables
if variables[i] in rxn_ids:
rxn = model.reactions.get_by_id(variables[i])
objective_exp = 1 * rxn.forward_variable - 1 * rxn.reverse_variable
else:
var = model.solver.variables[variables[i]]
objective_exp = 1 * var
rxn_name.append(variables[i])
model.objective = objective_exp
# minimization
model.objective_direction = "min"
_ = model.slim_optimize()
objective_value = model.objective.value
fluxes_min[i] = objective_value
# maximization
model.objective_direction = "max"
_ = model.slim_optimize()
objective_value = model.objective.value
fluxes_max[i] = objective_value
return DataFrame(
{
"minimum": Series(index=rxn_name, data=fluxes_min),
"maximum": Series(index=rxn_name, data=fluxes_max),
}
)
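# Minimal usage sketch (illustrative; assumes `tfa_model` is a feasible multitfa tmodel and
# that the listed reaction/variable names exist in it):
# ranges = variability(tfa_model, variable_list=["PGI", "dG_err_PGI"])
# print(ranges)   # DataFrame with 'minimum' and 'maximum' columns indexed by variable name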
def variability_legacy_gurobi(
model_variability,
variable_list=None,
warm_start={},
params=False,
):
"""Custom function to perform TVA on MIQC problem using gurobi.
Parameters
----------
model_variability : multitfa.core.tmodel
multitfa model after thermodynamic constraints are added
variable_list : List, optional
List of variables to perform TVA on, by default None
warm_start : dict, optional
Optional warm start to speed up the problem: a mapping of variable to initial solution value, by default {}
params : Bool, optional
If True sets the Timelimit option to 300 sec
Returns
-------
pd.DataFrame
Dataframe of min max ranges of variables
"""
from gurobipy import GRB
gurobi_interface = model_variability.gurobi_interface.copy()
if variable_list == None:
variables = gurobi_interface.getVars()
else:
variables = [var for var in variable_list]
gurobi_interface.optimize()
    # Set a time limit for the solution search, useful for pathological variables that take a long time
if params:
gurobi_interface.params.TimeLimit = 300
fluxes_min = np.empty(len(variables))
fluxes_max = np.empty(len(variables))
rxn_name = list()
rxn_ids = [rxn.id for rxn in model_variability.reactions]
for i in range(len(variables)):
# if the variable is reactions optimize for forward - reverse variables else optimize for the variable
if variables[i] in rxn_ids:
rxn = model_variability.reactions.get_by_id(variables[i])
for_var = gurobi_interface.getVarByName(rxn.forward_variable.name)
rev_var = gurobi_interface.getVarByName(rxn.reverse_variable.name)
obj_exp = for_var - rev_var
else:
obj_exp = gurobi_interface.getVarByName(variables[i])
rxn_name.append(variables[i])
# minimization
if len(warm_start) != 0:
for var in gurobi_interface.getVars():
if var in warm_start:
var.Start = warm_start[var]
gurobi_interface.setObjective(obj_exp, GRB.MINIMIZE)
gurobi_interface.update()
gurobi_interface.optimize()
objective_value = gurobi_interface.ObjVal
fluxes_min[i] = objective_value
# print(rxn.id, "min", objective_value)
warm_start = {}
for var in gurobi_interface.getVars():
if var.VarName.startswith("indicator_"):
warm_start[var] = var.x
        # maximization
for var in gurobi_interface.getVars():
if var in warm_start:
var.Start = warm_start[var]
gurobi_interface.setObjective(obj_exp, GRB.MAXIMIZE)
gurobi_interface.update()
gurobi_interface.optimize()
objective_value = gurobi_interface.ObjVal
fluxes_max[i] = objective_value
# print(rxn.id, "max", objective_value)
return DataFrame(
{
"minimum": | Series(index=rxn_name, data=fluxes_min) | pandas.Series |
#!/usr/bin/env python
from pandas.io.formats.format import SeriesFormatter
from Bio.SeqUtils import seq1
from Bio import SeqIO
import pandas as pd
import argparse
from pathlib import Path
import numpy as np
from summarise_snpeff import parse_vcf, write_vcf
import csv
import re
from functools import reduce
from bindingcalculator import BindingCalculator
from itertools import takewhile
def get_contextual_bindingcalc_values(residues_list,binding_calculator, option, bindingcalc_data = None):
if option == "res_ret_esc":
residues_df = residues_list.copy()
res_ret_esc_df = binding_calculator.escape_per_site(residues_df.loc[(residues_df["Gene_Name"] == "S") & (residues_df["respos"] >= 331) & (residues_df["respos"] <= 531) & (residues_df["respos"].isin(bindingcalc_data["site"].unique())), "respos"])
res_ret_esc_df["Gene_Name"] = "S"
res_ret_esc_df.rename(columns = {"retained_escape" : "BEC_RES"}, inplace = True)
residues_df = residues_df.merge(res_ret_esc_df[["site", "BEC_RES", "Gene_Name"]], left_on = ["Gene_Name", "respos"], right_on = ["Gene_Name", "site"],how = "left")
residues_df.drop(axis = 1 , columns = ["site"], inplace = True)
return(residues_df)
else:
ab_escape_fraction = 1 - binding_calculator.binding_retained(residues_list)
return(ab_escape_fraction)
def summarise_score(summary_df, metric):
#assumes grouping by sample_id and summarising for each sample
summary_df_info = summary_df.groupby("sample_id").agg({metric: ['sum', 'min', 'max']})
summary_df_info.columns = summary_df_info.columns.droplevel(0)
summary_df_info = summary_df_info.reset_index()
summary_df_info = summary_df_info.rename_axis(None, axis=1)
summary_df_mins = pd.merge(left = summary_df, right = summary_df_info[["sample_id", "min"]], left_on = ["sample_id", metric], right_on = ["sample_id", "min"])
summary_df_mins[metric + "_min"] = summary_df_mins["residues"] + ":" + summary_df_mins[metric].fillna("").astype(str)
summary_df_mins = summary_df_mins[["sample_id",metric + "_min"]].groupby("sample_id").agg({metric + "_min" : lambda x : list(x)})
summary_df_mins[metric + "_min"] = summary_df_mins[metric + "_min"].str.join(",")
summary_df_max = pd.merge(left = summary_df, right = summary_df_info[["sample_id", "max"]], left_on = ["sample_id", metric], right_on = ["sample_id", "max"])
summary_df_max[metric + "_max"] = summary_df_max["residues"] + ":" + summary_df_max[metric].fillna("").astype(str)
summary_df_max = summary_df_max[["sample_id",metric + "_max"]].groupby("sample_id").agg({metric + "_max" : lambda x : list(x)})
summary_df_max[metric + "_max"] = summary_df_max[metric + "_max"].str.join(",")
summary_df_sum = summary_df.groupby("sample_id").agg({metric: sum})
summary_df_sum.columns = [metric + "_sum"]
summary_df_final = summary_df_sum.merge(summary_df_max,on='sample_id').merge(summary_df_mins,on='sample_id')
return(summary_df_final)
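
# Illustrative note (assumption about intended use, not from the source): for a metric
# column such as "BEC_RES", summarise_score returns one row per sample_id with
# "<metric>_sum" (sum over residues) plus "<metric>_min" and "<metric>_max", which hold
# comma-joined "residue:score" strings for the minimum- and maximum-scoring residues.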
def sample_header_format(item,sample,vcf,filtered,vcf_loc):
if vcf == True:
if item.startswith("##bcftools_mergeCommand=merge"):
if filtered:
item = re.sub(r'(?<=merged\.vcf )[a-zA-Z0-9_\. \/]+(?=;)', vcf_loc, item)
else:
item = re.sub(r'(?<=merged\.vcf )[a-zA-Z0-9_\. \/]+(?=;)', vcf_loc, item)
else:
if item.startswith("##reference="):
item = re.sub(r'(?<=muscle\/)[a-zA-Z0-9_\.\/]+(?=\.fasta)', f'{sample}', item)
if item.startswith("##source="):
item = re.sub(r'(?<=muscle\/)[a-zA-Z0-9_\.]+(?=\.fasta)', f'{sample}', item)
item = re.sub(r'(?<=fatovcf\/)[a-zA-Z0-9_\.]+(?=\.vcf)', f'{sample}', item)
if item.startswith("##bcftools_mergeCommand=merge"):
if filtered:
item = re.sub(r'(?<=merged\.vcf )[a-zA-Z0-9_\. \/]+(?=;)', vcf_loc, item)
else:
item = re.sub(r'(?<=merged\.vcf )[a-zA-Z0-9_\. \/]+(?=;)', vcf_loc, item)
return(item)
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('input_vcf', metavar='anno_concat.tsv', type=str,
help='Concatenated SPEAR anno file')
parser.add_argument('output_dir', metavar='spear_vcfs/', type=str,
help='Destination dir for summary tsv files')
parser.add_argument('data_dir', metavar='data/', type=str,
help='Data dir for binding calculator data files')
parser.add_argument('input_header', metavar='merged.vcf', type=str,
help='Merged VCF file for header retrieval')
parser.add_argument('sample_list', metavar='', nargs="+",
                        help='list of input samples, used to detect samples with no variants')
parser.add_argument('--is_vcf_input', default="False", type=str,
help = "Set input file type to VCF")
parser.add_argument('--is_filtered', default="False", type=str,
help = "Specify files come from filtered directory")
args = parser.parse_args()
Path(f'{args.output_dir}/per_sample_annotation').mkdir(parents=True, exist_ok=True)
    if args.is_vcf_input == "True":
        if args.is_filtered == "True":
infiles = f'{args.output_dir}/intermediate_output/masked/*.masked.vcf'
else:
infiles = f'{args.output_dir}/intermediate_output/indels/*.indels.vcf'
else:
infiles = f'{args.output_dir}/intermediate_output/indels/*.indels.vcf'
with open(args.input_header, 'r') as fobj:
headiter = takewhile(lambda s: s.startswith('#'), fobj)
merged_header = pd.Series(headiter)
merged_header = merged_header.str.replace('\n','')
merged_header = merged_header.str.replace('"','')
cols = ["#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "sample"]
merged_header = merged_header[~merged_header.str.startswith('#CHROM')]
    input_file = pd.read_csv(args.input_vcf, sep = "\t", names = ["sample_id", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "end"])
# -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), | Index([1]) | pandas.Index |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 21:33:11 2018
@author: ysye
"""
#run CIRCLET for RNA-seq dataset
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.preprocessing import minmax_scale
from math import log
from sklearn.metrics import roc_curve, auc
from scipy import stats
from matplotlib.colors import ListedColormap
#os.chdir('E:/Users/yusen/Project/Project3/Python code/CICRLET_package/src/CIRCLET')
from . import CIRCLET_DEFINE
from . import CIRCLET_CORE
bcolors_3=['#EF4F50','#587FBF','#CCCCCC']
bcolors_6=['#587FBF','#3FA667','#EF4F50','#FFAAA3','#414C50','#D3D3D3']
bcolors_12=['#CC1B62','#FBBC00','#0E8934','#AC1120','#EA7B00','#007AB7',
'#9A35B4','#804E1F' ,'#BEAB81','#D32414','#75AB09','#004084']
def Rnaseq_feature_selection(Rnaseq_data,Rnaseq_dir):
"""
    Focus on cell-cycle annotated genes
"""
#gene_files_add='./result_files/CellCycleGenes'
Type='CellCycle'
Marker_genes_add=Rnaseq_dir+'/GO_term_summary_'+Type+'.xlsx'
    MCellCyclegenes = pd.read_excel(Marker_genes_add, "Annotation", header=0, index_col=None)
import numpy as np
import pandas as pd
from pathlib import Path
from torch.utils.data import Dataset
from dataset_classes.mvcnn_object_class import MVCNNObjectClass
from dataset_classes.mvcnn_object_class_instance import MVCNNObjectClassInstance
from settings import consts, utils
class MVCNNDataset(Dataset):
"""
Manages all dataset instances
"""
def __init__(self, dataset_type_name, num_classes=None, verbose=True):
self._type_name = dataset_type_name
self._num_classes = utils.get_num_classes(num_classes)
self._classes_list = self._get_classes_list()
self._instances_list = []
self._identify_instances()
if verbose:
self._print_dataset_summary()
def _get_classes_list(self):
"""
Creates object class list to store class metadata
"""
iterator = list(enumerate(Path(consts.DATA_DIR).iterdir()))
classes_list = [
MVCNNObjectClass(class_num, class_path, self._num_classes) for class_num, class_path in iterator if class_num < self._num_classes
]
return classes_list
def _identify_instances(self):
"""
Identifies all dataset instances
"""
for mvcnn_class in self._classes_list:
self._get_class_instances(mvcnn_class)
def _get_class_instances(self, mvcnn_class):
"""
Identifies all instances of given class
"""
class_dataset_path = mvcnn_class.get_path()/self._type_name
class_dataset_img_paths = self._get_dataset_img_paths(class_dataset_path)
class_dataset_instances = self._get_class_dataset_instances(class_dataset_img_paths)
for class_dataset_instance_id in class_dataset_instances:
class_instance_img_paths = self._get_class_instance_img_paths(class_dataset_instance_id, class_dataset_img_paths)
class_instance = MVCNNObjectClassInstance(
mvcnn_class, class_dataset_instance_id, class_instance_img_paths
)
self._instances_list.append(class_instance)
mvcnn_class.update_summary(class_instance)
def _get_dataset_img_paths(self, class_dataset_path):
"""
Returns image paths of a given dataset path
"""
class_image_paths = [
path for path in class_dataset_path.iterdir() if path.suffix.lower() in consts.IMG_SUFFIX_LIST
]
return class_image_paths
def _get_class_dataset_instances(self, class_dataset_img_paths):
"""
Returns instance id's of all given image paths
"""
class_dataset_instances = np.unique(
[path.name.split('_')[-2] for path in class_dataset_img_paths]
)
return class_dataset_instances
def _get_class_instance_img_paths(self, class_dataset_instance, class_dataset_img_paths):
"""
Returns all image paths pertaining to a given class instance
"""
class_instance_img_paths = [
img_path for img_path in class_dataset_img_paths if img_path.name.split('_')[-2] == class_dataset_instance
]
return class_instance_img_paths
def _print_dataset_summary(self):
"""
Displays the summary of the dataset
"""
print('='*60)
print('Dataset type:', self._type_name.upper())
for class_object in self._classes_list:
class_object.print_summary()
def get_summary_df(self):
"""
Returns DataFrame with summary
"""
summary_dict = {
'class_id': [],
'class_name': [],
'num_instances': [],
'num_images': [],
}
for class_object in self._classes_list:
for key, value in class_object.get_summary().items():
summary_dict[key].append(value)
        return pd.DataFrame(summary_dict)
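

# Hedged usage sketch (added for illustration, not part of the original module): the
# dataset type name and class count below are assumptions, and consts.DATA_DIR must
# already point at a multi-view image dataset laid out as this class expects.
if __name__ == "__main__":
    train_set = MVCNNDataset("train", num_classes=10)
    print(train_set.get_summary_df())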
import datetime as dt
import json
import os
import numpy as np
import pandas as pd
import tqdm
from typing import Any, Dict, List, Tuple, Optional
from cop_e_cat import covariate_selection as cs
import cop_e_cat.utils.params as pm
from cop_e_cat.utils import combine_stats, pipeline_config, init_configuration, local_verbosity, print_per_verbose
class CopECatParams():
def __init__(self, params_file):
f = open(params_file)
self.params = json.load(f)
self.imputation: bool = self.params['imputation']
self.patientweight: bool = self.params['patientweight']
self.delta: int = self.params['delta']
self.max_los: int = self.params['max_los']
self.min_los: int = self.params['min_los']
self.adults: bool = self.params['adults_only']
self.icd_codes: List[Any] = self.params['icd_codes']
self.vitals_dict: Dict[str, List[int]] = self.params['vitals_dict']
self.labs_dict: Dict[str, List[int]] = self.params['labs_dict']
self.vent_dict: Dict[str, List[int]] = self.params['vent_dict']
self.output_dict: Dict[str, List[int]] = self.params['output_dict']
self.meds_dict: Dict[str, List[int]] = self.params['meds_dict']
self.proc_dict: Dict[str, List[str]] = self.params['procs_dict'] ## NOTE TYPO CHANGE
self.comorbidities_dict: Dict[str, List[str]] = self.params['comorbidities_dict']
self.checkpoint_dir: str = self.params['checkpoint_dir'] ## NOTE TYPO CHANGE
self.output_dir: Optional[str] = self.params['output_dir']
# Initialize the output directories
self.init_dirs()
def init_dirs(self):
if not os.path.exists(self.checkpoint_dir):
os.mkdir(self.checkpoint_dir)
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
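

# Hedged sketch of the params JSON that CopECatParams expects (keys taken from
# __init__ above; every value shown is a placeholder, not a real MIMIC-IV itemid,
# code or path):
#
#   {
#     "imputation": true, "patientweight": true, "adults_only": true,
#     "delta": 6, "min_los": 1, "max_los": 30,
#     "icd_codes": [], "vitals_dict": {"HR": [0]}, "labs_dict": {"K": [0]},
#     "vent_dict": {}, "output_dict": {}, "meds_dict": {}, "procs_dict": {},
#     "comorbidities_dict": {}, "checkpoint_dir": "checkpoints/", "output_dir": "output/"
#   }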
def _timestamp_in_window(index: Any, reference_time: Any, series: pd.Series, hours_delta: int) -> bool:
"""Determine whether a given timestamp (as 'charttime' field of a dataframe Series entry) is within a given delta
of a reference time.
Notionally, this is intended to be used inside a loop that's iterating over a dataframe (or slice thereof). Each
entry in the corresponding dataframe is expected to have a 'charttime' field with timestamp data.
(It would probably be more elegant to vectorize this.)
Args:
index (Any): Index iterator over dataframe.
reference_time (Any): the 'current' timestamp for a chart.
series (pd.Series): Relevant slice of a dataframe.
hours_delta (int): Size of the window (in hours).
Returns:
bool: True if the entry of series at index 'index' is between reference_time and reference_time + hours_delta,
otherwise False.
"""
delta = dt.timedelta(hours=hours_delta)
try:
entry_chart_time = pd.to_datetime(series.loc[index, 'charttime'])
return (entry_chart_time >= reference_time and entry_chart_time < reference_time + delta)
except:
try:
entry_chart_time = pd.to_datetime(series.loc[index, 'charttime'])
return (entry_chart_time >= reference_time).any() and (entry_chart_time < reference_time + delta).any()
except:
return False
return False
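
# Worked example (illustrative): with reference_time = 2019-01-01 06:00 and
# hours_delta = 6, a row whose 'charttime' is 2019-01-01 08:30 falls in the
# half-open window [06:00, 12:00) and returns True, while a charttime of exactly
# 2019-01-01 12:00 returns False.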
def _update_frame_with_value(chart: pd.DataFrame, key: str, timestamp: Any, index: Any, series: pd.Series) -> pd.DataFrame:
"""Update dataframe `chart` to average in the new value identified by the 'value' key of data series `series` for the index `index`.
Args:
chart (pd.DataFrame): The state space dataframe being built
key (str): Key identifying the covariate being iterated over
timestamp (Any): Timestamp of the current window/bucket.
index (Any): index of iteration over the rows recording covariate observations.
series (pd.Series): Relevant slice of the source data frame: the covariate observations, subsetted to a relevant
group of covariates.
Returns:
pd.DataFrame: The chartframe being built, as modified in-place.
"""
key_lc = key.lower()
chart.loc[timestamp, key_lc] = np.nanmean([chart.loc[timestamp, key_lc], series.loc[index, 'value']])
return chart
def _update_frame_with_valuenum(chart: pd.DataFrame, key: str, timestamp: Any, index: Any, series: pd.Series) -> pd.DataFrame:
"""Update dataframe `chart` to average in the new value identified by the 'valuenum' key of data series `series` for the index `index`.
Args:
chart (pd.DataFrame): The state space dataframe being built
key (str): Key identifying the covariate being iterated over
timestamp (Any): Timestamp of the current window/bucket.
index (Any): index of iteration over the rows recording covariate observations.
series (pd.Series): Relevant slice of the source data frame: the covariate observations, subsetted to a relevant
group of covariates.
Returns:
pd.DataFrame: The chartframe being built, as modified in-place.
"""
key_lc = key.lower()
## As with the function above, I'm concerned about iterative averaging here
chart.loc[timestamp, key_lc] = np.nanmean([chart.loc[timestamp, key_lc], series.loc[index, 'valuenum']])
return chart
def _buildTimeFrame(df_adm: pd.DataFrame, delta: int = 6) -> pd.DataFrame:
"""Generate dataframe with one entry per desired final state space. This is the scaffold
to which we will subsequently add requested features.
Args:
df_adm (pd.DataFrame): Dataframe containing admissions data.
delta (int, optional): Hours per state space. Defaults to 6.
Returns:
pd.DataFrame: Dataframe with timestamps to align features to.
"""
# Get admit and discharge time in numeric form, round down/up respectively to the nearest hour
start = pd.to_datetime(df_adm.intime.unique()).tolist()[0]
start -= dt.timedelta(minutes=start.minute, seconds=start.second, microseconds=start.microsecond)
# Set end time to the end of the hour in which the last patient was dismissed
end = pd.to_datetime(df_adm.outtime.unique()).tolist()[0]
end -= dt.timedelta(minutes=end.minute, seconds=end.second, microseconds=end.microsecond)
end += dt.timedelta(hours=1)
times = []
curr = start
while curr < end:
times.append(curr)
curr += dt.timedelta(hours=delta)
timeFrame = pd.DataFrame(data={'timestamp': times}, index=times)
return timeFrame
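
# Worked example (illustrative): for an admission with intime 2019-01-01 03:40 and
# outtime 2019-01-01 20:10, _buildTimeFrame(df_adm, delta=6) yields timestamps
# 03:00, 09:00 and 15:00 -- one row per 6-hour bucket from the admission hour
# (rounded down) through the end of the hour of discharge.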
class CopECat():
"""Class which generates state spaces for MIMIC-IV data, using specified data values, interpolation, and cohort settings.
"""
def __init__(self, params: CopECatParams):
"""Constructor for CopECat instance.
Args:
params (CopECatParams): Configuration parameters (table names and loaded parameters file).
"""
self.params = params
self.ONE_HOT_LABS = ['Ca', 'Glucose', 'CPK', 'PTH', 'LDH', 'AST', 'ALT']
self.UNIT_SCALES = {'grams': 1000, 'mcg': 0.001, 'pg': 1e-9, 'ml': 1}
def _load_data(self) -> Tuple[List[int], List[Any], Dict[str, float]]:
"""Return the cohort, dataframes containing various features, and the mean of each feature over the cohort population.
Returns:
Tuple[List[int], List[Any], Dict[str, float]]: 3-tuple. First element is a deduped list of unique hadm_ids for every
member of the cohort. Second element is a list of dataframes and/or Tuple[DataFrame, PopulationFeaturesDict]
with the table-level features. Final element is a dictionary mapping feature name (from params.py) to the
mean of that feature over the retrieved cohort population.
"""
admissions, cohort, _ = cs.gather_cohort(adults=self.params.adults, patient_weight=self.params.patientweight,
icd_diagnoses=self.params.icd_codes, min_los=self.params.min_los,
max_los=self.params.max_los, regenerate=True)
vitals, stats = cs.gather_vitals(self.params.vitals_dict, regenerate=True)
labs, stats = cs.gather_labs(self.params.labs_dict, regenerate=True)
outputs = cs.gather_outputs(self.params.output_dict, regenerate=True)
procs, vent = cs.gather_procedures(self.params.vent_dict, self.params.proc_dict)
meds = cs.gather_meds(self.params.meds_dict, regenerate=True)
comorbs = cs.gather_comorbidities(self.params.comorbidities_dict)
popmeans = self._generate_popmeans()
cohort = list(set(set(cohort) & set(vitals['hadm_id']) & set(labs['hadm_id'])))
print("Length of cohort: ", len(cohort))
return cohort, [admissions, vitals, labs, outputs, procs, vent, meds, comorbs], popmeans
def _generate_popmeans(self) -> Dict[str, float]:
"""Write to file the population means of the requested features.
Returns:
Dict[str, float]: Dictionary mapping feature name to population mean.
"""
# Hardcoded vars--these should be the paths to the feature files extracted by pipeline_config
f = open(pipeline_config.vitals_stats)
vitals_stats = json.load(f)
f = open(pipeline_config.labs_stats)
labs_icu_stats = json.load(f)
f = open(pipeline_config.cohort_stats)
cohort_stats = json.load(f)
popmeans = combine_stats([vitals_stats, labs_icu_stats, cohort_stats])
with open(pipeline_config.popmeans_stats, 'w') as f:
json.dump(popmeans, f)
return popmeans
# Modify this line to adjust verbosity level in code, overriding what's set by command line argument
# @local_verbosity(new_level=3)
def _chartFrames(self, hadm_id: str, tables: List[pd.DataFrame], popmeans: Dict[str, float]) -> pd.DataFrame:
"""Build time-indexed dataframe of each patient admission, with resampled values of all variables
Args:
hadm_id (int): Hadm_id for one patient
tables (List[Any]): List of the dataframes (often with PopulationFeaturesDict) for the extracted
table-level data.
popmeans (Dict[str, float]): Mapping of feature name to its mean among the cohort.
Returns:
pd.DataFrame: State space representation for one patient.
"""
use_admissions = True
use_comorbidities = True
use_vitals = True
use_labs = True
use_meds = True
use_procedures = False
use_ventilation = False
admissions, vitals, labs, outputs, procs, vent, meds, comorbidities = tables
delta: int = self.params.delta
def _add_admissions(chart: pd.DataFrame, admission_frame: pd.Series) -> pd.DataFrame:
print_per_verbose(1, 'Admission Data')
for var in ['hadm_id', 'anchor_age', 'patientweight', 'los', 'gender']:
chart[var.lower()] = admission_frame[var].head(1).item()
chart['gender'] = (chart['gender'] == 'F').astype(int)
# chart['expired'] = (chart['dod'] != None).astype(int)
chart['dod'] = admission_frame['dod']
return chart
def _add_comorbidities(chart: pd.DataFrame, comorbidities: pd.DataFrame, hadm_id: int,
comorbidities_dict: Dict[str, List[str]]) -> pd.DataFrame:
print_per_verbose(1, 'Morbidities')
df_comorbs = comorbidities[comorbidities.hadm_id == hadm_id]
for subpop in comorbidities_dict:
subpop_df = df_comorbs[df_comorbs.long_title.isin(comorbidities_dict[subpop])]
if subpop_df.empty:
chart[subpop] = 0
else:
chart[subpop] = 1
return chart
def _add_vitals(chart: pd.DataFrame, vitals: pd.DataFrame, hadm_id: int, delta: int, vitals_dict: Dict[str, List[int]],
popmeans: Dict[str, float]) -> pd.DataFrame:
print_per_verbose(1, 'Vitals')
df_vits = vitals[vitals.hadm_id == hadm_id].drop_duplicates()
for k in sorted(list(vitals_dict.keys())):
chart[k.lower()] = np.nan
for t in chart.timestamp:
subset = df_vits[df_vits.itemid.isin(vitals_dict[k])]
for i in subset.index:
if _timestamp_in_window(i, t, subset, delta):
chart = _update_frame_with_valuenum(chart, k, t, i, subset)
chart[k.lower()] = chart[k.lower()].fillna(method='ffill').fillna(value=popmeans[k])
return chart
def _add_labs(chart: pd.DataFrame, labs: pd.DataFrame, hadm_id: int, delta: int, labs_dict: Dict[str, List[int]],
popmeans: Dict[str, float]) -> pd.DataFrame:
print_per_verbose(1, 'Labs')
df_labs = labs[labs.hadm_id == hadm_id].drop_duplicates()
for k in sorted(list(labs_dict.keys())):
chart[k.lower()] = np.nan
for t in chart.timestamp:
subset = df_labs[df_labs['itemid'].isin(self.params.labs_dict[k])]
for i in subset.index:
if _timestamp_in_window(i, t, subset, delta):
if k not in self.ONE_HOT_LABS:
chart = _update_frame_with_valuenum(chart, k, t, i, subset)
else:
chart.loc[t, k.lower()] = 1
if k not in self.ONE_HOT_LABS:
chart[k.lower()] = chart[k.lower()].fillna(method='ffill').fillna(value=popmeans[k])
else:
chart[k.lower()] = chart[k.lower()].fillna(method='ffill', limit=24 // delta).fillna(value=0)
return chart
def _add_vent(chart: pd.DataFrame, vent: pd.DataFrame, hadm_id: int, delta: int) -> pd.DataFrame:
print_per_verbose(1, 'Ventilation')
## Again, guessing about the table values. I don't understand the selector code.
df_vent = vent[vent.hadm_id == hadm_id].drop_duplicates()
if df_vent.empty:
chart['vent'] = 0
else:
chart['vent'] = np.nan
for t in chart.timestamp:
# TODO: Hand off to interpolation module
                    for i in df_vent.index:
if _timestamp_in_window(i, t, df_vent, delta):
chart = _update_frame_with_value(chart, 'vent', t, i, df_vent)
else:
chart.loc[t, 'vent'] = 0
return chart
def _add_meds(chart: pd.DataFrame, meds: pd.DataFrame, hadm_id: int, delta: int, meds_dict: Dict[str, List[int]]) -> pd.DataFrame:
print_per_verbose(1, 'Medication')
df_meds = meds[meds.hadm_id == hadm_id].drop_duplicates()
for k in sorted(list(meds_dict.keys())):
chart[k.lower()] = 0
# if k in ['K-IV', 'K-nonIV', 'Mg-IV', 'Mg-nonIV', 'P-IV', 'P-nonIV']:
# chart['hours-' + k.lower()] = 0
subset = df_meds[df_meds.itemid.isin(self.params.meds_dict[k])]
for t in chart.timestamp:
for i, row in subset.iterrows():
if row.amountuom == 'dose':
continue
scaler = self.UNIT_SCALES.get(row.amountuom, 1)
if row.endtime is np.nan:
chart.loc[t, 'hours-' + k.lower()] = float('nan')
continue
td = pd.to_datetime(row.endtime) - pd.to_datetime(row.starttime)
hours = td.days * 24 + td.seconds / 3600
# medicine administered in this bucket
if ((pd.to_datetime(row.starttime) >= t and pd.to_datetime(row.starttime) < t + dt.timedelta(hours=delta)) and (
pd.to_datetime(row.endtime) > t and pd.to_datetime(row.endtime) < t + dt.timedelta(hours=delta))):
chart.loc[t, k.lower()] += scaler * float(row.amount)
chart.loc[t, 'hours-' + k.lower()] = hours
# medicine is administered before and after this bucket
elif ((pd.to_datetime(row.starttime) <= t) and (pd.to_datetime(row.endtime) > t + dt.timedelta(hours=delta))):
med_amount = scaler * float(row.amount)
med_start = pd.to_datetime(row.starttime)
med_end = pd.to_datetime(row.endtime)
denom = med_end - med_start
denom = denom.days * 24 + denom.seconds / 3600
# Scaling by hours
chart.loc[t, k.lower()] += (delta / denom) * med_amount
chart.loc[t, 'hours-' + k.lower()] = delta
# Starts before the bucket, ends during the bucket
elif ((pd.to_datetime(row.starttime) < t) and (
pd.to_datetime(row.endtime) >= t and pd.to_datetime(row.endtime) < t + dt.timedelta(hours=delta))):
med_amount = scaler * float(row.amount)
med_start = pd.to_datetime(row.starttime)
med_end = pd.to_datetime(row.endtime)
num = med_end - t
num = num.days * 24 + num.seconds / 3600
denom = med_end - med_start
denom = denom.days * 24 + denom.seconds / 3600
chart.loc[t, k.lower()] += (num / denom) * med_amount
chart.loc[t, 'hours-' + k.lower()] = num
# Starts during bucket, ends after bucket
elif ((pd.to_datetime(row.starttime) >= t) and (pd.to_datetime(row.starttime) < t + dt.timedelta(hours=delta)) and (
pd.to_datetime(row.endtime) > t + dt.timedelta(hours=delta))):
med_amount = scaler * float(row.amount)
                            med_start = pd.to_datetime(row.starttime)
from flask import Flask, render_template, request, send_from_directory, send_file
from pandas import read_excel, DataFrame
import os
import numpy as np
UPLOAD_FOLDER = 'uploads/'
app = Flask("excel-app")
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/")
def homepage():
return render_template("home.html")
@app.route("/output", methods=["POST"])
def output():
if request.method == "POST":
if request.form.get("openFile"):
file = request.files["excel_file"]
fileName = file.filename
file.save(os.path.join(app.config['UPLOAD_FOLDER'], fileName))
db = read_excel("./"+UPLOAD_FOLDER+"/"+fileName)
columnNames = db.columns
columnNames = columnNames.to_list()
data = db.values
return render_template("output.html", columnNames=columnNames, data=data, fileName=fileName)
elif request.form.get("newData"):
fileName = request.form.get("fileName")
db = read_excel("./"+UPLOAD_FOLDER+"/"+fileName)
columnNames = db.columns
columnNames = columnNames.to_list()
data = db.values
row = np.array([])
for field in columnNames:
row = np.hstack([row, request.form.get(field)])
data = np.vstack([data,row])
            newdb = DataFrame(data=data, columns=columnNames)
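            # Hedged completion sketch: the original file is truncated at this point, so
            # writing the updated table back to the uploaded workbook and re-rendering the
            # output page are assumptions rather than recovered code.
            newdb.to_excel(os.path.join(app.config['UPLOAD_FOLDER'], fileName), index=False)
            return render_template("output.html", columnNames=columnNames, data=newdb.values, fileName=fileName)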
# -*- coding: utf-8 -*-
from .._utils import color_digits, color_background
from ..data import Data, DataSamples
#from ..woe import WOE
import pandas as pd
#import math as m
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split, GridSearchCV, PredefinedSplit
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score, roc_curve, auc
#from scipy.stats import chi2, chisquare, ks_2samp, ttest_ind
#import statsmodels.formula.api as sm
import warnings
from abc import ABCMeta, abstractmethod
#from sklearn.feature_selection import GenericUnivariateSelect, f_classif
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
import re
import ast
import os
import xlsxwriter
from PIL import Image
import datetime
from dateutil.relativedelta import *
import gc
#import weakref
import copy
import itertools
import calendar
#from ..cross import DecisionTree, Crosses
import networkx as nx
from operator import itemgetter
import matplotlib.ticker as mtick
try:
import fastcluster
except Exception:
print('For fullness analysis using hierarchical clustering please install fastcluster package.')
from scipy.cluster.hierarchy import fcluster
try:
import hdbscan
except Exception:
print('For fullness analysis using HDBSCAN clustering please install hdbscan package.')
from sklearn.cluster import KMeans
from sklearn.tree import export_graphviz
from os import system
from IPython.display import Image as Display_Image
#from joblib import Parallel, delayed
# Created by <NAME> and <NAME>
warnings.simplefilter('ignore')
plt.rc('font', family='Verdana')
plt.style.use('seaborn-darkgrid')
pd.set_option('display.precision', 3)
class Processor(metaclass = ABCMeta):
"""
Base class for processing objects of Data class
"""
@abstractmethod
def __init__(self):
'''
self.stats is a DataFrame with statistics about self.work()
'''
self.stats = pd.DataFrame()
@abstractmethod
def work(self, data, parameters):
pass
def param_dict_to_stats(self, data, d):
'''
TECH
Transforms a dict of parameters to self.stats
Parameters
-----------
data: Data object being processed
d: dictionary {action : list_of_features} where action is a string with action description and list_of_features is a list of features' names to apply the action to
'''
col1 = []
col2 = []
for (how, features) in d.items():
col1 = col1 + [how + ' (' + str(round(data.dataframe[features[i]].mean(), 3)) + ')' if how == 'mean' else how for i in range(len(features))]
col2 = col2 + features
self.stats = pd.DataFrame({'action' : col1, 'features': col2})
#---------------------------------------------------------------
class MissingProcessor(Processor):
'''
Class for missing values processing
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, parameters, quantiles=100, precision=4):
'''
Deals with missing values
Parameters:
-----------
data: an object of Data type that should be processed
parameters: {how_to_process : features_to_process}
how_to_process takes:
'delete' - to delete samples where the value of any feature from features_to_process is missing
'mean' - for each feature from features_to_process to fill missings with the mean value
'distribution' - for each feature from features_to_process to fill missings according to non-missing distribution
a value - for each feature from features_to_process to fill missings with this value
features_to_process takes list of features from data
quantiles: number of quantiles for 'distribution' type of missing process - all values are divided into quantiles,
            then missing values are filled with average values of quantiles. If the number of unique values is less than the number of quantiles
or field type is not int, float, etc, then no quantiles are calculated - missings are filled with existing values according
to their frequency
precision: precision for quantile edges and average quantile values
Returns:
----------
A copy of data with missings processed for features mentioned in parameters
'''
for how in parameters:
if isinstance(parameters[how], str):
parameters[how] = [parameters[how]]
result = data.dataframe.copy()
for how in parameters:
if how == 'delete':
for feature in parameters[how]:
result = result[result[feature].isnull() == False]
if data.features != None and feature in data.features:
data.features.remove(feature)
elif how == 'mean':
for feature in parameters[how]:
result[feature].fillna(result[feature].mean(), inplace = True)
elif how == 'distribution':
for feature in parameters[how]:
if data.dataframe[feature].dtype not in (float, np.float32, np.float64, int, np.int32, np.int64) or data.dataframe[feature].unique().shape[0]<quantiles:
summarized=data.dataframe[[feature]].dropna().groupby(feature).size()
summarized=summarized.reset_index().rename({feature:'mean', 0:'size'}, axis=1)
else:
summarized=data.dataframe[[feature]].rename({feature:'value'}, axis=1).join(pd.qcut(data.dataframe[feature].dropna(), q=quantiles, precision=4, duplicates='drop')).groupby(feature).agg(['mean', 'size'])
summarized.columns=summarized.columns.droplevel()
summarized=summarized.reset_index(drop=True)
#summarized=summarized.reset_index()
summarized['p']=summarized['size']/summarized['size'].sum()
result[feature]=result[feature].apply(lambda x: np.random.choice(summarized['mean'].round(precision), p=summarized['p']) if pd.isnull(x) else x)
else:
result[parameters[how]] = result[parameters[how]].fillna(how)
# statistics added on Dec-04-2018
self.param_dict_to_stats(data, parameters)
return Data(result, data.target, data.features, data.weights, data.name)
#---------------------------------------------------------------
class StabilityAnalyzer(Processor):
'''
For stability analysis
'''
def __init__(self):
self.stats = pd.DataFrame({'sample_name' : [], 'parameter' : [], 'meaning': []})
def work(self, data, time_column, sample_name = None, psi = None, event_rate=None, normalized=True, date_format = "%d.%m.%Y", time_func = (lambda x: 100*x.year + x.month),
yellow_zone = 0.1, red_zone = 0.25, figsize = None, out = True, out_images = 'StabilityAnalyzer/', sep=';', base_period_index=0):
'''
Calculates the dynamic of feature (or groups of values) changes over time so it should be used only for discrete or WOE-transformed
features. There are 2 types of analysis:
        PSI. Represents a heatmap (Stability Table) of feature stability that contains 3 main zones: green (the feature is
        stable), yellow (the feature is not very stable) and red (the feature is unstable). The StabilityIndex (PSI) is calculated for each
        time period relative to the base period (the first period by default, see base_period_index).
        Stability index algorithm:
        For each feature value i and time period t the number of samples N[i, t] is counted and converted to a share p[i, t] = N[i, t]/sum_i(N[i, t]).
        StabilityIndex[t] = sum_i( (p[i, t] - p[i, base]) * ln(p[i, t] / p[i, base]) )
ER (event rate). Calculates average event rate and number of observations for each feature's value over time.
After calculation displays the Stability Table (a heatmap with stability indexes for each feature value and time period)
and Event rate graphs
Parameters:
-----------
data: data to analyze (type Data)
time_column: name of a column with time values to calculate time periods
sample_name: name of sample for report
psi: list of features for PSI analysis (if None then all features from the input Data object will be used)
event_rate: list of features for event rate and distribution in time analysis (if None then all features from the input Data object will be used)
date_format: format of time values in time_column. Codes for format:
%a Weekday as locale’s abbreviated name. Sun, Mon, …, Sat (en_US)
%A Weekday as locale’s full name. Sunday, Monday, …, Saturday (en_US)
%w Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. 0, 1, …, 6
%d Day of the month as a zero-padded decimal number. 01, 02, …, 31
%b Month as locale’s abbreviated name. Jan, Feb, …, Dec (en_US)
%B Month as locale’s full name. January, February, …, December (en_US)
%m Month as a zero-padded decimal number. 01, 02, …, 12
%y Year without century as a zero-padded decimal number. 00, 01, …, 99
%Y Year with century as a decimal number. 1970, 1988, 2001, 2013
%H Hour (24-hour clock) as a zero-padded decimal number. 00, 01, …, 23
%I Hour (12-hour clock) as a zero-padded decimal number. 01, 02, …, 12
%p Locale’s equivalent of either AM or PM. AM, PM (en_US)
%M Minute as a zero-padded decimal number. 00, 01, …, 59
%S Second as a zero-padded decimal number. 00, 01, …, 59
%f Microsecond as a decimal number, zero-padded on the left. 000000, 000001, …, 999999
            %z UTC offset in the form +HHMM or -HHMM (empty string if the
object is naive). (empty), +0000, -0400, +1030
%Z Time zone name (empty string if the object is naive). (empty), UTC, EST, CST
%j Day of the year as a zero-padded decimal number. 001, 002, …, 366
%U Week number of the year (Sunday as the first day of the week)
as a zero padded decimal number. All days in a new year preceding
the first Sunday are considered to be in week 0. 00, 01, …, 53 (6)
%W Week number of the year (Monday as the first day of the week) as
a decimal number. All days in a new year preceding the first
Monday are considered to be in week 0. 00, 01, …, 53 (6)
%c Locale’s appropriate date and time representation. Tue Aug 16 21:30:00 1988 (en_US)
%x Locale’s appropriate date representation. 08/16/88 (None); 08/16/1988 (en_US)
%X Locale’s appropriate time representation. 21:30:00 (en_US)
time_func: function for time_column parsing (changes date to some value, representing time period) or
a period type for dt.to_period() function. Codes for available periods:
B business day frequency
C custom business day frequency (experimental)
D calendar day frequency
W weekly frequency
M month end frequency
BM business month end frequency
CBM custom business month end frequency
MS month start frequency
BMS business month start frequency
CBMS custom business month start frequency
Q quarter end frequency
BQ business quarter endfrequency
QS quarter start frequency
BQS business quarter start frequency
A year end frequency
BA business year end frequency
AS year start frequency
BAS business year start frequency
BH business hour frequency
H hourly frequency
T, min minutely frequency
S secondly frequency
L, ms milliseconds
U, us microseconds
N nanoseconds
yellow_zone: the lower border for the yellow stability zone ('not very stable') in percents of derivation
red_zone: the lower border for the red stability zone ('unstable') in percents of derivation
figsize: matplotlib figsize of the Stability Table
out: a boolean for image output or a path for xlsx output file to export the Stability Tables
out_images: a path for image output (default - StabilityAnalyzer/)
sep: the separator to be used in case of csv export
base_period_index: index of period (starting from 0) for other periods to compare with (0 for the first, -1 for the last)
'''
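        # Worked PSI example (made-up shares for one feature with three values):
        #   base period p_base = [0.50, 0.30, 0.20], current period p_t = [0.40, 0.35, 0.25]
        #   PSI = (0.40-0.50)*ln(0.40/0.50) + (0.35-0.30)*ln(0.35/0.30) + (0.25-0.20)*ln(0.25/0.20)
        #       ~= 0.022 + 0.008 + 0.011 = 0.041, i.e. the green zone under the default yellow_zone=0.1.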
print('Warning: only for discrete features!!!')
if sample_name is None:
if pd.isnull(data.name):
sample_name = 'sample'
else:
sample_name = data.name
out_images = out_images + sample_name + '/'
self.stats = self.stats.append(pd.DataFrame({'sample_name' : [sample_name], 'parameter' : ['out'], 'meaning' : [out]}))
self.stats = self.stats.append(pd.DataFrame({'sample_name' : [sample_name], 'parameter' : ['out_images'], 'meaning' : [out_images]}))
psi = data.features.copy() if psi is None else [x for x in psi if x in data.features]
event_rate = data.features.copy() if event_rate is None else [x for x in event_rate if x in data.features]
all_features=list(set(psi+event_rate))
if figsize is None:
figsize=(12, max(1,round(len(psi)/2,0)))
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
writer = pd.ExcelWriter(out, engine='openpyxl')
tmp_dataset = data.dataframe[all_features + [time_column, data.target] + ([] if data.weights is None else [data.weights])].copy()
tmp_dataset[time_column] = pd.to_datetime(tmp_dataset[time_column], format=date_format, errors='coerce')
if callable(time_func):
tmp_dataset['tt'] = tmp_dataset[time_column].map(time_func)
elif isinstance(time_func, str):
try:
tmp_dataset['tt'] = tmp_dataset[time_column].dt.to_period(time_func).astype(str)
except Exception:
print('No function or correct period code was provided. Return None.')
return None
c = 0
for feature in sorted(all_features):
print (feature)
if data.weights is not None:
feature_stats=tmp_dataset[[feature, 'tt', data.target, data.weights]]
feature_stats['---weight---']=feature_stats[data.weights]
else:
feature_stats=tmp_dataset[[feature, 'tt', data.target]]
feature_stats['---weight---']=1
feature_stats[data.target]=feature_stats[data.target]*feature_stats['---weight---']
feature_stats=feature_stats[[feature, 'tt', data.target, '---weight---']].groupby([feature, 'tt'], as_index=False).\
agg({'---weight---':'size', data.target:'mean'}).rename({feature:'value', '---weight---':'size', data.target:'mean'}, axis=1)
feature_stats['feature']=feature
if c == 0:
all_stats = feature_stats
c = c+1
else:
all_stats = all_stats.append(feature_stats, ignore_index=True)
all_stats['size']=all_stats['size'].astype(float)
all_stats['mean']=all_stats['mean'].astype(float)
if len(psi)>0:
stability1=all_stats[all_stats.feature.isin(psi)][['feature', 'value', 'tt', 'size']].pivot_table(values='size', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
stability1.columns.name=None
#display(stability1)
dates = stability1.drop(['feature', 'value'], 1).columns.copy()
stability2 = stability1[['feature', 'value']].copy()
for date in dates:
stability2[date] = list(stability1[date]/list(stability1.drop(['value'], 1).groupby(by = 'feature').sum()[date][:1])[0])
#display(stability2)
start_date = dates[base_period_index]
stability3 = stability2[['feature', 'value']]
for date in dates:
stability3[date] = round(((stability2[date]-stability2[start_date])*np.log(stability2[date]/stability2[start_date])).fillna(0), 2).replace([])
#display(stability3)
stability4 = stability3.drop(['value'], 1).groupby(by = 'feature').sum()
#display(stability4)
fig, ax = plt.subplots(figsize = figsize)
ax.set_facecolor("red")
sns.heatmap(stability4, ax=ax, yticklabels=stability4.index, annot = True, cmap = 'RdYlGn_r', center = yellow_zone, vmax = red_zone, linewidths = .05, xticklabels = True)
if out==True or isinstance(out, str):
plt.savefig(out_images+"stability.png", dpi=100, bbox_inches='tight')
plt.show()
if isinstance(out, str):
if out[-4:]=='.xls' or out[-5:]=='.xlsx':
stability4.style.apply(color_background,
mn=0, mx=red_zone, cntr=yellow_zone).to_excel(writer, engine='openpyxl', sheet_name='PSI')
worksheet = writer.sheets['PSI']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
worksheet.freeze_panes = worksheet['B2']
else:
print('Unknown or unacceptable format for export several tables. Use .xlsx. Skipping export.')
if len(event_rate)>0:
for_event_rate=all_stats[all_stats['feature'].isin(event_rate)]
date_base=pd.DataFrame(all_stats['tt'].unique(), columns=['tt']).sort_values('tt')
for feature in sorted(for_event_rate['feature'].unique()):
cur_feature_data=for_event_rate[for_event_rate['feature']==feature].copy()
#display(cur_feature_data)
if normalized:
for tt in sorted(cur_feature_data['tt'].unique(), reverse=True):
cur_feature_data.loc[cur_feature_data['tt']==tt, 'percent']=cur_feature_data[cur_feature_data['tt']==tt]['size']/cur_feature_data[cur_feature_data['tt']==tt]['size'].sum()
#display(cur_feature_data)
fig, ax = plt.subplots(1,1, figsize=(15, 5))
ax2 = ax.twinx()
ax.grid(False)
ax2.grid(False)
sorted_values=sorted(cur_feature_data['value'].unique(), reverse=True)
for value in sorted_values:
to_visualize='percent' if normalized else 'size'
value_filter = (cur_feature_data['value']==value)
er=date_base.merge(cur_feature_data[value_filter], on='tt', how='left')['mean']
height=date_base.merge(cur_feature_data[value_filter], on='tt', how='left')[to_visualize].fillna(0)
bottom=date_base.merge(cur_feature_data[['tt',to_visualize]][cur_feature_data['value']>value].groupby('tt', as_index=False).sum(), on='tt', how='left')[to_visualize].fillna(0)
ax.bar(range(date_base.shape[0]), height, bottom=bottom if value!=sorted_values[0] else None, edgecolor='white', alpha=0.3)
ax2.plot(range(date_base.shape[0]), er, label=str(round(value,3)), linewidth=2)
plt.xticks(range(date_base.shape[0]), date_base['tt'])
fig.autofmt_xdate()
ax2.set_ylabel('Event Rate')
ax2.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.2%}'.format(y)))
if normalized:
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.2%}'.format(y)))
ax2.annotate('Obs:', xy=(0, 1), xycoords=('axes fraction', 'axes fraction'), xytext=(-25, 5), textcoords='offset pixels', color='blue', size=11)
for i in range(date_base.shape[0]):
ax2.annotate(str(int(cur_feature_data[cur_feature_data['tt']==date_base['tt'][i]]['size'].sum())),
xy=(i, 1),
xycoords=('data', 'axes fraction'),
xytext=(0, 5),
textcoords='offset pixels',
#rotation=60,
ha='center',
#va='bottom',
color='blue',
size=11)
ax.set_ylabel('Total obs')
plt.xlabel(time_column)
plt.suptitle(feature + ' event rate in time' if callable(time_func) else feature + ' event rate in time, period = '+time_func)
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles[::-1], labels[::-1], loc=0, fancybox=True, framealpha=0.3)
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+".png", dpi=100, bbox_inches='tight')
plt.show()
if isinstance(out, str):
if out[-4:]=='.xls' or out[-5:]=='.xlsx':
event_rate_df=all_stats[['feature', 'value', 'tt', 'mean']].pivot_table(values='mean', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
event_rate_df.columns.name=None
event_rate_df.style.apply(color_background,
mn=0, mx=all_stats['mean'].mean()+2*all_stats['mean'].std(), cntr=None,
cmap=matplotlib.cm.RdYlGn_r, subset=pd.IndexSlice[:, [x for x in event_rate_df.columns if x not in ['value','feature']]]).to_excel(writer, engine='openpyxl', sheet_name='Event Rate', index=False)
worksheet = writer.sheets['Event Rate']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
if x[0].column!='B':
for cell in worksheet[x[0].column]:
if cell.row!=1:
cell.number_format = '0.000%'
worksheet.freeze_panes = worksheet['C2']
size_df=all_stats[['feature', 'value', 'tt', 'size']].pivot_table(values='size', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
size_df.columns.name=None
size_df.style.apply(color_background,
mn=0, mx=all_stats['size'].mean()+2*all_stats['size'].std(), cntr=None,
cmap=matplotlib.cm.RdYlGn, subset=pd.IndexSlice[:, [x for x in size_df.columns if x not in ['value','feature']]]).to_excel(writer, engine='openpyxl', sheet_name='Observations', index=False)
worksheet = writer.sheets['Observations']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
worksheet.freeze_panes = worksheet['C2']
else:
print('Unknown or unacceptable format for export several tables. Use .xlsx. Skipping export.')
if isinstance(out, str):
writer.close()
#---------------------------------------------------------------
class DataVisualizer(Processor):
'''
Supports different types of data visualization
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, distribution = True, factorplot = True, factorplot_separate = False, pairplot = None,
out=False, out_images='DataVisualizer/', plot_cells=20, categorical=None):
'''
Produces distribution plot, factorplot, pairplot
Parameters:
-----------
data: data to visualize
distribution: parameter for a distribution plot,
if True - plot for data.features, if list - plot for features from the list, if False - do not use distribution plot
factorplot: parameter for a factorplot,
if True - plot for data.features, if list - plot for features from the list, if False - do not use factorplot
factorplot_separate: if True then separate plots for each target value
pairplot: list of features to make a pairplot for
out: a boolean for images output or a path for xlsx output file
out_images: a path for images output (default - DataVisualizer/)
plot_cells: how many cells would plots get in output excel
categorical: a list of features to be treated as categorical (countplots will be produced instead of distplots)
'''
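        # Example call (feature names and output path are placeholders):
        #   DataVisualizer().work(data, distribution=['age'], factorplot=['age'], factorplot_separate=True,
        #                         pairplot=['age', 'income'], out='visualization.xlsx', categorical=['gender'])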
if pairplot is None:
pairplot=[]
if categorical is None:
categorical=[]
dataframe_t = data.dataframe[data.features + [data.target]].copy()
data = Data(dataframe_t, features = data.features, target = data.target)
if out is not None:
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
# Create an new Excel file and add a worksheet.
workbook = xlsxwriter.Workbook(out)
worksheet = workbook.add_worksheet('Data Visualization')
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 100)
current_plot_number=0
if distribution:
print ('Distributions of features: ')
if type(distribution) == type([1, 1]):
features = distribution
else:
if data.features == None:
print ('No features claimed. Please set data.features = ')
return None
features = data.features
for feature in features:
current_plot_number=current_plot_number+1
if data.dataframe[feature].dtype==object or feature in categorical:
f, axes = plt.subplots()
sns.countplot(data.dataframe[feature].dropna())
f.autofmt_xdate()
else:
sns.distplot(data.dataframe[feature].dropna())
if data.dataframe[feature].isnull().any():
plt.title(feature+' (miss = ' + str(round(data.dataframe[feature].isnull().value_counts()[True]/data.dataframe.shape[0],3))+')')
else:
plt.title(feature+' (miss = 0)')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_d.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_d.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Distribution plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_d.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
print ('---------------------------------------\n')
if factorplot:
print ('Factorplot: ')
if type(factorplot) == type([1, 1]):
features = factorplot
else:
if data.features == None:
print ('No features claimed. Please set data.features = ')
return None
features = data.features
if factorplot_separate:
for feature in features:
current_plot_number=current_plot_number+1
# edited 21-Jun-2018 by <NAME>
f, axes = plt.subplots(data.dataframe[data.target].drop_duplicates().shape[0], 1, figsize=(4, 4), sharex=True)
f.autofmt_xdate()
#for target_v in data.dataframe[data.target].drop_duplicates():
targets = list(data.dataframe[data.target].drop_duplicates())
for target_i in range(len(targets)):
if data.dataframe[data.dataframe[data.target]==targets[target_i]][feature].isnull().any():
x_label=feature + ': ' + data.target + ' = ' + str(targets[target_i]) + ', miss = ' + str(round(data.dataframe[data.dataframe[data.target]==targets[target_i]][feature].isnull().value_counts()[True]/data.dataframe[data.dataframe[data.target]==targets[target_i]].shape[0],3))
else:
x_label=feature + ': ' + data.target + ' = ' + str(targets[target_i]) + ', miss = 0'
if data.dataframe[feature].dtype==object or feature in categorical:
ax=sns.countplot(data.dataframe[data.dataframe[data.target] == targets[target_i]][feature].dropna(),
ax=axes[target_i], color = 'm')
ax.set(xlabel=x_label)
else:
sns.distplot(data.dataframe[data.dataframe[data.target] == targets[target_i]][feature].dropna(),
ax=axes[target_i],
axlabel=x_label, color = 'm')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_f.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_f.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Factor plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_f.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
else:
for feature in features:
current_plot_number=current_plot_number+1
sns.factorplot(x=feature, hue = data.target, data = data.dataframe, kind='count', palette = 'Set1')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_f.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_f.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Factor plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_f.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
print ('---------------------------------------\n')
if pairplot != []:
current_plot_number=current_plot_number+1
print ('Pairplot')
sns.pairplot(data.dataframe[pairplot].dropna())
if out==True or isinstance(out, str):
plt.savefig(out_images+"pairplot.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Pair plot for '+str(pairplot)+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+"pairplot.png")
plt.show()
if isinstance(out, str):
workbook.close()
#---------------------------------------------------------------
class TargetTrendVisualizer(Processor):
'''
Supports target trend visualization
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, features=None, quantiles=100, magnify_trend=False, magnify_std_number=2, hide_every_even_tick_from=50,
min_size=10, out=False, out_images='TargetTrendVisualizer/', plot_cells=20):
'''
Calculates specified quantiles/takes categories, calculates target rates and sizes, then draws target trends
Parameters:
-----------
data: an object of Data type
features: the list of features to visualize, can be omitted
quantiles: number of quantiles to cut feature values on
magnify_trend: if True, then axis scale for target rate will be corrected to exclude outliers
magnify_std_number: how many standard deviations should be included in magnified scale
        hide_every_even_tick_from: if there are more quantiles than this value, then every second tick on the x axis will be hidden
        min_size: the minimal number of observations per quantile; the number of quantiles is reduced if it cannot be met
out: a boolean for images output or a path for xlsx output file
out_images: a path for images output (default - TargetTrendVisualizer/)
        plot_cells: how many cells each plot takes in the output Excel file
'''
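        # Illustrative usage (a sketch; output file names are hypothetical):
        #   TargetTrendVisualizer().work(data, quantiles=50, magnify_trend=True,
        #                                out='target_trends.xlsx', out_images='TargetTrendVisualizer/')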
if features is None:
cycle_features=data.features.copy()
else:
cycle_features=features.copy()
if out is not None:
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
                # Create a new Excel file and add a worksheet.
workbook = xlsxwriter.Workbook(out)
worksheet = workbook.add_worksheet('Target Trend Visualization')
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 100)
current_feature_number=0
for f in cycle_features:
if f not in data.dataframe:
print('Feature', f, 'not in input dataframe. Skipping..')
else:
print('Processing', f,'..')
current_feature_number=current_feature_number+1
if data.dataframe[f].dtype not in (float, np.float32, np.float64, int, np.int32, np.int64) or data.dataframe[f].unique().shape[0]<quantiles:
summarized=data.dataframe[[f, data.target]].groupby([f]).agg(['mean', 'size'])
else:
if data.dataframe[f].dropna().shape[0]<min_size*quantiles:
current_quantiles=int(data.dataframe[f].dropna().shape[0]/min_size)
if current_quantiles==0:
                            print('The number of non-missing observations is less than', min_size,'. No trend to visualize.')
if isinstance(out, str):
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
                                worksheet.write((current_feature_number-1)*(plot_cells+1)+1, 0, 'The number of non-missing observations is less than '+str(min_size)+'. No trend to visualize.')
continue
else:
print('Too few non-missing observations for', quantiles, 'quantiles. Calculating', current_quantiles, 'quantiles..')
else:
current_quantiles=quantiles
summarized=data.dataframe[[data.target]].join(pd.qcut(data.dataframe[f], q=current_quantiles, precision=4, duplicates='drop')).groupby([f]).agg(['mean', 'size'])
small_quantiles=summarized[data.target][summarized[data.target]['size']<min_size]['size']
#display(small_quantiles)
if small_quantiles.shape[0]>0:
current_quantiles=int(small_quantiles.sum()/min_size)+summarized[data.target][summarized[data.target]['size']>=min_size].shape[0]
                        print('There are quantiles with size less than', min_size,'. Attempting', current_quantiles, 'quantiles..')
summarized=data.dataframe[[data.target]].join(pd.qcut(data.dataframe[f], q=current_quantiles, precision=4, duplicates='drop')).groupby([f]).agg(['mean', 'size'])
summarized.columns=summarized.columns.droplevel()
summarized=summarized.reset_index()
if pd.isnull(data.dataframe[f]).any():
with_na=data.dataframe[[f,data.target]][pd.isnull(data.dataframe[f])]
summarized.loc[-1]=[np.nan, with_na[data.target].mean(), with_na.shape[0]]
summarized=summarized.sort_index().reset_index(drop=True)
if summarized.shape[0]==1:
print('Too many observations in one value, so only 1 quantile was created. Increasing quantile number is recommended. No trend to visualize.')
if isinstance(out, str):
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.write((current_feature_number-1)*(plot_cells+1)+1, 0, 'Too many observations in one value, so only 1 quantile was created. Increasing quantile number is recommended. No trend to visualize.')
continue
fig = plt.figure(figsize=(15,5))
ax = fig.add_subplot(111)
ax.set_ylabel('Observations')
# blue is for the distribution
if summarized.shape[0]>hide_every_even_tick_from:
plt.xticks(range(summarized.shape[0]), summarized[f].astype(str), rotation=60, ha="right")
xticks = ax.xaxis.get_major_ticks()
for i in range(len(xticks)):
if i%2==0:
xticks[i].label1.set_visible(False)
else:
plt.xticks(range(summarized.shape[0]), summarized[f].astype(str), rotation=45, ha="right")
ax.bar(range(summarized.shape[0]), summarized['size'], zorder=0, alpha=0.3)
ax.grid(False)
ax.grid(axis='y', zorder=1, alpha=0.6)
ax2 = ax.twinx()
ax2.set_ylabel('Target Rate')
ax2.grid(False)
#display(summarized)
if magnify_trend:
ax2.set_ylim([0, np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))])
for i in range(len(summarized['mean'])):
if summarized['mean'][i]>np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size'])):
ax2.annotate(str(round(summarized['mean'][i],4)),
xy=(i, np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))),
xytext=(i, np.average(summarized['mean'], weights=summarized['size'])+(magnify_std_number+0.05)*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))),
rotation=60,
ha='left',
va='bottom',
color='red',
size=8.5
)
# red is for the target rate values
ax2.plot(range(summarized.shape[0]), summarized['mean'], 'ro-', linewidth=2.0, zorder=4)
if out==True or isinstance(out, str):
plt.savefig(out_images+f+".png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+f+".png").size[1]
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.insert_image((current_feature_number-1)*(plot_cells+1)+1, 0, out_images+f+".png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
if isinstance(out, str):
workbook.close()
class CorrelationAnalyzer(Processor):
'''
Produces correlation analysis
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, drop_features = True, features = None, features_to_leave = None, threshold=0.6, method = 'spearman',
drop_with_most_correlations=True, verbose=False, out_before=None, out_after=None, sep=';', cdict = None):
'''
Calculates the covariance matrix and correlation coefficients for each pair of features.
For each highly correlated pair the algorithm chooses the less significant feature and adds it to the delete list.
Parameters
-----------
data: a Data or DataSamples object to check (in case of DataSamples, train sample will be checked)
drop_features: permission to delete correlated features and return a Data object without them
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be deleted from the feature list
threshold: the lowest value of a correlation coefficient for two features to be considered correlated
method: method for correlation calculation
        drop_with_most_correlations: should the features with the highest number of correlations be excluded first (otherwise the feature with the lowest gini among all correlated features is dropped first)
verbose: flag for detailed output
out_before: file name for export of correlation table before feature exclusion (.csv and .xlsx types are supported)
out_after: file name for export of correlation table after feature exclusion (.csv and .xlsx types are supported)
        sep: the separator in case of .csv export
        cdict: a color dictionary for the LinearSegmentedColormap used in the correlation heatmaps (a default red/green palette is used if None)
Returns
--------
Resulting Data or DataSamples object and the correlation table
'''
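        # Illustrative usage (a sketch; output file names are hypothetical, ginis must be calculated
        # beforehand, e.g. with calc_gini=True in WOE.transform):
        #   reduced_data, corr_table = CorrelationAnalyzer().work(data, threshold=0.6, method='spearman',
        #                                                         out_before='corr_before.xlsx',
        #                                                         out_after='corr_after.xlsx')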
if features is None:
features=[]
if features_to_leave is None:
features_to_leave=[]
self.stats = pd.DataFrame({'drop_features' : [drop_features], 'threshold' : [threshold], 'method' : [method], 'out_before' : out_before, 'out_after' : out_after})
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if len(sample.ginis)==0:
print('No calculated ginis in datasamples.train/data object. Set calc_gini=True while using WOE.transform or use Data.calc_gini. Return None')
return None
if features == [] or features is None:
candidates = sample.features.copy()
else:
candidates = features.copy()
features_to_drop = []
correlations = sample.dataframe[candidates].corr(method = method)
cor_out=correlations.copy()
if cdict is None:
cdict = {'red' : ((0.0, 0.9, 0.9),
(0.5, 0.05, 0.05),
(1.0, 0.9, 0.9)),
'green': ((0.0, 0.0, 0.0),
(0.5, 0.8, 0.8),
(1.0, 0.0, 0.0)),
'blue' : ((0.0, 0.1, 0.1),
(0.5, 0.1, 0.1),
(1.0, 0.1, 0.1))}
#edited 21.08.2018 by <NAME> - added verbose variant, optimized feature dropping
# edited on Dec-06-18 by <NAME>: added png
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
if out_before is not None:
out_before_png = 'corr_before.png'
if out_before[-4:]=='.csv':
draw_corr.round(2).to_csv(out_before, sep = sep)
out_before_png = out_before[:-4] + '.png'
elif out_before[-5:]=='.xlsx' or out_before[-4:]=='.xls':
draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2).to_excel(out_before, engine='openpyxl', sheet_name='Correlation (before)')
out_before_png = out_before[:-5] + '.png' if out_before[-5:]=='.xlsx' else out_before[:-4] + '.png'
elif out_before[-4:]=='.png':
out_before_png = out_before
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
fig_before = sns.heatmap(draw_corr.round(2), annot = True, cmap = LinearSegmentedColormap('mycmap', cdict), cbar = False, center = 0, yticklabels = True, xticklabels = True).figure
fig_before.set_size_inches(draw_corr.shape[0]/2, draw_corr.shape[0]/2)
fig_before.savefig(out_before_png, bbox_inches='tight')
plt.close()
self.stats['out_before'] = out_before_png
if verbose:
display(draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2))
to_check_correlation=True
while to_check_correlation:
to_check_correlation=False
corr_number={}
significantly_correlated={}
for var in correlations:
var_corr=correlations[var].apply(lambda x: abs(x))
var_corr=var_corr[(var_corr.index!=var) & (var_corr>threshold)].sort_values(ascending=False).copy()
corr_number[var]=var_corr.shape[0]
significantly_correlated[var]=str(var_corr.index.tolist())
if drop_with_most_correlations:
with_correlation={x:sample.ginis[x] for x in corr_number if corr_number[x]==max({x:corr_number[x] for x in corr_number if x not in features_to_leave}.values()) and corr_number[x]>0 and x not in features_to_leave}
else:
with_correlation={x:sample.ginis[x] for x in corr_number if corr_number[x]>0 and x not in features_to_leave}
if len(with_correlation)>0:
feature_to_drop=min(with_correlation, key=with_correlation.get)
features_to_drop.append(feature_to_drop)
if verbose:
print('Dropping %(v)s because of high correlation with features: %(f)s (Gini=%(g)0.2f)' % {'v':feature_to_drop, 'f':significantly_correlated[feature_to_drop], 'g':with_correlation[feature_to_drop]})
correlations=correlations.drop(feature_to_drop,axis=1).drop(feature_to_drop,axis=0).copy()
to_check_correlation=True
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
out_after_png = 'corr_after.png'
if out_after is not None:
if out_after[-4:]=='.csv':
draw_corr.round(2).to_csv(out_after, sep = sep)
out_after_png = out_after[:-4] + '.png'
elif out_after[-5:]=='.xlsx' or out_after[-4:]=='.xls':
draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2).to_excel(out_after, engine='openpyxl', sheet_name='Correlation (after)')
out_after_png = out_after[:-5] + '.png' if out_after[-5:]=='.xlsx' else out_after[:-4] + '.png'
elif out_after[-4:]=='.png':
out_after_png = out_after
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
#sns.heatmap(draw_corr.round(2), annot = True, cmap = 'RdBu_r', cbar = False, center = 0).figure.savefig(out_after_png, bbox_inches='tight')
fig_after = sns.heatmap(draw_corr.round(2), annot = True, cmap = LinearSegmentedColormap('mycmap', cdict), cbar = False, center = 0, yticklabels = True, xticklabels = True).figure
fig_after.set_size_inches(draw_corr.shape[0]/2, draw_corr.shape[0]/2)
fig_after.savefig(out_after_png, bbox_inches='tight')
plt.close()
if verbose:
display(draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2))
self.stats['out_after'] = out_after_png
result_data = copy.deepcopy(data)
if drop_features:
result_data.features_exclude(features_to_drop, verbose=False)
if verbose:
print('Dropped (if drop_features=True):', features_to_drop)
return result_data, cor_out
def find_correlated_groups(self, data, features = None, features_to_leave = None, threshold=0.6, method = 'spearman',
verbose=False, figsize=(12,12), corr_graph_type='connected'):
'''
Calculates the covariance matrix and correlation coefficients for each pair of features and
returns groups of significantly correlated features
Parameters
-----------
        data: a Data or DataSamples object to check (in case of DataSamples its train sample will be checked)
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be included in analysis
threshold: the lowest value of a correlation coefficient for two features to be considered correlated
method: method for correlation calculation
verbose: flag for detailed output
figsize: the size of correlation connections graph (printed if verbose)
        corr_graph_type: type of connectivity to pursue in finding groups of correlated features
            'connected' - groups are formed from features directly or indirectly connected by significant correlation
'complete' - groups are formed from features that are directly connected to each other by significant
correlation (each pair of features from a group will have a significant connection)
Returns
--------
        a list of lists representing correlated groups
'''
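        # Illustrative usage (a sketch):
        #   groups = CorrelationAnalyzer().find_correlated_groups(data, threshold=0.6,
        #                                                         corr_graph_type='connected')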
if features is None:
features=[]
if features_to_leave is None:
features_to_leave=[]
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if features == [] or features is None:
candidates = [x for x in sample.features if x not in features_to_leave]
else:
candidates = [x for x in features if x not in features_to_leave]
correlations = sample.dataframe[candidates].corr(method = method)
if verbose:
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
display(draw_corr.round(2).style.applymap(color_digits,threshold_red=threshold))
G=nx.Graph()
for i in range(correlations.shape[0]):
for j in range(i+1, correlations.shape[0]):
if correlations.loc[correlations.columns[i], correlations.columns[j]]>threshold:
G.add_nodes_from([correlations.columns[i], correlations.columns[j]])
G.add_edge(correlations.columns[i], correlations.columns[j], label=str(round(correlations.loc[correlations.columns[i], correlations.columns[j]],3)))
if verbose:
plt.figure(figsize=(figsize[0]*1.2, figsize[1]))
pos = nx.spring_layout(G, k=100)
edge_labels = nx.get_edge_attributes(G,'label')
nx.draw(G, pos, with_labels=True)
nx.draw_networkx_edge_labels(G, pos, edge_labels = edge_labels)
plt.margins(x=0.2)
plt.show()
correlated_groups=[]
if corr_graph_type=='connected':
for x in nx.connected_components(G):
correlated_groups.append(sorted(list(x)))
elif corr_graph_type=='complete':
for x in nx.find_cliques(G):
correlated_groups.append(sorted(x))
else:
print('Unknown correlation graph type. Please use "connected" or "complete". Return None.')
return None
return correlated_groups
#---------------------------------------------------------------
class VIF(Processor):
'''
Calculates variance inflation factor for each feature
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, drop_features = False, features=None, features_to_leave=None, threshold = 5,
drop_with_highest_VIF=True, verbose=True, out=None, sep=';'):
'''
Parameters
-----------
        data: a Data or DataSamples object to check VIF on (in case of DataSamples its train sample will be checked)
        drop_features: permission to delete excluded features and return a Data object without them
        features: a list of features to analyze; by default - all the features
        features_to_leave: a list of features that must not be deleted from the feature list
        threshold: the lowest value of VIF for a feature to be excluded
drop_with_highest_VIF: should the features with the highest VIF be excluded first (otherwise just with the lowest gini)
verbose: flag for detailed output
out: file name for export of VIF values (.csv and .xlsx types are supported)
sep: the separator in case of .csv export
Returns
---------
Data or DataSamples object without excluded features
A pandas DataFrame with VIF values on different iterations
'''
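        # VIF for a feature is 1/(1 - R^2), where R^2 comes from regressing that feature on all the
        # other features; values above ~5 are commonly read as strong multicollinearity.
        # Illustrative usage (a sketch; the output file name is hypothetical, ginis must be calculated beforehand):
        #   reduced_data, vif_table = VIF().work(data, drop_features=True, threshold=5, out='VIF.xlsx')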
if features_to_leave is None:
features_to_leave=[]
self.stats = pd.DataFrame({'drop_features' : [drop_features], 'threshold' : [threshold], 'out' : [out]})
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if len(sample.ginis)==0:
print('No calculated ginis in datasamples.train/data object. Set calc_gini=True while using WOE.transform or use Data.calc_gini. Return None')
return None
if features is None:
features = sample.features.copy()
features_to_drop = []
to_check_VIF = True
vifs_df=pd.DataFrame(index=features)
iteration=-1
while to_check_VIF:
to_check_VIF = False
iteration=iteration+1
s = sample.target + ' ~ '
for f in features:
s = s + f + '+'
s = s[:-1]
# Break into left and right hand side; y and X
y_, X_ = dmatrices(formula_like=s, data=sample.dataframe, return_type="dataframe")
# For each Xi, calculate VIF
vifs = {features[i-1]:variance_inflation_factor(X_.values, i) for i in range(1, X_.shape[1])}
vifs_df=vifs_df.join(pd.DataFrame(vifs, index=[iteration]).T)
if drop_with_highest_VIF:
with_high_vif={x:sample.ginis[x] for x in vifs if vifs[x]==max({x:vifs[x] for x in vifs if x not in features_to_leave}.values()) and vifs[x]>threshold and x not in features_to_leave}
else:
with_high_vif={x:sample.ginis[x] for x in vifs if vifs[x]>threshold and x not in features_to_leave}
if len(with_high_vif)>0:
feature_to_drop=min(with_high_vif, key=with_high_vif.get)
features_to_drop.append(feature_to_drop)
if verbose:
print('Dropping %(v)s because of high VIF (VIF=%(vi)0.2f, Gini=%(g)0.2f)' % {'v':feature_to_drop, 'vi':vifs[feature_to_drop], 'g':with_high_vif[feature_to_drop]})
features.remove(feature_to_drop)
to_check_VIF=True
result_data = copy.deepcopy(data)
if drop_features:
result_data.features_exclude(features_to_drop, verbose=False)
out_png = 'VIF.png'
if out is not None:
if out[-4:]=='.csv':
vifs_df.round(2).to_csv(out, sep = sep)
out_png = out[:-4] + '.png'
elif out[-5:]=='.xlsx' or out[-4:]=='.xls':
vifs_df.round(2).style.applymap(color_digits, threshold_red=threshold).to_excel(out, engine='openpyxl', sheet_name='Variance Inflation Factor')
out_png = out[:-5] + '.png' if out[-5:]=='.xlsx' else out[:-4] + '.png'
elif out[-4:] == '.png':
out_png = out
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
vif_fig = sns.heatmap(vifs_df.round(2).sort_values(0, ascending = False), xticklabels = False, annot = True,
cmap = 'RdYlBu_r',
cbar = False, vmax = 5, yticklabels = True).figure
vif_fig.set_size_inches(vifs_df.shape[0]/4, vifs_df.shape[0]/2)
vif_fig.savefig(out_png, bbox_inches='tight')
plt.close()
self.stats['out'] = out_png
if verbose:
display(vifs_df.round(2).style.applymap(color_digits, threshold_red=threshold))
print('Dropped (if drop_features=True):', features_to_drop)
return result_data, vifs_df
#---------------------------------------------------------------
class FeatureEncoder(Processor):
'''
For processing non-numeric features
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, how_to_code, inplace = False):
'''
Parameters
-----------
data: data to process, Data type
        how_to_code: a dictionary {how: features_list} where 'how' can be 'one_hot' or 'seq' (meaning 'sequential') and 'features_list' is a list of columns in data to process
inplace: whether to change the data or to create a new Data object
Returns
---------
        Data with additional features and a dictionary for sequential encoding
'''
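        # Illustrative usage (a sketch; 'region' and 'grade' are hypothetical feature names):
        #   encoded_data, seq_codes = FeatureEncoder().work(data,
        #       how_to_code={'one_hot': ['region'], 'seq': ['grade']}, inplace=False)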
result = data.dataframe.copy()
feature_list = data.features.copy()
d = {}
for how in how_to_code:
if how == 'one_hot':
for feature in how_to_code[how]:
one_hot = pd.get_dummies(result[feature])
one_hot.columns = [feature + '_' + str(c) for c in one_hot.columns]
feature_list = feature_list + list(one_hot.columns)
result = result.join(one_hot)
elif how == 'seq':
for feature in how_to_code[how]:
for (i, j) in enumerate(result[feature].drop_duplicates()):
d[j] = i
result[feature + '_code'] = result[feature].apply(lambda x: d[x])
feature_list = feature_list + [feature + '_code']
else:
print ('Do not understand your command. Please use "one_hot" or "seq" for how_to_code. Good luck.')
return None
self.param_dict_to_stats(data, how_to_code)
# for sequential, saves actual encoding
self.stats.loc[self.stats.action == 'seq', 'action'] = str(d)
if inplace:
data = Data(result, features = feature_list, target = data.target, weights = data.weights)
return d
else:
return Data(result, features = feature_list, target = data.target, weights = data.weights), d
#---------------------------------------------------------------
# Author - <NAME>
class GiniChecker(Processor):
'''
Class for gini checking
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, feature, datasamples, gini_threshold=5, gini_decrease_threshold=0.2, gini_increase_restrict=True, verbose=False, with_test=False,
out=False, out_images='GiniChecker/'):
'''
Checks if gini of the feature is significant and stable enough
Parameters
-----------
feature: an object of FeatureWOE type that should be checked
datasamples: an object of DataSamples type containing the samples to check input feature on
        gini_threshold: gini on train and validate/95% bootstrap should be greater than this
        gini_decrease_threshold: gini decrease from train to validate (and 95% bootstrap deviation from the mean, relative to the mean) should be less than this
gini_increase_restrict: if gini increase should also be restricted
verbose: if comments and graphs should be printed
with_test: add checking of gini values on test (calculation is always on)
out: a boolean for image output or a path for csv/xlsx output file to export gini values
out_images: a path for image output (default - GiniChecker/)
Returns
----------
Boolean - whether the check was successful
and if isinstance(out,str) then dictionary of gini values for all available samples
'''
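        # Gini below is calculated as (2*AUC - 1)*100, with -WoE used as the score so that riskier
        # bins (lower WoE) get higher scores. Illustrative usage (a sketch; assumes a fitted FeatureWOE
        # object fw and a DataSamples object ds):
        #   is_ok = GiniChecker().work(fw, ds, gini_threshold=5, gini_decrease_threshold=0.2, verbose=True)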
if out:
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if verbose:
print('Checking', feature.feature)
gini_correct=True
d=feature.transform(datasamples.train, original_values=True)
fpr, tpr, _ = roc_curve(d.dataframe[d.target], -d.dataframe[feature.feature+'_WOE'])
gini_train= (2*auc(fpr, tpr)-1)*100
if verbose:
print('Train gini = '+str(round(gini_train,2)))
if gini_train<gini_threshold:
gini_correct=False
if verbose:
                print('Train gini is less than threshold '+str(gini_threshold))
samples=[datasamples.validate, datasamples.test]
sample_names=['Validate', 'Test']
gini_values={'Train':gini_train}
for si in range(len(samples)):
if samples[si] is not None:
d=feature.transform(samples[si], original_values=True)
fpr, tpr, _ = roc_curve(d.dataframe[d.target], -d.dataframe[feature.feature+'_WOE'])
gini = (2*auc(fpr, tpr)-1)*100
gini_values[samples[si].name]=gini
if verbose:
print(samples[si].name+' gini = '+str(round(gini,2)))
if with_test or samples[si].name!='Test':
if gini<gini_threshold:
gini_correct=False
if verbose:
                            print(samples[si].name+' gini is less than threshold '+str(gini_threshold))
decrease=1-gini/gini_train
if decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
                            print('Gini decrease from Train to '+samples[si].name+' is greater than threshold: '+str(round(decrease,5))+' > '+str(gini_decrease_threshold))
if gini_increase_restrict and -decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
                            print('Gini increase from Train to '+samples[si].name+' is greater than threshold: '+str(round(-decrease,5))+' > '+str(gini_decrease_threshold))
else:
gini_values[sample_names[si]]=None
gini_list=[]
if datasamples.bootstrap_base is not None:
db=feature.transform(datasamples.bootstrap_base.keep(feature.feature), original_values=True)
for bn in range(len(datasamples.bootstrap)):
d=db.dataframe.iloc[datasamples.bootstrap[bn]]
fpr, tpr, _ = roc_curve(d[db.target], -d[feature.feature+'_WOE'])
roc_auc = auc(fpr, tpr)
gini_list.append(round((roc_auc*2 - 1)*100, 2))
mean=np.mean(gini_list)
std=np.std(gini_list)
if verbose:
sns.distplot(gini_list)
plt.axvline(x=mean, linestyle='--', alpha=0.5)
plt.text(mean, 0, ' Mean = '+str(round(mean,2))+', std = '+str(round(std,2)),
horizontalalignment='right', verticalalignment='bottom', rotation=90)
plt.xlabel('Gini values in bootstrap')
plt.ylabel('Distribution')
plt.title(feature.feature, fontsize = 16)
if out:
plt.savefig(out_images+feature.feature+".png", dpi=100, bbox_inches='tight')
plt.show()
if mean-1.96*std<gini_threshold:
gini_correct=False
if verbose:
                    print('Less than 95% of gini distribution is greater than threshold: (mean-1.96*std) '+str(round(mean-1.96*std,5))+' < '+str(gini_threshold))
val_decrease=1.96*std/mean
if val_decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
                    print('Gini deviation from mean for 95% of distribution is greater than threshold: (1.96*std/mean) '+str(round(val_decrease,5))+' > '+str(gini_decrease_threshold))
if isinstance(out, str):
gini_values.update({'Bootstrap'+str(i):gini_list[i] for i in range(len(gini_list))})
return gini_correct, gini_values
else:
return gini_correct
#added 13.08.2018 by <NAME>
def work_all(self, woe, features=None, drop_features=False, gini_threshold=5, gini_decrease_threshold=0.2,
gini_increase_restrict=True, verbose=False, with_test=False, out=False, out_images='GiniChecker/', sep=';'):
'''
Checks if gini of all features from WOE object is significant and stable enough
Parameters
-----------
        woe: an object of WOE type that should be checked
        features: a list of features to check (by default - all features from woe.feature_woes)
        drop_features: should the features be dropped from WOE.feature_woes list in case of failed checks
        gini_threshold: gini on train and validate/95% bootstrap should be greater than this
        gini_decrease_threshold: gini decrease from train to validate (and 95% bootstrap deviation from the mean, relative to the mean) should be less than this
gini_increase_restrict: if gini increase should also be restricted
verbose: if comments and graphs should be printed
with_test: add checking of gini values on test (calculation is always on)
out: a boolean for image output or a path for csv/xlsx output file to export gini values
out_images: a path for image output (default - GiniChecker/)
sep: the separator to be used in case of csv export
Returns
----------
Dictionary with results of check for all features from input WOE object
'''
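        # Illustrative usage (a sketch; the output file name is hypothetical):
        #   check_results = GiniChecker().work_all(woe, drop_features=True, gini_threshold=5,
        #                                          gini_decrease_threshold=0.2, out='gini_by_samples.xlsx')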
if features is None:
cycle_features=list(woe.feature_woes)
else:
cycle_features=list(features)
not_in_features_woe=[x for x in cycle_features if x not in woe.feature_woes]
if len(not_in_features_woe)>0:
print('No', not_in_features_woe, 'in WOE.feature_woes. Abort.')
return None
if out:
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
gini_correct={}
if isinstance(out, str):
gini_df=pd.DataFrame(columns=['Train', 'Validate', 'Test']+['Bootstrap'+str(i) for i in range(len(woe.datasamples.bootstrap))])
for feature in cycle_features:
if isinstance(out, str):
gini_correct[feature], gini_values=self.work(woe.feature_woes[feature], datasamples=woe.datasamples, gini_threshold=gini_threshold,
gini_decrease_threshold=gini_decrease_threshold,
gini_increase_restrict=gini_increase_restrict, verbose=verbose, with_test=with_test,
out=out, out_images=out_images)
#print(feature, gini_values)
gini_df=gini_df.append(pd.DataFrame(gini_values, index=[feature]))
else:
gini_correct[feature]=self.work(woe.feature_woes[feature], datasamples=woe.datasamples, gini_threshold=gini_threshold,
gini_decrease_threshold=gini_decrease_threshold,
gini_increase_restrict=gini_increase_restrict, verbose=verbose, with_test=with_test,
out=out, out_images=out_images)
if isinstance(out, str):
gini_df=gini_df[['Train', 'Validate', 'Test']+['Bootstrap'+str(i) for i in range(len(woe.datasamples.bootstrap))]].dropna(axis=1)
if out[-4:]=='.csv':
gini_df.to_csv(out, sep = sep)
elif out[-4:]=='.xls' or out[-5:]=='.xlsx':
writer = pd.ExcelWriter(out, engine='openpyxl')
gini_df.style.apply(color_background,
mn=gini_df.min().min(), mx=gini_df.max().max(), cmap='RdYlGn').to_excel(writer, sheet_name='Gini by Samples')
# Get the openpyxl objects from the dataframe writer object.
worksheet = writer.sheets['Gini by Samples']
for x in worksheet.columns:
worksheet.column_dimensions[x[0].column].width = 40 if x[0].column=='A' else 12
writer.save()
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
if drop_features:
woe.excluded_feature_woes.update({x:woe.feature_woes[x] for x in woe.feature_woes if gini_correct[x]==False})
woe.feature_woes={x:woe.feature_woes[x] for x in woe.feature_woes if gini_correct[x]}
return gini_correct
def work_tree(self, dtree, input_df=None, gini_threshold=5, gini_decrease_threshold=0.2, gini_increase_restrict=True,
verbose=False, with_test=False, out=False):
'''
Checks if gini of the tree is significant and stable enough
Parameters
-----------
        dtree: a cross.DecisionTree object (its datasamples attribute provides the samples to check the input tree on)
        input_df: a DataFrame, containing tree description (by default dtree.tree is used)
        gini_threshold: gini on train and validate/95% bootstrap should be greater than this
        gini_decrease_threshold: gini decrease from train to validate (and 95% bootstrap deviation from the mean, relative to the mean) should be less than this
gini_increase_restrict: if gini increase should also be restricted
verbose: if comments and graphs should be printed
with_test: add checking of gini values on test (calculation is always on)
out: a boolean flag for gini values output
Returns
----------
Boolean - whether the check was successful
and if out==True then dictionary of gini values for all available samples
'''
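        # Illustrative usage (a sketch; assumes a fitted cross.DecisionTree object dtree):
        #   is_ok, gini_by_sample = GiniChecker().work_tree(dtree, gini_threshold=5, out=True)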
if input_df is None:
tree_df=dtree.tree.copy()
else:
tree_df=input_df.copy()
datasamples=dtree.datasamples
features=[x for x in dtree.features if x in tree_df]
#[x for x in tree_df.columns[:tree_df.columns.get_loc('node')] if tree_df[x].dropna().shape[0]>0]
if verbose:
print('Checking tree on', str(features))
gini_correct=True
samples=[datasamples.train, datasamples.validate, datasamples.test]
sample_names=['Train', 'Validate', 'Test']
gini_values={}
for si in range(len(samples)):
if samples[si] is not None:
to_check=samples[si].keep(features=features).dataframe
to_check['woe']=dtree.transform(to_check, tree_df, ret_values=['woe'])
fpr, tpr, _ = roc_curve(to_check[samples[si].target], -to_check['woe'])
gini = (2*auc(fpr, tpr)-1)*100
gini_values[samples[si].name]=gini
if verbose:
print(samples[si].name+' gini = '+str(round(gini,2)))
if with_test or samples[si].name!='Test':
if gini<gini_threshold:
gini_correct=False
if verbose:
                            print(samples[si].name+' gini is less than threshold '+str(gini_threshold))
if samples[si].name!='Train':
decrease=1-gini/gini_values['Train']
if decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
                                print('Gini decrease from Train to '+samples[si].name+' is greater than threshold: '+str(round(decrease,5))+' > '+str(gini_decrease_threshold))
if gini_increase_restrict and -decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
                                print('Gini increase from Train to '+samples[si].name+' is greater than threshold: '+str(round(-decrease,5))+' > '+str(gini_decrease_threshold))
else:
gini_values[sample_names[si]]=None
gini_list=[]
if datasamples.bootstrap_base is not None:
base_with_woe=datasamples.bootstrap_base.keep(features=features).dataframe
base_with_woe['woe']=dtree.transform(base_with_woe, tree_df, ret_values=['woe'])
for bn in range(len(datasamples.bootstrap)):
to_check=base_with_woe.iloc[datasamples.bootstrap[bn]]
fpr, tpr, _ = roc_curve(to_check[datasamples.bootstrap_base.target], -to_check['woe'])
roc_auc = auc(fpr, tpr)
gini_list.append(round((roc_auc*2 - 1)*100, 2))
mean=np.mean(gini_list)
std=np.std(gini_list)
if verbose>True:
sns.distplot(gini_list)
plt.axvline(x=mean, linestyle='--', alpha=0.5)
plt.text(mean, 0, ' Mean = '+str(round(mean,2))+', std = '+str(round(std,2)),
horizontalalignment='right', verticalalignment='bottom', rotation=90)
plt.xlabel('Gini values in bootstrap')
plt.ylabel('Distribution')
plt.title('Tree on '+str(features), fontsize = 16)
plt.show()
elif verbose:
print('Bootstrap: mean = '+str(round(mean,2))+', std = '+str(round(std,2)))
if mean-1.96*std<gini_threshold:
gini_correct=False
if verbose:
                    print('Less than 95% of gini distribution is greater than threshold: (mean-1.96*std) '+str(round(mean-1.96*std,5))+' < '+str(gini_threshold))
val_decrease=1.96*std/mean
if val_decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
                    print('Gini deviation from mean for 95% of distribution is greater than threshold: (1.96*std/mean) '+str(round(val_decrease,5))+' > '+str(gini_decrease_threshold))
if out:
gini_values.update({'Bootstrap'+str(i):gini_list[i] for i in range(len(gini_list))})
return gini_correct, gini_values
else:
return gini_correct
#---------------------------------------------------------------
# Author - <NAME>
class BusinessLogicChecker(Processor):
'''
Class for business logic checking
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, feature, conditions='', verbose=False, out=None):
'''
Checks if the business logic condition is True
Parameters
-----------
feature: an object of FeatureWOE type that should be checked
conditions: a string with business logic conditions
for feature.categorical==True: 'cond_1;cond_2;...;cond_n', where cond_i
is 'A sign B', where A and B
are comma-separated lists of values (or nothing, but not both at the same time)
and where sign
is one of the following: <, >, =, <=, >=
each condition compares risk of bins with values from A to risk of bins with values from B (if B is omitted,
then risk of bins with values from A is compared to risk of bins with values not in A);
                            > means that risk of the second values group is smaller than the risk of the first values group (and values from
different groups cannot be in one bin), < means the opposite (again, values from different groups cannot be in one
bin), adding = allows values from different groups to be in one bin;
ALL of the conditions should be True or conditions should be empty for the input feature to pass the check
-----------------------------------------------------------------------------------------------------------
for feature.categorical==False:'cond_1;cond_2;....;cond_n (excl_1;...;excl_n)', where cond_i
is 'sign_1 value_2 sign_2 value_3 sign_3 ... value_n sign_n', where sign_i
is one of the following: <, >
and where value_i
is a float/int and can be omitted
and where excl_i
is a float/int and can be omitted (if there is not excl_i at all, then parentheses can be omitted too)
                            each condition describes how risk should change as feature values increase;
                            > means that risk will be monotonically decreasing with increasing values, < means the opposite, >< means that
                            risk decreases and then increases; adding a value between signs tells that the bin with this value should hold
the local risk extremum (>N< means that the bin with N in it should have the least risk);
adding values in () will result in exclusion of bins with these values before risk trend checking (and so bins
with these values are ignored);
                            each condition should start with a sign and end with a sign, a single sign is permitted, values between signs
can be omitted;
ANY one of the conditions should be True for the input feature to pass the check
                            in case of conditions==None or conditions=='' the checker will return True if the risk trend is monotonically
                            increasing/decreasing (the same check will be processed if only values to exclude are provided)
verbose: if comments and graphs should be printed
out: a path for csv/xlsx output file to export business logic check results
Returns
----------
Boolean - whether the check was successful
and if out is not None then dataframe of check log
'''
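        # Illustrative usage (a sketch; feature objects, values and conditions are hypothetical):
        #   blc = BusinessLogicChecker()
        #   # interval feature: risk should decrease monotonically, ignoring bins that contain 0
        #   blc.work(fw_interval, conditions='> (0)', verbose=True)
        #   # categorical feature: bins with 'C' or 'D' should be riskier than bins with 'A' or 'B'
        #   blc.work(fw_categorical, conditions="'C','D'>'A','B'", verbose=True)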
if out is not None:
out_df=pd.DataFrame(columns=['feature', 'categorical', 'condition', 'fact', 'condition_result'])
if feature.categorical == False:
woes_dropna={feature.groups[x][0]:feature.woes[x] for x in feature.woes if isinstance(feature.groups[x],list)}
groups_info=pd.DataFrame(woes_dropna, index=['woe']).transpose().reset_index().rename({'index':'lower'}, axis=1)
groups_info['upper']=groups_info['lower'].shift(-1).fillna(np.inf)
if groups_info.shape[0]==1:
if verbose:
print('Only 1 group with non-missing values is present. Skipping trend check..')
all_cond_correct=True
else:
all_cond_correct=False
for c in conditions.split(';'):
#find all floats/ints between > and < - minimal risk
#first there should be >, then + or - or nothing, then at least one digit, then . or , or nothing, then zero or more digits and < after that
min_risk = re.findall('(?<=>)[-+]?\d+[.,]?\d*(?=<)', c)
#find all floats/ints between < and > - maximal risk
max_risk = re.findall('(?<=<)[-+]?\d+[.,]?\d*(?=>)', c)
#find all floats/ints between ( and ), ( and ; or ; and ) - values to exclude (without risk checking)
excl_risk = re.findall('(?<=[(;])[-+]?\d+[.,]?\d*(?=[;)])', c)
clear_condition=''.join(x for x in c if x in '<>')
gi_check=groups_info.dropna(how='all', subset=['lower','upper'])[['woe','lower','upper']].copy()
for excl in excl_risk:
gi_check=gi_check[((gi_check['lower']<=float(excl)) & (gi_check['upper']>float(excl)))==False]
gi_check['risk_trend']=np.sign((gi_check['woe']-gi_check['woe'].shift(1)).dropna()).apply(lambda x: '+' if (x<0) else '-' if (x>0) else '0')
trend=gi_check['risk_trend'].str.cat()
reg_exp=r''
for s in clear_condition:
if s=='>':
reg_exp=reg_exp+r'-+'
if s=='<':
reg_exp=reg_exp+r'\++'
if len(reg_exp)==0:
reg_exp='-*|\+*'
if re.fullmatch(reg_exp, trend):
trend_correct=True
if verbose:
print('Risk trend in data is consistent with input trend: input ', clear_condition, ', data ', trend)
else:
trend_correct=False
if verbose:
print('Risk trend in data is not consistent with input trend: input ', clear_condition, ', data ', trend)
#local risk minimums
min_risk_data=gi_check[(gi_check['risk_trend']=='-') & (gi_check['risk_trend'].shift(-1)=='+')].reset_index(drop=True)
min_risk_correct=True
for mr in range(len(min_risk)):
if mr+1<=min_risk_data.shape[0]:
if verbose:
print(feature.feature+': checking min risk in', min_risk[mr], '(between ', min_risk_data['lower'].loc[mr], ' and ', min_risk_data['upper'].loc[mr], ')')
min_risk_correct=min_risk_correct and (float(min_risk[mr])>=min_risk_data['lower'].loc[mr] and float(min_risk[mr])<min_risk_data['upper'].loc[mr])
else:
if verbose:
print(feature.feature+': not enough minimums in data to check', min_risk[mr])
min_risk_correct=False
#local risk maximums
max_risk_data=gi_check[(gi_check['risk_trend']=='+') & (gi_check['risk_trend'].shift(-1)=='-')].reset_index(drop=True)
max_risk_correct=True
for mr in range(len(max_risk)):
if mr+1<=max_risk_data.shape[0]:
if verbose:
print(feature.feature+': checking max risk in', max_risk[mr], '(between ', max_risk_data['lower'].loc[mr], ' and ', max_risk_data['upper'].loc[mr], ')')
max_risk_correct=max_risk_correct and (float(max_risk[mr])>=max_risk_data['lower'].loc[mr] and float(max_risk[mr])<max_risk_data['upper'].loc[mr])
else:
if verbose:
print(feature.feature+': not enough maximums in data to check', max_risk[mr])
                            max_risk_correct=False
all_cond_correct=all_cond_correct or (trend_correct and min_risk_correct and max_risk_correct)
if out is not None:
out_df=out_df.append(dict(feature=feature.feature, categorical=feature.categorical, condition=c, fact=trend, condition_result=trend_correct and min_risk_correct and max_risk_correct), ignore_index=True)
if verbose:
if all_cond_correct:
print(feature.feature+': business logic check succeeded.')
else:
fig=plt.figure(figsize=(5,0.5))
plt.plot(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
groups_info.dropna(how='all', subset=['lower','upper'])['woe'], color='red')
plt.xticks(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
round(groups_info.dropna(how='all', subset=['lower','upper'])['lower'],3))
plt.ylabel('WoE')
fig.autofmt_xdate()
plt.show()
print(feature.feature+': business logic check failed.')
if out is not None:
return all_cond_correct, out_df[['feature', 'categorical', 'condition', 'fact', 'condition_result']]
else:
return all_cond_correct
else:
all_cond_correct=True
if conditions!='':
w={}
for x in feature.groups:
for y in feature.groups[x]:
w[y]=feature.woes[x]
groups_info=pd.DataFrame(w, index=['woe']).transpose().reset_index().rename({'index':'categories'}, axis=1)
groups_info=groups_info[groups_info['categories']!=-np.inf].reset_index(drop=True).copy()
cond_types2=['>=','=>','<=','=<']
cond_types1=['>','<','=']
for c in conditions.split(';'):
c0=[]
c1=[]
cond_type=[x for x in cond_types2 if x in c]
if len(cond_type)==0:
cond_type=[x for x in cond_types1 if x in c]
cond_type=cond_type[0]
if cond_type in ['>=', '=>', '>']:
c0=ast.literal_eval('['+c[:c.find(cond_type)]+']')
c1=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
elif cond_type in ['<=', '=<', '<']:
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
elif cond_type=='=':
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
can_be_equal=('=' in cond_type)
groups_info['risk_group']=groups_info['categories'].apply(lambda x: 0 if (x in c0 or (len(c0)==0 and x not in c1)) else 1 if (x in c1 or (len(c1)==0 and x not in c0)) else np.nan)
cond_correct = (cond_type!='=' and groups_info[groups_info['risk_group']==0]['woe'].max()<groups_info[groups_info['risk_group']==1]['woe'].min()) or (can_be_equal and (groups_info[groups_info['risk_group']==0]['woe'].max()==groups_info[groups_info['risk_group']==1]['woe'].min() or c0==c1))
all_cond_correct=all_cond_correct and cond_correct
if verbose:
print(feature.feature+': checking condition '+ c + ' => ' + str(cond_correct))
if out is not None:
out_df=out_df.append(dict(feature=feature.feature, categorical=feature.categorical, condition=c, fact='', condition_result=cond_correct), ignore_index=True)
if verbose:
print(feature.feature+': conditions ' + conditions + ' => ' + str(all_cond_correct))
else:
if verbose:
print(feature.feature+': no conditions were specified, business logic check succeeded.')
if out is not None:
return all_cond_correct, out_df[['feature', 'categorical', 'condition', 'fact', 'condition_result']]
else:
return all_cond_correct
#added 13.08.2018 by <NAME>
def work_all(self, woe, features=None, input_conditions=None, drop_features=False, verbose=False, out=None, sep=';'):
'''
Checks if business logic conditions for all features from the WOE object are True
Parameters
-----------
        woe: an object of WOE type that should be checked
        features: a list of features to check (by default - all features from woe.feature_woes)
        input_conditions: a dictionary, a DataFrame or a path to a csv/excel file with business logic conditions (a 'feature'/'variable'/'var' column and a 'conditions' column are expected)
drop_features: should the features be dropped from WOE.feature_woes list in case of failed checks
verbose: if comments and graphs should be printed
out: a path for csv/xlsx output file to export business logic check results
sep: the separator to be used in case of csv export
Returns
----------
Dictionary with results of check for all features from input WOE object
'''
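        # Illustrative usage (a sketch; the conditions file name is hypothetical and the file needs a
        # 'feature'/'variable'/'var' column and a 'conditions' column):
        #   bl_results = BusinessLogicChecker().work_all(woe, input_conditions='bl_conditions.xlsx',
        #                                                drop_features=True, out='bl_check.xlsx')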
if out is not None:
out_df=pd.DataFrame(columns=['feature', 'categorical', 'condition', 'fact', 'condition_result', 'overall_result'])
if features is None:
cycle_features=list(woe.feature_woes)
else:
cycle_features=list(features)
not_in_features_woe=[x for x in cycle_features if x not in woe.feature_woes]
if len(not_in_features_woe)>0:
print('No', not_in_features_woe, 'in self.feature_woes. Abort.')
return None
business_logic_correct={}
'''
if conditions_dict is not None:
if isinstance(conditions_dict, dict):
conditions_dict=pd.DataFrame(conditions_dict, index=['conditions']).T
elif isinstance(conditions_dict, str) and (conditions_dict[-5:]=='.xlsx' or conditions_dict[-4:]=='.xls'):
try:
conditions=pd.read_excel(conditions_dict).set_index('variable')
conditions['conditions']=conditions['conditions'].apply(lambda x: '' if (pd.isnull(x)) else x)
except Exception:
print('No conditions dictionary was found / no "variable" or "conditions" fields were found. Abort.')
return None
elif isinstance(conditions_dict, str):
conditions_dict=pd.DataFrame({x:conditions_dict for x in cycle_features},
index=['conditions']).T
else:
conditions=pd.DataFrame()
'''
if input_conditions is None:
conditions_dict=pd.DataFrame(columns=['feature', 'conditions'])
elif isinstance(input_conditions, dict) or isinstance(input_conditions, pd.DataFrame):
conditions_dict=input_conditions.copy()
elif isinstance(input_conditions, str):
if input_conditions[-4:]=='.csv':
conditions_dict=pd.read_csv(input_conditions, sep = sep)
elif input_conditions[-4:]=='.xls' or input_conditions[-5:]=='.xlsx':
conditions_dict=pd.read_excel(input_conditions)
else:
                print('Unknown format for path to conditions dictionary file. Return None.')
                return None
elif isinstance(input_conditions, tuple):
conditions_dict={x:input_conditions[0] if x not in woe.categorical else input_conditions[1] for x in cycle_features}
else:
print('Unknown format for conditions dictionary file. Return None')
return None
if isinstance(conditions_dict, pd.DataFrame):
for v in ['feature', 'variable', 'var']:
if v in conditions_dict:
break
try:
conditions_dict=dict(conditions_dict.fillna('').set_index(v)['conditions'])
except Exception:
print("No 'feature' ,'variable', 'var' or 'conditions' field in input pandas.DataFrame. Return None.")
return None
for feature in cycle_features:
if feature not in conditions_dict:
current_conditions=''
else:
current_conditions=conditions_dict[feature]
if out is not None:
business_logic_correct[feature], out_feature_df=self.work(woe.feature_woes[feature], conditions=current_conditions, verbose=verbose, out=out)
out_feature_df['overall_result']=business_logic_correct[feature]
out_df=out_df.append(out_feature_df, ignore_index=True)
else:
business_logic_correct[feature]=self.work(woe.feature_woes[feature], conditions=current_conditions, verbose=verbose, out=out)
if drop_features:
woe.excluded_feature_woes.update({x:woe.feature_woes[x] for x in woe.feature_woes if business_logic_correct[x]==False})
woe.feature_woes={x:woe.feature_woes[x] for x in woe.feature_woes if business_logic_correct[x]}
if out is not None:
out_df=out_df[['feature', 'categorical', 'condition', 'fact', 'condition_result', 'overall_result']]
#display(out_df)
if out[-4:]=='.csv':
out_df.to_csv(out, sep = sep)
elif out[-4:]=='.xls' or out[-5:]=='.xlsx':
writer = pd.ExcelWriter(out, engine='openpyxl')
out_df.style.apply(self.color_result, subset=pd.IndexSlice[:,['condition_result', 'overall_result']]).to_excel(writer, sheet_name='Business Logic', index=False)
# Get the openpyxl objects from the dataframe writer object.
worksheet = writer.sheets['Business Logic']
for x in worksheet.columns:
worksheet.column_dimensions[x[0].column].width = 40 if x[0].column=='A' else 20
writer.save()
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
return business_logic_correct
def work_tree(self, dtree, input_df=None, input_conditions=None, max_corrections=None, sep=';', to_correct=False, verbose=False):
'''
Checks if the business logic conditions are True in every node of the input tree and corrects the tree for it to pass the check
Parameters
-----------
dtree: a cross.DecisionTree object to check
input_df: a DataFrame, containing tree description
input_conditions: a DataFrame, a dictionary or a string with a path to conditions dictionary (in case of DataFrame or string
the field with features' names should be called 'feature', 'variable' or 'var')
for categorical features: 'cond_1;cond_2;...;cond_n', where cond_i
is 'A sign B', where A and B
are comma-separated lists of values (or nothing, but not both at the same time)
and where sign
is one of the following: <, >, =, <=, >=
each condition compares risk of bins with values from A to risk of bins with values from B (if B is omitted,
then risk of bins with values from A is compared to risk of bins with values not in A);
                                > means that risk of the second values group is smaller than the risk of the first values group (and values from
different groups cannot be in one bin), < means the opposite (again, values from different groups cannot be in one
bin), adding = allows values from different groups to be in one bin;
ALL of the conditions should be True or conditions should be empty for the input feature to pass the check
-----------------------------------------------------------------------------------------------------------
for interval features:'cond_1;cond_2;....;cond_n (excl_1;...;excl_n)', where cond_i
is 'sign_1 sign_2 sign_3 ... sign_n', where sign_i
is one of the following: <, >
and where excl_i
is a float/int and can be omitted (if there is not excl_i at all, then parentheses can be omitted too)
each condition describes how should risk be changing when feature values are increasing;
                                > means that risk will be monotonically decreasing with increasing values, < means the opposite, >< means that
                                risk decreases and then increases; values between signs will be ignored because for most nodes the entire sample won't be
                                available for division, so the absence of extremum values or the presence of new local extremums should not be prohibited;
adding values in () will result in exclusion of bins with these values before risk trend checking (and so bins
with these values are ignored);
each condition should start with a sign and end with a sign, one sign is permitted;
ANY one of the conditions should be True for the input feature to pass the check
                                in case of conditions==None or conditions=='' the checker will return True if the risk trend is monotonically
                                increasing/decreasing (the same check will be processed if only values to exclude are provided)
max_corrections: maximal number of corrections in attempt to change the tree so it will pass the check
sep: a separator in case of csv import for conditions dictionary
to_correct: should there be attempts to correct tree by uniting nodes or not
verbose: if comments and graphs should be printed
Returns
----------
if to_correct:
True and a DataFrame with tree description - corrected or initial
else:
result of the input tree check and the input tree itself
'''
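        # Illustrative usage (a sketch; feature names and conditions are hypothetical):
        #   passed, checked_tree = BusinessLogicChecker().work_tree(dtree,
        #                              input_conditions={'age': '<', 'income': '>'},
        #                              to_correct=True, verbose=True)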
#-----------------------------------------------Subsidiary functions--------------------------------------------------
def bl_check_categorical(df, conditions, verbose=False, missing_group_is_correct=True):
'''
TECH
Check correctness of conditions for a categorical feature
Parameters
-----------
df: a DataFrame, containing lists of categories and WoE values
conditions: a string, containing business logic conditions for a feature
verbose: if comments should be printed
            missing_group_is_correct: should the absence of any value from the condition in the input data be considered
                a successful check or not
Returns
----------
boolean flag of successful check
'''
all_cond_correct=True
if conditions!='':
tree_df=df.copy()
#display(tree_df)
cat_woes=[]
for i in tree_df.index:
categories, n, w = tree_df.loc[i]
#display(tree_df.loc[i])
#display(categories)
for c in categories:
cat_woes.append([c, n, w])
groups_info=pd.DataFrame(cat_woes, columns=['categories', 'nodes', 'woe'])
#display(groups_info)
cond_types2=['>=','=>','<=','=<']
cond_types1=['>','<','=']
for c in conditions.split(';'):
c0=[]
c1=[]
cond_type=[x for x in cond_types2 if x in c]
if len(cond_type)==0:
cond_type=[x for x in cond_types1 if x in c]
cond_type=cond_type[0]
if cond_type in ['>=', '=>', '>']:
c0=ast.literal_eval('['+c[:c.find(cond_type)]+']')
c1=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
elif cond_type in ['<=', '=<', '<']:
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
elif cond_type=='=':
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
can_be_equal=('=' in cond_type)
groups_info['risk_group']=groups_info['categories'].apply(lambda x: 0 if (x in c0 or (len(c0)==0 and x not in c1)) else 1 if (x in c1 or (len(c1)==0 and x not in c0)) else np.nan)
cond_correct = (cond_type!='=' and groups_info[groups_info['risk_group']==0]['woe'].max()<groups_info[groups_info['risk_group']==1]['woe'].min()) or \
(can_be_equal and (groups_info[groups_info['risk_group']==0]['woe'].max()==groups_info[groups_info['risk_group']==1]['woe'].min() or c0==c1)) or \
(missing_group_is_correct and len(groups_info['risk_group'].dropna().unique())<2)
all_cond_correct=all_cond_correct and cond_correct
if verbose:
print('\tChecking condition '+ c + ' => ' + str(cond_correct))
if verbose:
print('\tConditions ' + conditions + ' => ' + str(all_cond_correct))
elif verbose:
print('\tNo conditions were specified, business logic check succeeded.')
return all_cond_correct
def bl_check_interval(df, conditions, verbose=False):
'''
TECH
Check correctness of conditions for an interval feature
Parameters
-----------
df: a DataFrame, containing intervals' descriptions and WoE values
conditions: a string, containing business logic conditions for a feature
verbose: if comments should be printed
Returns
----------
boolean flag of successful check
'''
tree_df=df.copy()
split_feature=tree_df.columns[0]
groups_info=tree_df[pd.isnull(tree_df[split_feature])==False]
groups_info['upper']=groups_info[split_feature].apply(lambda x: x[0][1] if pd.isnull(x[1]) else x[1])
groups_info['lower']=groups_info[split_feature].apply(lambda x: x[0][0] if pd.isnull(x[1]) else x[0])
#display(groups_info)
if groups_info.shape[0]==1:
if verbose:
print('\tOnly 1 group with non-missing values is present. Skipping trend check..')
all_cond_correct=True
else:
all_cond_correct=False
for c in conditions.split(';'):
#find all floats/ints between > and < - minimal risk
#first there should be >, then + or - or nothing, then at least one digit, then . or , or nothing, then zero or more digits and < after that
#min_risk = re.findall('(?<=>)[-+]?\d+[.,]?\d*(?=<)', c)
#find all floats/ints between < and > - maximal risk
#max_risk = re.findall('(?<=<)[-+]?\d+[.,]?\d*(?=>)', c)
#find all floats/ints between ( and ), ( and ; or ; and ) - values to exclude (without risk checking)
                    excl_risk = re.findall(r'(?<=[(;])[-+]?\d+[.,]?\d*(?=[;)])', c)
clear_condition=''.join(x for x in c if x in '<>')
gi_check=groups_info.dropna(how='all', subset=['lower','upper'])[['woe','lower','upper']].copy()
for excl in excl_risk:
gi_check=gi_check[((gi_check['lower']<=float(excl)) & (gi_check['upper']>float(excl)))==False]
gi_check['risk_trend']=np.sign((gi_check['woe']-gi_check['woe'].shift(1)).dropna()).apply(lambda x: '+' if (x<0) else '-' if (x>0) else '0')
trend=gi_check['risk_trend'].str.cat()
reg_exp=r''
for s in clear_condition:
if s=='>':
reg_exp=reg_exp+r'-+'
if s=='<':
reg_exp=reg_exp+r'\++'
if len(reg_exp)==0:
                        reg_exp=r'-*|\+*'
if re.fullmatch(reg_exp, trend):
trend_correct=True
if verbose:
print('\tRisk trend in data is consistent with input trend: input ', clear_condition, ', data ', trend)
else:
trend_correct=False
if verbose:
print('\tRisk trend in data is not consistent with input trend: input ', clear_condition, ', data ', trend)
'''#local risk minimums
min_risk_data=gi_check[(gi_check['risk_trend']=='-') & (gi_check['risk_trend'].shift(-1)=='+')].reset_index(drop=True)
min_risk_correct=True
for mr in range(len(min_risk)):
if mr+1<=min_risk_data.shape[0]:
if verbose:
print('\tChecking min risk in', min_risk[mr], '(between ', min_risk_data['lower'].loc[mr], ' and ', min_risk_data['upper'].loc[mr], ')')
min_risk_correct=min_risk_correct and (float(min_risk[mr])>=min_risk_data['lower'].loc[mr] and float(min_risk[mr])<min_risk_data['upper'].loc[mr])
else:
if verbose:
print('\tNot enough minimums in data to check', min_risk[mr])
min_risk_correct=False
#local risk maximums
max_risk_data=gi_check[(gi_check['risk_trend']=='+') & (gi_check['risk_trend'].shift(-1)=='-')].reset_index(drop=True)
max_risk_correct=True
for mr in range(len(max_risk)):
if mr+1<=max_risk_data.shape[0]:
if verbose:
print('\tChecking max risk in', max_risk[mr], '(between ', max_risk_data['lower'].loc[mr], ' and ', max_risk_data['upper'].loc[mr], ')')
max_risk_correct=max_risk_correct and (float(max_risk[mr])>=max_risk_data['lower'].loc[mr] and float(max_risk[mr])<max_risk_data['upper'].loc[mr])
else:
if verbose:
print('\tNot enough maximums in data to check', max_risk[mr])
min_risk_correct=False
all_cond_correct=all_cond_correct or (trend_correct and min_risk_correct and max_risk_correct)'''
all_cond_correct=all_cond_correct or trend_correct
if verbose:
if all_cond_correct:
print('\tBusiness logic check succeeded.')
else:
fig=plt.figure(figsize=(5,0.5))
plt.plot(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
groups_info.dropna(how='all', subset=['lower','upper'])['woe'], color='red')
plt.xticks(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
round(groups_info.dropna(how='all', subset=['lower','upper'])['lower'],3))
plt.ylabel('WoE')
fig.autofmt_xdate()
plt.show()
print('\tBusiness logic check failed.')
return all_cond_correct
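        # Usage sketch (hypothetical bins): each cell of the first column is a pair of bin edges
        # [lower, upper] (the nested [[lower, upper], NaN] form is also handled). A strictly
        # decreasing WoE trend satisfies the '<' condition:
        #   demo = pd.DataFrame({'x': [[0, 10], [10, 20]], 'node': [1, 2], 'woe': [0.4, -0.2]})
        #   bl_check_interval(demo, '<')   # -> True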
def bl_recursive_correct(tree_df, node, allowed_corrections=1, corrections=None, conditions='', max_corrections=1,
verbose=False):
'''
TECH
Recursive search of corrections needed for tree to pass business logic checks
Parameters
-----------
tree_df: a DataFrame, containing tree description
node: a node number, whose children are corrected and checked
            allowed_corrections: the number of remaining corrections that are still allowed
            max_corrections: the maximal number of corrections allowed while changing the tree so that it passes the check
            corrections: the list of corrections made so far
            conditions: a string containing business logic conditions for the feature by which the current node was split
verbose: if comments and graphs should be printed
Returns
----------
            boolean flag of the corrected tree passing the check and
            the list of corrections that were made
'''
if corrections is None:
corrections=[]
split_feature=tree_df[(tree_df['node']==node)]['split_feature'].values[0]
if allowed_corrections>0:
possible_nodes_to_correct=sorted(tree_df[(tree_df['parent_node']==node)]['node'].tolist())
combinations=[]
for n1 in range(len(possible_nodes_to_correct)):
for n2 in range(len(possible_nodes_to_correct[n1+1:])):
if dtree.check_unitability(tree_df, [possible_nodes_to_correct[n1], possible_nodes_to_correct[n1+1:][n2]]):
first_condition=tree_df[(tree_df['node']==possible_nodes_to_correct[n1])][split_feature].values[0]
if not(isinstance(first_condition, list) or isinstance(first_condition, tuple)):
nodes_combination=[possible_nodes_to_correct[n1+1:][n2], possible_nodes_to_correct[n1]]
else:
nodes_combination=[possible_nodes_to_correct[n1], possible_nodes_to_correct[n1+1:][n2]]
combinations.append([nodes_combination,
abs(tree_df[tree_df['node']==possible_nodes_to_correct[n1]]['woe'].values[0]- \
tree_df[tree_df['node']==possible_nodes_to_correct[n1+1:][n2]]['woe'].values[0])])
combinations.sort(key=itemgetter(1))
for nodes_to_unite, woe in combinations:
if verbose:
print('Checking (',(max_corrections-allowed_corrections+1),'): for node', node, 'uniting children', str(nodes_to_unite), 'with woe difference =', woe)
tree_df_corrected=dtree.unite_nodes(tree_df, nodes_to_unite)
#display(tree_df_corrected)
if tree_df_corrected.shape[0]!=tree_df.shape[0]:
correct, final_corrections=bl_recursive_correct(tree_df_corrected, node, allowed_corrections-1, corrections+[nodes_to_unite],
conditions, max_corrections=max_corrections, verbose=verbose)
else:
correct=False
if correct:
return correct, final_corrections
else:
return False, corrections
else:
df_to_check=tree_df[(tree_df['parent_node']==node)][[split_feature, 'node', 'woe']]
categorical=sum([isinstance(x, list) for x in df_to_check[split_feature]])>0
if verbose:
print('Node', node, split_feature, (': Checking categorical business logic..' if categorical \
else ': Checking interval business logic..'))
correct=bl_check_categorical(df_to_check, conditions, verbose=verbose) if categorical \
else bl_check_interval(df_to_check, conditions, verbose=verbose)
return correct, corrections
#---------------------------------------------------------------------------------------------------------------------
if input_df is None:
tree_df=dtree.tree.copy()
else:
tree_df=input_df.copy()
features=[x for x in dtree.features if x in tree_df]
if input_conditions is None:
conditions_dict=pd.DataFrame(columns=['feature', 'conditions'])
elif isinstance(input_conditions, dict) or isinstance(input_conditions, pd.DataFrame):
conditions_dict=input_conditions.copy()
elif isinstance(input_conditions, str):
if input_conditions[-4:]=='.csv':
conditions_dict=pd.read_csv(input_conditions, sep = sep)
elif input_conditions[-4:]=='.xls' or input_conditions[-5:]=='.xlsx':
conditions_dict=pd.read_excel(input_conditions)
else:
                print('Unknown format for path to conditions dictionary file. Return None.')
                return None
elif isinstance(input_conditions, tuple):
conditions_dict={x:input_conditions[0] if x not in dtree.categorical else input_conditions[1] for x in features}
else:
print('Unknown format for conditions dictionary file. Return None')
return None
if isinstance(conditions_dict, pd.DataFrame):
for v in ['feature', 'variable', 'var']:
if v in conditions_dict:
break
try:
conditions_dict=dict(conditions_dict.fillna('').set_index(v)['conditions'])
except Exception:
print("No 'feature' ,'variable', 'var' or 'conditions' field in input pandas.DataFrame. Return None.")
return None
#tree_df['split_feature'].dropna().unique().tolist()
categorical={}
for f in features:
if f not in conditions_dict:
conditions_dict[f]=''
categorical[f]=sum([isinstance(x,list) for x in tree_df[f]])>0
nodes_to_check=tree_df[tree_df['leaf']==False].sort_values(['depth', 'node'])['node'].tolist()
current_node_index=0
to_check=True
correct_all=True
while to_check:
node=nodes_to_check[current_node_index]
to_check=False
split_feature=tree_df.loc[tree_df['node']==node, 'split_feature'].values[0]
conditions=conditions_dict[split_feature]
if conditions is None:
if verbose:
print('Node', node, split_feature, ': <None> conditions specified, skipping..')
correct=True
else:
df_to_check=tree_df[(tree_df['parent_node']==node)][[split_feature, 'node', 'woe']]
if verbose:
print('Node', node, split_feature, (': Checking categorical business logic..' if categorical[split_feature] \
else ': Checking interval business logic..'))
correct=bl_check_categorical(df_to_check, conditions, verbose=verbose) if categorical[split_feature] \
else bl_check_interval(df_to_check, conditions, verbose=verbose)
correct_all=correct_all and correct
if correct==False and to_correct:
new_correct=False
if len(df_to_check['node'].unique())>2:
nodes_to_correct=sorted(df_to_check['node'].unique().tolist())
if max_corrections is None:
allowed_corrections=len(nodes_to_correct)-1
else:
allowed_corrections=min(len(nodes_to_correct)-1, max_corrections)
#print('correct', nodes_to_correct)
for cur_allowed_corrections in range(1,allowed_corrections):
new_correct, corrections=bl_recursive_correct(tree_df, node, allowed_corrections=cur_allowed_corrections, conditions=conditions,
max_corrections=allowed_corrections, verbose=verbose)
if new_correct:
break
if new_correct:
if verbose:
print('Successful corrections:', str(corrections))
for correction in corrections:
tree_df=dtree.unite_nodes(tree_df, correction)
if new_correct==False:
if verbose:
print('No successful corrections were found. Pruning node', node)
tree_df=dtree.prune(tree_df, node)
nodes_to_check=tree_df[tree_df['leaf']==False].sort_values(['depth', 'node'])['node'].tolist()
if current_node_index+1<len(nodes_to_check):
current_node_index+=1
to_check=True
if to_correct:
return True, tree_df
else:
return correct_all, tree_df
def color_result(self, x):
'''
TECH
Defines result cell color for excel export
Parameters
-----------
x: input values
Returns
--------
color description for style.apply()
'''
colors=[]
for e in x:
if e:
colors.append('background-color: green')
else:
colors.append('background-color: red')
return colors
#---------------------------------------------------------------
#added 13.08.2018 by <NAME>
class WOEOrderChecker(Processor):
'''
Class for WoE order checking
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, feature, datasamples, dr_threshold=0.01, correct_threshold=0.85, woe_adjust=0.5, miss_is_incorrect=True,
verbose=False, out=False, out_images='WOEOrderChecker/'):
'''
Checks if WoE order of the feature remains stable in bootstrap
Parameters
-----------
feature: an object of FeatureWOE type that should be checked
datasamples: an object of DataSamples type containing the samples to check input feature on
        dr_threshold: if the WoE order is not correct, then the default rate difference between the swapped bins is checked
correct_threshold: what part of checks on bootstrap should be correct for feature to pass the check
woe_adjust: woe adjustment factor (for Default_Rate_i formula)
        miss_is_incorrect: if there is no data for a bin in a bootstrap sample, should it be treated as an error or not
verbose: if comments and graphs should be printed
out: a boolean for image output or a path for csv/xlsx output file to export woe and er values
out_images: a path for image output (default - WOEOrderChecker/)
Returns
----------
Boolean - whether the check was successful
and if isinstance(out, str) then dataframes with WoE and ER values for groups per existing sample
'''
if out:
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
w={x:feature.woes[x] for x in feature.woes if feature.woes[x] is not None}
woes_df=pd.DataFrame(w, index=['Train']).transpose().reset_index().rename({'index':'group'},axis=1).sort_values('group')
if isinstance(out, str):
out_woes=woes_df.copy()
if feature.data.weights is None:
out_er=woes_df.drop('Train', axis=1).merge(feature.data.dataframe[['group', feature.data.target]].groupby('group', as_index=False).mean(),
on='group').rename({feature.data.target:'Train'}, axis=1)
else:
for_er=feature.data.dataframe[['group', feature.data.target, feature.data.weights]]
for_er[feature.data.target]=for_er[feature.data.target]*for_er[feature.data.weights]
out_er=woes_df.drop('Train', axis=1).merge(for_er[['group', feature.data.target]].groupby('group', as_index=False).mean(),
on='group').rename({feature.data.target:'Train'}, axis=1)
cur_sample_woe=pd.DataFrame(columns=['group', 'woe', 'event_rate'])
samples=[datasamples.validate, datasamples.test]
sample_names=['Validate', 'Test']
for si in range(len(samples)):
if samples[si] is not None:
to_keep=[feature.feature, samples[si].target]
if samples[si].weights is not None:
to_keep.append(samples[si].weights)
cur_sample=samples[si].dataframe[to_keep]
cur_sample['group']=feature.set_groups(woes=feature.woes, original_values=True, data=cur_sample[feature.feature])
#cur_sample=cur_sample.sort_values('group')
if samples[si].weights is None:
N_b = cur_sample[samples[si].target].sum()
N_g = (1-cur_sample[samples[si].target]).sum()
else:
N_b = cur_sample[cur_sample[samples[si].target] == 1][samples[si].weights].sum()
N_g = cur_sample[cur_sample[samples[si].target] == 0][samples[si].weights].sum()
DR = N_b*1.0/N_g
index=-1
# for each interval
for gr_i in sorted(cur_sample['group'].unique()):
index=index+1
if samples[si].weights is None:
N_b_i = cur_sample[cur_sample['group']==gr_i][samples[si].target].sum()
N_g_i = cur_sample[cur_sample['group']==gr_i].shape[0] - N_b_i
else:
N_b_i = cur_sample[(cur_sample['group']==gr_i)&(cur_sample[samples[si].target] == 1)][samples[si].weights].sum()
N_g_i = cur_sample[(cur_sample['group']==gr_i)&(cur_sample[samples[si].target] == 0)][samples[si].weights].sum()
if not(N_b_i==0 and N_g_i==0):
DR_i = (N_b_i + woe_adjust)/(N_g_i + woe_adjust)
ER_i=N_b_i/(N_b_i+N_g_i)
n = N_g_i + N_b_i
smoothed_woe_i = np.log(DR*(feature.alpha + n)/(n*DR_i + feature.alpha))#*DR))
cur_sample_woe.loc[index]=[gr_i, smoothed_woe_i, ER_i]
out_woes=out_woes.merge(cur_sample_woe.drop('event_rate', axis=1), on='group').rename({'woe':samples[si].name}, axis=1)
out_er=out_er.merge(cur_sample_woe.drop('woe', axis=1), on='group').rename({'event_rate':samples[si].name}, axis=1)
else:
out_woes[sample_names[si]]=np.nan
out_er[sample_names[si]]=np.nan
if datasamples.bootstrap_base is not None:
if verbose:
fig = plt.figure(figsize=(15,7))
bootstrap_correct=[]
to_keep=[feature.feature, datasamples.bootstrap_base.target]+([datasamples.bootstrap_base.weights] if datasamples.bootstrap_base.weights is not None else [])
base_with_group=datasamples.bootstrap_base.dataframe[to_keep]
base_with_group['group']=feature.set_groups(woes=feature.woes, original_values=True, data=base_with_group[feature.feature])
for bn in range(len(datasamples.bootstrap)):
                cur_sample_woe = pd.DataFrame(columns=['group', 'train_woe', 'woe', 'event_rate'])
import numpy as np
from calculate_mdl import *
import argparse
import os
import pandas as pd
from sklearn import metrics as sk_metrics
def calculate_auc(curves):
aucs = []
for curve in curves:
curve = np.array(curve)
epochs = np.arange(len(curve))
aucs.append(sk_metrics.auc(epochs, curve))
return np.mean(aucs), np.std(aucs), np.array(aucs).tolist()
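# Worked example (hypothetical curve): for a single reward curve [0.0, 1.0, 1.0] evaluated at
# epochs [0, 1, 2], the trapezoidal area is 0.5 + 1.0 = 1.5, so calculate_auc([[0.0, 1.0, 1.0]])
# returns (1.5, 0.0, [1.5]).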
def calculate_regret(curves):
regrets = []
for curve in curves:
curve = np.array(curve)
upper_bound = 0.7849
regrets.append(np.sum(upper_bound - curve))
return np.mean(regrets), np.std(regrets), np.array(regrets).tolist()
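# Worked example (hypothetical curve): with the fixed upper bound of 0.7849, the curve
# [0.7849, 0.5849] gives a regret of (0.7849 - 0.7849) + (0.7849 - 0.5849) = 0.2, so
# calculate_regret([[0.7849, 0.5849]]) returns (0.2, 0.0, [0.2]) up to float rounding.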
def smooth(list_of_list_of_scalars, weight: float): # Weight between 0 and 1
list_of_smoothed = []
for scalars in list_of_list_of_scalars:
last = scalars[0] # First value in the plot (first timestep)
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value
smoothed.append(smoothed_val) # Save it
last = smoothed_val # Anchor the last smoothed value
list_of_smoothed.append(smoothed)
return list_of_smoothed
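# Worked example (hypothetical scalars): smooth([[0.0, 1.0, 1.0]], weight=0.5) applies an
# exponential moving average (each point is 0.5 * previous_smoothed + 0.5 * current) and
# returns [[0.0, 0.5, 0.75]].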
def calculate_codebook_metrics(location):
"""
    Return a dict keyed by codebook file of smoothed per-seed RL return curves for train and test
"""
dirs_to_search = ['rl_logs_train', 'rl_logs_test']
metrics = {}
for log_dir in dirs_to_search:
for codebook_file in os.listdir(os.path.join(location, log_dir)):
for seed_dir in os.listdir(os.path.join(location, log_dir, codebook_file)):
for inner_file in os.listdir(os.path.join(location, log_dir, codebook_file, seed_dir)):
if inner_file.endswith('progress.csv'):
progress_csv = os.path.join(location, log_dir, codebook_file, seed_dir, inner_file)
df = pd.read_csv(progress_csv)
rewards = df['evaluation/Average Returns'].to_numpy()
#path_length = df['evaluation/path length Mean'].to_numpy()
stripped_codebook_file = codebook_file.replace('rl_', '')
stripped_codebook_file += '.npy'
if stripped_codebook_file not in metrics:
metrics[stripped_codebook_file] = dict(train=[], test=[])
if 'train' in log_dir:
metrics[stripped_codebook_file]['train'].append(rewards)
else:
metrics[stripped_codebook_file]['test'].append(rewards)
for codebook_file in metrics.keys():
smoothed_train = smooth(metrics[codebook_file]['train'], 0.5)
smoothed_test = smooth(metrics[codebook_file]['test'], 0.5)
metrics[codebook_file]['train'] = smoothed_train # (np.mean(smoothed_train, axis=0), np.var(smoothed_train))
metrics[codebook_file]['test'] = smoothed_test # (np.mean(smoothed_test, axis=0), np.var(smoothed_test))
return metrics
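# Usage sketch (hypothetical directory layout): calculate_codebook_metrics expects
# <location>/rl_logs_train/<codebook>/<seed>/progress.csv (and the same under rl_logs_test);
# the result maps '<codebook>.npy' to its smoothed per-seed return curves:
#   metrics = calculate_codebook_metrics('results/four_rooms')
#   metrics['codebook_0.npy']['test']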
def discover_evaluations(location):
"""
Return list of evaluations given the codebook location
"""
codebooks = []
for codebook_file in os.listdir(location):
if codebook_file.endswith('.npy'):
with open(os.path.join(location, codebook_file), 'rb+') as f:
codebook = np.load(f, allow_pickle=True)
codebooks.append((codebook_file, codebook.item())) #.item() to extract dictionary from 0d array
return codebooks
def process_evaluation(evaluation, codec, tree_bits, name, trajectory_dict):
"""
Adds to trajectory_dict the mappings from start/end positions to the
various metrics stored for the associated codebook
"""
test_trajectories = evaluation.pop('test')
train_trajectories = evaluation.pop('train')
def process_trajectories(trajectories, traj_type):
for trajectory, node_cost, start, end in trajectories:
trajectory_id = (start, end)
cleaned_trajectory = list(filter(lambda a: a != "", trajectory.split(" ")))
code_length = len(codec.encode(cleaned_trajectory)) * 8
num_primitive_actions = len(trajectory.replace(" ", ""))
num_abstract_actions = len(cleaned_trajectory)
metrics = dict(
num_primitive_actions=num_primitive_actions,
num_abstract_actions=num_abstract_actions,
code_length=code_length,
description_length=code_length + tree_bits,
node_cost=node_cost)
if trajectory_id not in trajectory_dict[traj_type]:
trajectory_dict[traj_type][trajectory_id] = {name: metrics}
else:
trajectory_dict[traj_type][trajectory_id][name] = metrics
process_trajectories(train_trajectories, 'train')
process_trajectories(test_trajectories, 'test')
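# Note: for every trajectory, code_length is the byte length of the codec encoding converted to
# bits, and description_length simply adds tree_bits, the fixed cost of transmitting the code
# tree itself alongside the encoded trajectory.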
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculate MDL of a directory of codebooks which are encoded in .npy format')
parser.add_argument('location', type=str,
help='a file location where codebooks are stored')
args = parser.parse_args()
codebooks = discover_codebooks(args.location)
codebook_name_dl_tuples = []
codebook_dict = {}
previous_length_range = None
track_probs = True
for codebook in codebooks:
length_range = codebook[1].pop('length_range')
probabilities = codebook[1].pop('probabilities')
        if track_probs:
            # if the length ranges ever differ between codebooks, stop tracking probabilities
            if previous_length_range is not None:
                if length_range != previous_length_range:
                    track_probs = False
previous_length_range = length_range
dl, tree_bits, codec, uncompressed_len = calculate_codebook_dl(codebook[1])
codebook_name_dl_tuples.append((codebook[0], dl, tree_bits, codec, length_range, probabilities, uncompressed_len))
sorted_codebooks_by_dl = sorted(codebook_name_dl_tuples, key=lambda x: x[1])
for name, dl, tree_bits, codec, length_range, probabilities, uncompressed_len in sorted_codebooks_by_dl:
#print(name, dl, uncompressed_len)
print(name, dl)
codebook_dict[name] = dict(description_length=dl,
tree_bits=tree_bits,
codec=codec,
length_range=length_range,
probabilities=probabilities,
uncompressed_len=uncompressed_len)
evaluations = discover_evaluations(os.path.join(args.location, 'evaluations'))
has_rl = False
try:
metrics = calculate_codebook_metrics(args.location)
has_rl = True
except FileNotFoundError as e:
print("No RL Logs Detected")
trajectory_dict = dict(train={}, test={}, probabilities={})
for codebook_name, evaluation in evaluations:
original_name = codebook_name.replace("trajectories_", "")
codebook_info = codebook_dict[original_name]
process_evaluation(evaluation, codebook_info['codec'], codebook_info['tree_bits'], original_name, trajectory_dict)
# building a pandas dataframe
pd_index = []
pd_dict = {'codebook_dl': [], 'num_symbols': [], 'test_rl_auc': [], 'test_rl_regret': [],
'test_auc_std': [], 'test_regret_std': [], 'test_regrets': []}
if not has_rl:
pd_dict.pop('test_rl_auc')
pd_dict.pop('test_rl_regret')
pd_dict.pop('test_auc_std')
pd_dict.pop('test_regret_std')
pd_dict.pop('test_regrets')
length_set = set()
for name, dl, *_ in sorted_codebooks_by_dl:
pd_index.append(name)
def accumulate_values(traj_type):
values_to_track = ['num_primitive_actions', 'num_abstract_actions', 'code_length', 'description_length', 'node_cost']
values = [0 for _ in values_to_track]
for start_end_pair in trajectory_dict[traj_type].keys():
for i, value_name in enumerate(values_to_track):
values[i] += trajectory_dict[traj_type][start_end_pair][name][value_name]
for i in range(len(values)):
values[i] /= len(trajectory_dict[traj_type].keys())
values_to_track = [f'{traj_type}_{value_name}' for value_name in values_to_track]
for i, column_name in enumerate(values_to_track):
if column_name not in pd_dict:
pd_dict[column_name] = []
pd_dict[column_name].append(values[i])
accumulate_values('train')
accumulate_values('test')
pd_dict['codebook_dl'].append(dl)
if track_probs:
for i, length in enumerate(codebook_dict[name]['length_range']):
length = str(length)
length_set.add(length)
if length not in pd_dict:
pd_dict[length] = []
pd_dict[length].append(codebook_dict[name]['probabilities'][i])
pd_dict['num_symbols'].append(len(codebook_dict[name]['codec'].get_code_table()))
if has_rl:
test_auc_mean, test_auc_std, test_aucs = calculate_auc(metrics[name]['test'])
test_regret_mean, test_regret_std, test_regrets = calculate_regret(metrics[name]['test'])
pd_dict['test_rl_auc'].append(test_auc_mean)
pd_dict['test_rl_regret'].append(test_regret_mean)
pd_dict['test_auc_std'].append(test_auc_std)
pd_dict['test_regret_std'].append(test_regret_std)
pd_dict['test_regrets'].append(test_regrets)
df = | pd.DataFrame(data=pd_dict, index=pd_index) | pandas.DataFrame |
####################################
# author: <NAME>
# course: Python for Data Science and Machine Learning Bootcamp
# purpose: lecture notes
# description: Section 06 - Python for Data Analysis, Pandas
# other: N/A
####################################
# PANDAS
# To know: Pandas will try to turn all numeric data into float in order to retain
# as much information as possible
import pandas as pd
import numpy as np
## Series (data type)
# It is like a NumPy array that contains axis labels so it can be indexed by a
# label.
labels = ['a','b','c']
my_data = [10,20,30]
arr = np.array(my_data)
d = {'a':10,'b':20,'c':30}
pd.Series(data = my_data) # w/o labels
pd.Series(data = my_data, index = labels)
pd.Series(arr,labels) # NumPy arrays or lists work equally as a Series
pd.Series(data = labels) # string data
# if we have a dictionary, the following line is unnecessary
pd.Series(data = my_data, index = d)
pd.Series(data = d) # this is a simplified version
# very flexible, e.g., built-in functions within a Series
pd.Series(data = [sum,print,len]) # not used in reality
# indexing - it will depend on the data type is my index
ser1 = pd.Series([1,2,3,4],['USA','Germany','Chile','Japan'])
ser2 = pd.Series([1,2,5,4],['USA','Germany','Italy','Japan'])
ser1['USA']
ser1[0] == ser1['USA']
# operations are by label - if there is no match, then a NaN is returned
ser1 + ser2
## DataFrames (made of Series objects)
from numpy.random import randn
np.random.seed(101)
df = pd.DataFrame(data = randn(5,4),index = ['A','B','C','D','E'], \
columns = ['W','X','Y','Z'])
df
# each column is Series
df['W']
df.W # SQL nomenclature is also allowed!! [it can be messy, since it can get
# confounded with a DataFrame method!]
type(df['W'])
# df['column']['row'] - numeric indexing works just for rows
df['W']['A']
df['W'][0]
type(df['W']['A'])
df[['W','Z']] # an extract from a DataFrame which is a DataFrame by itself
type(df[['W','Z']])
# creating a new column - it can be defined as it already exists
df['new'] = df['W'] + df['Y']
df
# deleting columns or rows - it does not happen in place!!
df.drop('new',axis = 1) # deletes the specified columns
df.drop(['B','C']) # deletes the specified rows
#df.drop(['B','C'], axis = 0) # same as the previous command, since axis = 0 is the default
df # not in place!!!
# to make it happen in place, I have to options
df = df.drop('new',axis = 1) # re-define df
#df.drop('new',axis = 1, inplace = True) # activate the inplace option
df
# shape or DataFrame dimensions
df.shape # 2-tuple: (rows, columns)
# selecting rows
# using loc - note it uses brackets instead of parentheses
df.loc['A'] # series
df.loc[['A','B']] # DataFrame
# using numerical position with iloc
df.iloc[0]
df.iloc[[0,1]]
# selecting subsets
df.loc['B','Y'] # row first, column second
df['Y']['B'] # column first, row second
df.loc['B','Y'] == df['Y']['B']
df.loc[['A','B'],['W','X']]
df[['W','X']][:2]
df.loc[['A','B'],['W','X']] == df[['W','X']][:2]
# conditional selection
booldf = df > 0
df[booldf] # NaN when the value is false
df['W']>0 # a series
df[df['W']>0] # filtering by rows that fulfill the specified condition
# working with a filtered dataset
### one way
new_df = df[df['Z']<0]
new_df['X']
### another way
df[df['Z']<0][['X','Y']]
### and / or does not normally work in this environment, since we have plenty
### of values within a column. Both are built to be used for comparing just
### a single True/False value, not multiple (truth value of a Series is
### ambiguous). Instead what we need to use is the & symbol for 'and' and | for
### 'or'. These, now, are going to allow us to make multiple comparisons at
### the same time
df[(df['W']>0) & (df['X']>1)]
df[(df['W']>0) | (df['X']>1)]
# reseting / setting the index - not occurring in place!
df.reset_index() # it resets index values to numbers and creates a new column
# with their former values
# df.reset_index(inplace = True) # in place!
newind = 'CA NY WY OR CO'.split() # fast way to create a new list
df['States'] = newind
df.set_index('States') # setting an existing column as index
df
# multi-level indexing
outside = ['G1','G1','G1','G2','G2','G2']
inside = [1,2,3,1,2,3]
hier_index = list(zip(outside,inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
hier_index.levels # note each index level outside has an inside index
hier_index.levshape # from outside to inside
df = pd.DataFrame(randn(6,2),hier_index,['A','B'])
df
### indexing
df.loc['G1'] # data frame, outside index
df.loc['G1'].loc[1] # series, inside index
### index names
df.index.names # no names has been assigned
df.index.names = ['Groups','Num']
df.loc['G2'].loc[2]['B']
# cross - sections (xs function): useful when we want to extract info from a
# particular level that is common to each outside index
df.xs('G1') # easy
df.xs(1,level = 'Num') # non-trivial
## Missing Data
d = {'A':[1,2,np.nan],'B':[5,np.nan,np.nan],'C':[1,2,3]}
df = pd.DataFrame(d)
# dropping missing values
df.dropna() # it drops any ROW with missing values
df.dropna(axis = 1) # it drops any COLUMN with missing values
df.dropna(thresh = 2) # it keeps rows with at least 2 non-na values
df.dropna(thresh = 1) # it keeps rows with at least 1 non-na values
# filling missing values
df.fillna(value = 'FILL VALUE')
df['A'].fillna(value = df['A'].mean()) # for instance with the mean of the column
## Group By - same sort of stuff from SQL; group together rows based off of
# a column and perform an aggregate function on them
data = {'Company': ['GOOG','GOOG','MSFT','MSFT','FB','FB'], \
'Person': ['Sam','Charlie','Amy','Vanessa','Carl','Sarah'], \
'Sales': [200,120,340,124,243,350]}
df = pd.DataFrame(data)
# step 1 - group by a specific column
byComp = df.groupby('Company')
# step 2 - aggregate values using a specific operation (function)
byComp.mean() # Pandas ignores non-numeric columns, such as Person
byComp.sum()
byComp.sum().loc['FB']
byComp.std()
# all together
df.groupby('Company').sum().loc['FB']
df.groupby('Company').count()
# useful information
df.groupby('Company').describe().transpose()
## Merging, Joining and Concatenating
df1 = pd.DataFrame({'A':['A0','A1','A2','A3'],\
'B':['B0','B1','B2','B3'],\
'C':['C0','C1','C2','C3'],\
'D':['D0','D1','D2','D3']},\
index = [0,1,2,3])
df2 = pd.DataFrame({'A':['A4','A5','A6','A7'],\
'B':['B4','B5','B6','B7'],\
'C':['C4','C5','C6','C7'],\
'D':['D4','D5','D6','D7']},\
index = [4,5,6,7])
df3 = pd.DataFrame({'A':['A8','A9','A10','A11'],\
'B':['B8','B9','B10','B11'],\
'C':['C8','C9','C10','C11'],\
'D':['D8','D9','D10','D11']},\
index = [8,9,10,11])
# concatenating - dimensions should match along the axis we are concatenating on
# by default the axis = 0 (along rows)
pd.concat([df1,df2,df3])
pd.concat([df1,df2,df3],axis = 1) # concatenating along columns
# merging - similar to SQL
left = pd.DataFrame({'key':['K0','K1','K2','K3'],\
'A':['A0','A1','A2','A3'],\
'B':['B0','B1','B2','B3']})
right = pd.DataFrame({'key':['K0','K1','K2','K3'],\
'C':['C0','C1','C2','C3'],\
'D':['D0','D1','D2','D3']})
# with the 'how' option we specify the type of merge method we want to use
# (identical to SQL)
pd.merge(left,right,how = 'inner',on = 'key') # inner is selected by default
left = pd.DataFrame({'key1':['K0','K0','K1','K2'],\
'key2':['K0','K1','K0','K1'],\
'A':['A0','A1','A2','A3'],\
'B':['B0','B1','B2','B3']})
right = pd.DataFrame({'key1':['K0','K1','K1','K2'],\
'key2':['<KEY>'],\
'C':['C0','C1','C2','C3'],\
'D':['D0','D1','D2','D3']})
## how = inner: it includes the intersection of cases
pd.merge(left,right,on = ['key1','key2'])
## how = outer: it includes all cases (union)
pd.merge(left,right,how = 'outer',on = ['key1','key2'])
## how = right: it includes all cases from the "right" set, no matter whether
## an intersection with the "left" set exists or not
pd.merge(left,right,how = 'right',on = ['key1','key2'])
# Joining - combines columns of two potentially differently-indexed DataFrames
# into a single resulting DataFrame
left = pd.DataFrame({'A':['A0','A1','A2'],\
'B':['B0','B1','B2']},\
index = ['K0','K1','K2'])
right = pd.DataFrame({'C':['C0','C1','C2'],\
'D':['D0','D1','D2']},\
index = ['K0','K2','K3'])
left.join(right)
left.join(right,how = 'outer') # similar to concatenating along columns (axis = 1)
## Operations
df = pd.DataFrame({'col1':[1,2,3,4], \
'col2':[444,555,666,444], \
'col3':['abc','def','ghi','xyz']})
df.head() # displays the first n = 5 (by default) number of rows
df.info() # shows how many entries there are and their data types
df['col2'].unique() # shows all unique values in a specific column
len(df['col2'].unique()) # displays the number of unique elements
df['col2'].nunique() # does the same as the previous command
df['col2'].value_counts() # returns the number of times the values appear
# apply method - broadcasts a function to each value in the column; it is a way
# to implement my own functions apart from those built into Python
def times2(x):
return x*2
df['col1'].apply(times2)
df['col3'].apply(len) # returns the length of each string value in a column
df['col2'].apply(lambda x: x*2) # no need to define a function previously
# removing columns
#df.drop('col1',axis = 1,inplace = True)
# names (attributes)
df.columns
df.index
# sorting / ordering
df.sort_values(by = 'col2')
# null values
df.isnull()
# pivot table (similar to Excel) - mainly used to have a multi level DataFrame
data = {'A':['foo','foo','foo','bar','bar','bar'],\
'B':['one','one','two','two','one','one'],\
'C':['x','y','x','y','x','y'],\
'D':[1,3,2,5,4,1]}
df = pd.DataFrame(data)
df.pivot_table(values = 'D', index = ['A','B'], columns = 'C')
## Data Input and Output
# required libraries to work with four main data sources: CSV, Excel, HTML, SQL
#conda install sqlalchemy
#conda install lxml
#conda install html5lib
#conda install BeautifulSoup4
import os
os.getcwd()
os.chdir('/Users/gsalazar/Documents/C_Codes/Learning-Python/Udemy_Py_DataScience_ML/Pandas_examples')
## CSV
df = pd.read_csv('example')
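# writing back out to CSV - a minimal round-trip sketch ('my_output' is a hypothetical filename);
# index = False keeps the index from being written as an extra column
df.to_csv('my_output', index = False)
pd.read_csv('my_output')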
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
def generate_fullstats(dataset_path, filelist, targets, target_col_name='Target'):
"""
    Generates a single DataFrame of statistics from a list of per-file csv statistics
Parameters
---------
dataset_path: string
string of path to folder containing data files
filelist: list
list containing filenames of all files to be processed
targets: list
list containing strings that state which class/group a file is from,
string must be in the filename of the data files
    target_col_name: string
        name of the added column that stores the class/group label (default 'Target')
Returns
-------
fstats_tot: pandas.DataFrame
dataframe containing all rows from data files and with new column
for the class/group the row came from
"""
fstats_tot = None
video_num = 0
for filename in filelist:
fstats = pd.read_csv(dataset_path + filename, encoding = "ISO-8859-1", index_col='Unnamed: 0')
#print('{} size: {}'.format(filename, fstats.shape))
for i in range(0, len(targets)):
if targets[i] in filename:
print('Adding file {} size: {}'.format(filename, fstats.shape))
                fstats[target_col_name] = pd.Series(fstats.shape[0]*[targets[i]], index=fstats.index)
                # assumed continuation: accumulate every labelled file into one frame
                if fstats_tot is None:
                    fstats_tot = fstats
                else:
                    fstats_tot = pd.concat((fstats_tot, fstats), ignore_index=True)
        video_num += 1
    return fstats_tot
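# Usage sketch (hypothetical paths and labels): build one labelled table from per-file statistics
#   fstats_tot = generate_fullstats('data/', ['P1_brain.csv', 'P2_liver.csv'], ['brain', 'liver'])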
# Some utilites functions for loading the data, adding features
import numpy as np
import pandas as pd
from functools import reduce
from sklearn.preprocessing import MinMaxScaler
def load_csv(path):
"""Load dataframe from a csv file
Args:
path (STR): File path
"""
# Load the file
df = pd.read_csv(path)
# Lowercase column names
df.rename(columns=lambda x: x.lower().strip(), inplace=True)
return df
def fill_missing_values(df):
"""Fill the missing data points
Args:
df: Input dataframe
Return: the modified dataframe
"""
# Get datetime col
df['ds'] = pd.to_datetime(df['update_time']) + df['hour_id'].astype('timedelta64[h]')
pdlist = []
for z in df.zone_code.unique():
zone = df[df['zone_code'] == z]
r = pd.date_range(zone.ds.min(), zone.ds.max(), freq='H')
ds_range = pd.DataFrame({'ds': r, 'zone_code': z})
zone_merged = ds_range.merge(zone, how='left', on=['ds', 'zone_code'])
zone_merged['hour_id'] = zone_merged['ds'].dt.hour
# Fill the null values
for col in ['bandwidth_total', 'max_user']:
for index, row in zone_merged[zone_merged[col].isnull()].iterrows():
shifted_index = index - (24*7)
flag = True
while flag:
fill_val = zone_merged.loc[shifted_index, col]
                    if pd.isnull(fill_val):
                        # assumed continuation: step back one more week until a non-null value is found
                        shifted_index -= 24*7
                    else:
                        zone_merged.loc[index, col] = fill_val
                        flag = False
        pdlist.append(zone_merged)
    # assumed continuation: stitch the per-zone frames back together and return
    return pd.concat(pdlist, ignore_index=True)
# Copyright (C) 2015-2018 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
from itertools import count
import json
import os
import re
from doit.tools import run_once, create_folder, LongRunning
from doit.task import clean_targets, dict_to_task
from khmer import HLLCounter, ReadParser
import pandas as pd
from dammit.fileio.gff3 import GFF3Parser
from dammit.profile import profile_task
from dammit.utils import which, doit_task
seq_ext = re.compile(r'(.fasta)|(.fa)|(.fastq)|(.fq)')
def strip_seq_extension(fn):
return seq_ext.split(fn)[0]
@doit_task
def get_rename_transcriptome_task(transcriptome_fn, output_fn, names_fn,
transcript_basename, split_regex=None):
'''Create a doit task to copy a FASTA file and rename the headers.
Args:
transcriptome_fn (str): The FASTA file.
output_fn (str): Destination to copy to.
names_fn (str): Destination to the store mapping from old to new names.
transcript_basename (str): String to contruct new names from.
split_regex (regex): Regex to split the input names with; must contain
a `name` field.
Returns:
dict: A doit task.
'''
import re
name = os.path.basename(transcriptome_fn)
if split_regex is None:
counter = count()
header_func = lambda name: '{0}_{1}'.format(transcript_basename, next(counter))
else:
def header_func(header):
results = re.search(split_regex, header).groupdict()
try:
header = results['name']
except KeyError as err:
err.message = 'Header regex should have a name field!'
raise
return header
def fix():
names = []
with open(output_fn, 'w') as fp:
for record in ReadParser(transcriptome_fn):
header = header_func(record.name)
fp.write('>{0}\n{1}\n'.format(header, record.sequence))
names.append((record.name, header))
        # persist the old -> new name mapping to names_fn (the destination described in the docstring)
        pd.DataFrame(names, columns=['original', 'renamed']).to_csv(names_fn, index=False)
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
# test fix for when group by on None resulted in
# coercion of dtype categorical -> float
df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
result = df.groupby("A").first()["B"]
expected = Series(
Categorical([], categories=["test", "train"]),
index=Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_sort():
# https://stackoverflow.com/questions/23814368/sorting-pandas-
# categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = Categorical(df["range"], ordered=True)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
col = "range"
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
# when categories is ordered, group is ordered by category's order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
df["range"] = Categorical(df["range"], ordered=False)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
index = CategoricalIndex(
["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
name="range",
)
expected_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
)
col = "range"
# this is an unordered categorical, but we allow this ####
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
# use same data as test_groupby_sort_categorical, which category is
# corresponding to datetime.month
df = DataFrame(
{
"dt": [
datetime(2011, 7, 1),
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 2, 1),
datetime(2011, 1, 1),
datetime(2011, 5, 1),
],
"foo": [10, 8, 5, 6, 4, 1, 7],
"bar": [10, 20, 30, 40, 50, 60, 70],
},
columns=["dt", "foo", "bar"],
)
# ordered=True
df["dt"] = Categorical(df["dt"], ordered=True)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(
index, categories=index, name="dt", ordered=True
)
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
# when categories is ordered, group is ordered by category's order
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first()
)
# ordered = False
df["dt"] = Categorical(df["dt"], ordered=False)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt")
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
tm.assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first()
)
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
expected = Series([3, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count>1
result = df.groupby("A", observed=False).B.sum(min_count=2)
expected = Series([3, np.nan, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_empty_prod():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 1 by default
result = df.groupby("A", observed=False).B.prod()
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.prod(min_count=0)
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.prod(min_count=1)
expected = Series([2, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_groupby_multiindex_categorical_datetime():
# https://github.com/pandas-dev/pandas/issues/21390
df = DataFrame(
{
"key1": Categorical(list("<KEY>")),
"key2": Categorical(
list(pd.date_range("2018-06-01 00", freq="1T", periods=3)) * 3
),
"values": np.arange(9),
}
)
result = df.groupby(["key1", "key2"]).mean()
idx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(pd.date_range("2018-06-01 00", freq="1T", periods=3)),
],
names=["key1", "key2"],
)
expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"as_index, expected",
[
(
True,
Series(
index=MultiIndex.from_arrays(
[Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]
),
data=[1, 2, 3],
name="x",
),
),
(
False,
DataFrame(
{
"a": Series([1, 1, 2], dtype="category"),
"b": [1, 2, 2],
"x": [1, 2, 3],
}
),
),
],
)
def test_groupby_agg_observed_true_single_column(as_index, expected):
# GH-23970
df = DataFrame(
{"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}
)
result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()
tm.assert_equal(result, expected)
@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])
def test_shift(fill_value):
ct = Categorical(
["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
)
expected = Categorical(
[None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
res = ct.shift(1, fill_value=fill_value)
| tm.assert_equal(res, expected) | pandas._testing.assert_equal |
#!/usr/bin/env python
__author__ = '<NAME>'
__date__ = '2020-07-09'
__version__ = '0.0.1'
import argparse
import os
from distutils.version import LooseVersion
import scipy
import scipy.io
import gzip
import pandas as pd
import numpy as np
import scanpy as sc
def prepare_h5ad_MAST(
adata,
cols_to_retain,
out_dir='tenx_metadata',
verbose=True
):
"""Write 10x like data from h5ad data.
Include a cell_metadata file.
Parameters
----------
    adata : anndata.AnnData
        Annotated data object; the cell barcodes are taken from ``adata.obs``.
    cols_to_retain : list
        Columns of ``adata.obs`` to keep in the cell metadata file.
    out_dir : string
        Output directory for the 10x-like files.
    verbose : boolean
        If True, print the path of each file as it is written.
Returns
-------
execution_code : int
"""
# Make the output directory if it does not exist.
if out_dir == '':
out_dir = os.getcwd()
else:
os.makedirs(out_dir, exist_ok=True)
# Get compression opts for pandas
compression_opts = 'gzip'
if LooseVersion(pd.__version__) > '1.0.0':
compression_opts = dict(method='gzip', compresslevel=9)
# Save the barcodes.
out_f = os.path.join(
out_dir,
'barcodes.tsv.gz'
)
if verbose:
print('Writing {}'.format(out_f))
    pd.DataFrame(adata.obs.index).to_csv(
        out_f,
        sep='\t',
        compression=compression_opts,
        index=False,
        header=False
    )
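# Usage sketch (paths and column names below are hypothetical placeholders):
# export 10x-like metadata from an .h5ad file so downstream tools such as MAST
# can consume it.
#
#     adata = sc.read_h5ad("input.h5ad")
#     prepare_h5ad_MAST(
#         adata,
#         cols_to_retain=["cell_type", "condition"],
#         out_dir="tenx_metadata",
#         verbose=True,
#     )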
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 5 13:30:38 2019
@author: Prasad
"""
# Artificial Neural Network
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13]
y = dataset.iloc[:, 13]
#Create dummy variables
geography=pd.get_dummies(X["Geography"],drop_first=True)
gender=pd.get_dummies(X['Gender'],drop_first=True)
## Concatenate the Data Frames
X= | pd.concat([X,geography,gender],axis=1) | pandas.concat |
import os
import uuid
import numpy as np
import pandas as pd
from .functools import FuncDataFrame
from .primitives import Cartesian3
get_env_var_as_int = lambda var_name: int(os.getenv(var_name))
def single_gamma_hist2d(hits):
return np.histogram2d(
hits.localPosX, hits.localPosY, bins=get_env_var_as_int("SIPM_BINS")
)[0].tolist()
class SipmArray:
def __init__(self):
self.detector_size_xy = int(os.getenv("DETECTOR_SIZE_XY"))
self.detector_size_z = int(os.getenv("DETECTOR_SIZE_Z"))
self.bins = int(os.getenv("SIPM_BINS"))
@property
def sipm_boundaries_coord(self):
return np.linspace(
-self.detector_size_xy / 2, self.detector_size_xy / 2, self.bins + 1
)
@property
def sipm_center_coord(self):
return (
(np.roll(self.sipm_boundaries_coord, 1) + self.sipm_boundaries_coord) / 2
)[1:]
@property
def sipm_boundaries(self):
return np.meshgrid(self.sipm_boundaries_coord, self.sipm_boundaries_coord)
@property
def sipm_center(self):
return np.meshgrid(self.sipm_center_coord, self.sipm_center_coord)
@property
def sipm_center_3d(self):
return Cartesian3(
*self.sipm_center,
np.ones_like(self.sipm_center[0]) * (self.detector_size_z / 2)
)
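# A minimal usage sketch of SipmArray (illustrative only): the detector size and
# bin count below are hypothetical placeholders; the real values are read from the
# DETECTOR_SIZE_XY / DETECTOR_SIZE_Z / SIPM_BINS environment variables at runtime.
#
#     os.environ["DETECTOR_SIZE_XY"] = "50"   # detector face width, assumed 50
#     os.environ["DETECTOR_SIZE_Z"] = "10"    # detector depth, assumed 10
#     os.environ["SIPM_BINS"] = "16"          # assumed 16 x 16 SiPM grid
#     sipm = SipmArray()
#     sipm.sipm_boundaries_coord              # 17 evenly spaced edges from -25 to 25
#     sipm.sipm_center_coord                  # 16 bin centers, midpoints of adjacent edges
#     sipm.sipm_center_3d                     # Cartesian3 grid placed at z = +5 (half depth)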
class Hits:
def __init__(self, raw_hits: FuncDataFrame):
self.raw_hits = raw_hits
def _group_n_selector(group_key, filter_func):
return lambda hits: Hits(
FuncDataFrame(hits.raw_hits.groupby(group_key).filter(filter_func))
)
_fdf_select_process_by_name = lambda process_name: (
lambda fdf: Hits(fdf.select_where(processName=process_name))
)
self._single = _group_n_selector(
group_key="eventID", filter_func=lambda g: len(g.photonID.unique()) == 1
)
self._coincidence = _group_n_selector(
group_key="eventID", filter_func=lambda g: len(g.photonID.unique()) == 2
)
self._single_has_compton = _group_n_selector(
group_key=["eventID", "photonID"],
filter_func=lambda g: "Compton" in g.processName.unique(),
)
self._coincidence_has_compton = _group_n_selector(
group_key="eventID",
filter_func=lambda g: "Compton" in g.processName.unique(),
)
self._coincidence_has_no_compton = _group_n_selector(
group_key="eventID",
filter_func=lambda g: "Compton" not in g.processName.unique(),
)
self._Transportation = _fdf_select_process_by_name("Transportation")
self._OpticalAbsorption = _fdf_select_process_by_name("OpticalAbsorption")
self._Compton = _fdf_select_process_by_name("Compton")
self._PhotoElectric = _fdf_select_process_by_name("PhotoElectric")
self._event = lambda event_id: (lambda fdf: fdf.select_where(eventID=event_id))
self._to_cart3_by_key = lambda key: Cartesian3(
*self.raw_hits.select(key).to_numpy().T
)
@property
def counts(self):
"""
Caution! counts only works on coincidence.
Hits(hits_df).coincidence.counts
"""
return (
self.raw_hits.groupby(["eventID", "photonID"])
.apply(
lambda event_gamma: (
FuncDataFrame(event_gamma)
# select optical photon by first interaction crystalID
.select_where(crystalID=event_gamma.iloc[0].crystalID)
                    # select by Transportation process, i.e. photons transported out through the back surface of the crystal
.select_where(processName="Transportation")
)
)
# group again by eventID and photonID
.reset_index(drop=True)
.groupby(["eventID", "photonID"])
# calculate hits2d for each event_gamma
.apply(single_gamma_hist2d)
# merge counts by events
.groupby(["eventID"])
.apply(lambda e: [np.array(e)[0], np.array(e)[1]])
)
def sipm_center_pos(self, crystalRC):
return (
self.raw_hits
# Hits(hits).coincidence.raw_hits
.groupby(['eventID', 'photonID'])
.apply(
lambda event_gamma: (
SipmArray().sipm_center_3d
.move_by_crystalID(event_gamma.iloc[0].crystalID, crystalRC).flatten().to_numpy()
)
)
# merge by events
.groupby(['eventID'])
.apply(lambda e: np.array([np.array(e)[0], np.array(e)[1]]))
)
@property
def crystalID(self):
return (
self.raw_hits.groupby(["eventID", "photonID"])
.apply(lambda g: g.iloc[0])
.crystalID.groupby("eventID")
.apply(lambda e: [np.array(e)[0], np.array(e)[1]])
)
@property
def sourcePosX(self):
return self.raw_hits.groupby(["eventID"]).apply(lambda g: g.iloc[0]).sourcePosX
@property
def sourcePosY(self):
return self.raw_hits.groupby(["eventID"]).apply(lambda g: g.iloc[0]).sourcePosY
@property
def sourcePosZ(self):
return self.raw_hits.groupby(["eventID"]).apply(lambda g: g.iloc[0]).sourcePosZ
@property
def single(self):
return self._single(self)
@property
def coincidence(self):
return self._coincidence(self)
@property
def coincidence_has_compton(self):
return self._coincidence_has_compton(self._coincidence(self))
@property
def coincidence_has_no_compton(self):
return self._coincidence_has_no_compton(self._coincidence(self))
@property
def num_of_compton_by_event(self):
return self.raw_hits.groupby("eventID").apply(
lambda g: g.processName.value_counts()
)[:, "Compton"]
@property
def local_pos(self):
return self._to_cart3_by_key(["localPosX", "localPosY", "localPosZ"])
@property
def global_pos(self):
return self._to_cart3_by_key(["posX", "posY", "posZ"])
@property
def source_pos(self):
return self._to_cart3_by_key(["localPosX", "localPosY", "localPosZ"])
def get_event(self, eventID):
return self._event(eventID)(self.raw_hits)
def event_sample_hist_2d(self):
gamma_1, gamma_2 = coincidence_group_by_event_n_gamma(self.raw_hits)
return [
single_gamma_hist2d(get_most_hit_crystal_optical_photons(gamma_1)),
single_gamma_hist2d(get_most_hit_crystal_optical_photons(gamma_2)),
]
def assemble_sample(self, crystalRC):
sample = pd.DataFrame()
sample['sourcePosX'] = self.coincidence.sourcePosX
sample['sourcePosY'] = self.coincidence.sourcePosY
sample['sourcePosZ'] = self.coincidence.sourcePosZ
sample['counts'] = self.coincidence.counts
sample['crystalID'] = self.coincidence.crystalID
sample['sipm_center_pos'] = self.coincidence.sipm_center_pos(crystalRC)
return sample
def assign_to_experiment(self, experiment_id):
result = pd.DataFrame()
result['eventID'] = self.coincidence.raw_hits.eventID.unique()
result['experiment_id'] = experiment_id
result.to_csv('experiment_coincidence_event.csv', index=False)
def gen_uuid4():
yield str(uuid.uuid4())
class HitsEventIDMapping:
def __init__(self, df):
self.df = df
def get_by_key(self, key):
return self.df[key]
@staticmethod
def from_file(path="./eventID_mapping.map"):
return HitsEventIDMapping(dict(pd.read_csv(path).to_records(index=False)))
@staticmethod
def build(hits, path="./eventID_mapping.map"):
try:
id_map = HitsEventIDMapping.from_file().df
except FileNotFoundError as e:
id_map = {
eventID: next(gen_uuid4()) for eventID in hits["eventID"].unique()
}
pd.DataFrame(
list(id_map.items()), columns=["eventID_num", "eventID_uuid"]
).to_csv(path, index=False)
return HitsEventIDMapping(id_map)
def to_dict(self):
return self.df
def do_replace(self, hits):
hits["eventID"] = | pd.Series([self.df[eventID] for eventID in hits["eventID"]]) | pandas.Series |
# ********************************************************************************** #
# #
# Project: FastClassAI workbecnch #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import cv2
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advance plots, for statistics,
import matplotlib as mpl # to get some basif functions, heping with plot mnaking
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt # for making plots,
from PIL import Image, ImageDraw
import matplotlib.gridspec
from scipy.spatial import distance
from scipy.cluster import hierarchy
from matplotlib.font_manager import FontProperties
from scipy.cluster.hierarchy import leaves_list, ClusterNode, leaders
from sklearn.metrics import accuracy_score
import graphviz # allows visualizing decision trees,
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ParameterGrid
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier # accepts only numerical data
from sklearn.tree import export_graphviz
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
# Function, .............................................................................
def find_different_filetypes(*,
subsets_dict,
filetypes_dict,
path,
verbose=False
):
"""
    A generic function that finds files that:
    * are grouped together, eg. by subset name like train, test, valid
    * have the same core name, but a different affix
    This function builds a new logfile for data encoding.
    ASSUMPTION: corresponding batch labels and extracted features have the same file name
                except for the affix, eg. _encoded.npy and _labels.csv
    # Inputs:
    . subsets_dict : dict,
        <key> : str, name of the group of files eg: test, train, etc..
        <value> : str, part of a pattern that allows finding all files belonging to one group
            important: in case more than one pattern must be used to identify files from one group,
            just name them with numbers, and later on replace them in the df returned by the function,
    . filetypes_dict : dict,
        <key> : str, name of the affix added to the file of a given type
        <value> : str, the affix added to the file of a given type
    . path : full path to the directory with the files searched by the function,
    . verbose : bool,
    # returns
    . dataFrame : df, where each row represents files from one group with one core name,
        eg: test_batch_01, test_batch_02 etc..., and columns named after filetypes_dict keys
        hold the corresponding filetypes, eg:
        test_batch_01_features.npy, test_batch_01_labels.csv,
        additional columns also show the path and subset_name (key from subsets_dict)
    # Note
    the function finds all files that match any combination of the following pattern
    > f'{subsets_dict[<key>]}*{filetypes_dict[<key>]}'
"""
os.chdir(path)
filename_table_list = [] # one table with all file names,
# ...
for i, subset_name in enumerate(list(subsets_dict.keys())):
" subset_file_name_pat may allow finidng one, or many different files wiht eg 01, 02, ... 0n numbers or other designations "
" these shodul be "
# ........................................................................................
# get pattern used to find files from a given data subset/group
subset_pat = subsets_dict[subset_name] # pat may be str or a list with >=1 str,
first_filetype = filetypes_dict[list(filetypes_dict.keys())[0]]
# ........................................................................................
        # step 1. find the first filetype, to later on find any other file types in the same order
one_subset_corename_with_one_filetype_list = []
# if, there is a list, it means more then one pattern was mashing to a given subset,
if isinstance(subset_pat, list):
for one_subset_pat in subset_pat:
for file in glob.glob(f"{one_subset_pat}*{first_filetype}"):
one_subset_corename_with_one_filetype_list.append(file)
else:
for file in glob.glob(f"{subset_pat}*{first_filetype}"):
one_subset_corename_with_one_filetype_list.append(file)
# ........................................................................................
        # step 2. find all different types of associated files defined by the different filetype patterns
        """ LIMITATION: these different types of files should be in the same directory
        """
        # .. test if anything could be found
if len(one_subset_corename_with_one_filetype_list)==0:
if verbose==True:
print(f"{subset_name} - No files were found using provided subset_pat_list & filetype_pat_list[0]")
else:
pass
            pass # because there is nothing to continue with, and I don't want to stop on that
else:
if verbose==True:
print(f"{subset_name} - {len(one_subset_corename_with_one_filetype_list)} files were found, at least for the first filetype")
else:
pass
# .. remove affix, and create core file names that can be used to find other types of files,
""" and create list of core files that can be used to search
for different types of files for each item in that list
"""
one_subset_corename_list = pd.Series(one_subset_corename_with_one_filetype_list).str.split(first_filetype, expand=True).iloc[:, 0].values.tolist()
# .. search filtypes for all core names,
for one_file_corename in one_subset_corename_list:
# .... now find all filetypes with the same corename (one by one),
one_corename_filetypenames_dict = dict()
for filetype_name in list(filetypes_dict.keys()):
# - get patter used to filnd one filetype,
filetype_pat = filetypes_dict[filetype_name]
# - search for ONE_FILE_NAME
ONE_FILE_NAME = [] # at least ot. shoudl be one !
for file in glob.glob(f"{one_file_corename}*{filetype_pat}"):
ONE_FILE_NAME.append(file)
                    # - test if only one name was found; if not, the provided patterns are not specific enough
                    if verbose==True:
                        if len(ONE_FILE_NAME)==0:
                            print(f"Error - FILE NOT FOUND: {one_file_corename}*{filetype_pat}")
                        if len(ONE_FILE_NAME)==1:
                            "everything is ok"
                            pass
                        if len(ONE_FILE_NAME)>1:
                            print(f"Error: provided combination of - {one_file_corename} - and - {filetype_pat} - is not specific enough !!!")
                            print("Error: in results more than one file was found and now only the first one will be loaded")
                    else:
                        pass
                    # .. add that file to the dict with associated files,
one_corename_filetypenames_dict[filetype_name] = ONE_FILE_NAME[0]
                # .... finally, add each group of associated files with the same core file name to filename_table_list
"ie. build table row"
filename_table_list.append({
"subset_name": subset_name,
"path": path,
**one_corename_filetypenames_dict
})
return pd.DataFrame(filename_table_list)
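# Example call (hypothetical file layout, for illustration only): with features
# stored as <corename>_encoded.npy and labels as <corename>_labels.csv in one
# directory, the call below returns one row per core name, with one column per
# filetype plus subset_name and path columns.
#
#     file_table = find_different_filetypes(
#         subsets_dict={"train": "train_batch", "test": "test_batch"},
#         filetypes_dict={"extracted_features": "_encoded.npy",
#                         "labels": "_labels.csv"},
#         path="/path/to/extracted_features",   # placeholder path
#         verbose=True,
#     )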
# Function, ......................................................................
# working version ...... 2020.12.11 ----- finally !!!!!!!!!!!!!!!
# Function, ......................................................................
def pair_files(*, search_patterns, pair_files_with, allow_duplicates_between_subsets=False, verbose=False, track_progres=False):
'''
    function to find lists of specific files, or pairs or groups of associated files,
    eg: batches of images and their labels that can have different formats and live in different locations,
    One file type is described with a so-called corefilename and subset types that allow grouping them,
    and searching for other, associated files using that corefilename and the provided filename prefixes and extensions,
    done: 2020.12.10
    # inputs
    . search_patterns : dict, see example below
    . pair_files_with : the type of file that is paired with the other filetypes
    . allow_duplicates_between_subsets : bool, unless duplicates are allowed, the function will stop on a subset
                                         collection that assigned the same leading filenames to different subsets
    . verbose : bool,
    . track_progres : bool, like verbose, but prints only minimal info on the ongoing process
    # returns:
    . dictionary with DataFrames : dict, key==Data subsets collection
                                   values=pd.DataFrame, with paired file_name's and file_path's
                                   and col: subset_name that allows separating different subsets in one df
                                   the df also contains several other values that can help with creating
                                   new derivative files
# Example
search_patterns = {
"all_data":{ # one datasets collection will create one dataframe,
"extracted_features":{
"file_path":PATH_extracted_features,
"file_prefix": f'{module_name}_{dataset_name}_{dataset_name}',
"file_extension": "_encoded.npy",
"file_corename": {
"train": f"_", # this will return several duplicates in train data
"valid": f"_valid_batch",
"test": f"_test_01",
"test_2": f"_test_02" # you may add more then one in a list !
}},
"labels":{
"file_path":None,
"file_prefix": None,
"file_extension": "labels.csv"
},
},
"partial_data":{ # one datasets collection will create one dataframe,
"extracted_features":{
"file_path":PATH_extracted_features,
"file_prefix": f'{module_name}_{dataset_name}_{dataset_name}',
"file_extension": "_encoded.npy",
"file_corename": {
"train": [f"_train_batch01", f"_train_batch02",f"_train_batch03",f"_train_batch03",f"_train_batch03"],
"valid": f"_valid_batch01",
"test": f"_test_01"
}},
"labels":{
"file_path":None,
"file_prefix": None,
"file_extension": "labels.csv"
},
}
}
# .......
    df = pair_files(
search_patterns=search_patterns,
pair_files_with="extracted_features",
verbose=True)
'''
STOP_LOOP = False # if true after some test, function stops execution and returns None
subsets_collection_list = list(search_patterns.keys()) # used separately, and returned as dict with different tables,
    compare_all_files_to = pair_files_with # legacy issue, I changed the name to make it more informative
    paired_filenames_dict = dict() # keys==collection of subsets, values = table with paired filenames/paths and the name of the data subset
# -------------------------------------------------------------------------------
# create one df table per collection of subsets,
for subsets_collection_name in list(search_patterns.keys()):
if track_progres==True:
print("* Preparing: ", subsets_collection_name, " - from - ", subsets_collection_list)
else:
pass
# -------------------------------------------------------------------------------
# Step 1. search filenames of the first filetype (compare_all_files_to !)
# -------------------------------------------------------------------------------
'''
        here the df is created with all items such as subsets_collection_name & one_subset_name,
        that will allow identifying the file without problems,
'''
# - list with subset names to loop over,
subset_name_list_in_one_collection = list(search_patterns[subsets_collection_name][compare_all_files_to]["file_corename"].keys())
# - list to store results on one subset collection (one entry == one file)
one_subset_collection_file_list = list()
# - loop over each subset
for i, one_subset_name in enumerate(subset_name_list_in_one_collection):
# **** STEP 1 **** parameters, ,
# .... get variables provided as parameters to the function for one_subset_name,
file_path = search_patterns[subsets_collection_name][compare_all_files_to]["file_path"] # str
file_prefix = search_patterns[subsets_collection_name][compare_all_files_to]["file_prefix"] # str
file_extension = search_patterns[subsets_collection_name][compare_all_files_to]["file_extension"] # str
file_corename = search_patterns[subsets_collection_name][compare_all_files_to]["file_corename"][one_subset_name] # str/list
# .... ensure that corename is a list, (can also be provided as str, with one pattern)
if isinstance(file_corename, str)==True:
file_corename = [file_corename]
else:
pass
# **** STEP 2 **** get filenames,
# .... set dir,
try:
os.chdir(file_path)
except:
if verbose==True:
print(f"ERROR incorrect path provided for {compare_all_files_to}")
else:
pass
# .... identify all files in one subset from that subsets collection
'all files found with all patterns added to the same list'
found_file_name_list = []
for one_file_corename in file_corename:
for file in glob.glob(f"{file_prefix}*{one_file_corename}*{file_extension}"):
found_file_name_list.append(file)
# ... ensure there are no repeats in found_file_name_list
found_file_name_list = pd.Series(found_file_name_list).unique().tolist()
# **** STEP 3 **** get file speciffic corename and place all results in dict in the list
# .... create a file_speciffic_corename
file_speciffic_corename_s = pd.Series(found_file_name_list)
file_speciffic_corename_s = file_speciffic_corename_s.str.replace(file_prefix, "")
file_speciffic_corename_s = file_speciffic_corename_s.str.replace(file_extension, "")
# .... add each file into one_subset_collection_file_list
for file_name, filespeciffic_corename in zip(found_file_name_list, file_speciffic_corename_s):
one_subset_collection_file_list.append({
"subsets_collection_name": subsets_collection_name,
"subset_name": one_subset_name,
f"{compare_all_files_to}_file_name": file_name,
f"{compare_all_files_to}_file_path":file_path,
f"{compare_all_files_to}_file_prefix": file_prefix,
f"{compare_all_files_to}_file_corename":file_corename,
f"{compare_all_files_to}_file_extension":file_extension,
f"{compare_all_files_to}_filespeciffic_corename":filespeciffic_corename,
})
# -------------------------------------------------------------------------------
# Step 2. test if all file_names are unique and were not used in mutiple subsets
# -------------------------------------------------------------------------------
        'caution: this may be done intentionally'
        # - get all filenames in a given collection of subsets, - duplicates can be the same files listed for 2 different subsets,
collected_filenames = | pd.DataFrame(one_subset_collection_file_list) | pandas.DataFrame |
#!/usr/bin/env python3
from collections import Counter
from datetime import date, timedelta
import itertools
import json
import pickle
from prettytable import PrettyTable
import requests
import os
from .savemeps import save_meps
url = 'http://www.votewatch.eu/actions.php?euro_parlamentar_id={}&form_category=get_mep_acte&sEcho=1&iColumns=6&sColumns=&iDisplayStart=0&iDisplayLength={}&mDataProp_0=mysql_data&mDataProp_1=act_nume_full&mDataProp_2=euro_vot_valoare_special_vote_page&mDataProp_3=euro_vot_rol_euro_grup.rol_af&mDataProp_4=euro_domeniu_nume&mDataProp_5=euro_vot_valoare_text&sSearch=&bRegex=false&sSearch_0=&bRegex_0=false&bSearchable_0=true&sSearch_1=&bRegex_1=false&bSearchable_1=true&sSearch_2=&bRegex_2=false&bSearchable_2=true&sSearch_3=&bRegex_3=false&bSearchable_3=true&sSearch_4=&bRegex_4=false&bSearchable_4=true&sSearch_5=&bRegex_5=false&bSearchable_5=true&iSortingCols=1&iSortCol_0=0&sSortDir_0=desc&bSortable_0=true&bSortable_1=true&bSortable_2=true&bSortable_3=true&bSortable_4=true&bSortable_5=true&_=1486840527483'
def get_meps(country=None):
"""Reference list of countries and MEPs and their ids
for data retrieval.
Parameters
-----------
country: str
If None as per default prints a list of available
countries, otherwise prints a list of MEPs of the
given country.
"""
meps_path = os.path.expanduser("~/.meps")
if os.path.isfile(meps_path):
with open(meps_path, 'rb') as f:
meps = pickle.load(f)
if country is not None:
t = PrettyTable(['ID', 'Name', 'Country'])
for mep in meps:
if mep[2] == country.lower():
t.add_row([mep[0], mep[1].title(), mep[2].title()])
print(t)
else:
print("\nSearch through these countries:\n")
countries = []
for mep in meps:
countries.append(mep[2])
countries = set(countries)
t = PrettyTable(['Country'])
for country in countries:
t.add_row([country.title()])
print(t)
else:
save_meps()
get_meps(country)
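# Usage sketch: calling get_meps() with no argument prints the available
# countries; passing a country name prints the MEPs of that country together
# with the ids accepted by the EUvotes class below.
#
#     get_meps()           # table of countries
#     get_meps("italy")    # table of ID / Name / Country rows for Italian MEPs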
class EUvotes(object):
"""Retrieve MEP last votes from VoteWatch.eu,
print them in tabular format and summarize them.
It is possible to return None by printing via
stdout or to return results in various formats for
various uses.
Parameters
-----------
mep_id: int
The id of the MEP you want to check. To see a list
of MEPs and their ids use get_meps(country)
limit: int
The number of votes you want to check, default
is 50
"""
def __init__(self, mep_id, limit=50):
"""When launching initialization use given data
to retrieve all interesting info from Votewatch.eu
automatically.
"""
self.mep_id = abs(mep_id)
self.limit = abs(limit)
self.dates, self.domains, self.absent = self._get_votes(self.mep_id, self.limit)
self.dates = self._convert_to_date(self.dates)
self.period = self._last_vote_period(self.dates)
self.name = self._mep_name(self.mep_id)
def __str__(self):
printing = ("Data about last {} votes for {}".format(self.limit, self.name) + "\n\nTo see the data use `print_attendance` method, `summary=True` prints only a summary of votes")
return printing
def _mep_name(self, mep_id):
"""Get searched MEP name"""
meps_path = os.path.expanduser("~/.meps")
if os.path.isfile(meps_path):
with open(os.path.expanduser(meps_path), 'rb') as f:
meps = pickle.load(f)
return meps[abs(mep_id) - 1][1]
else:
save_meps()
self._mep_name(mep_id)
def _get_votes(self, mep_id, limit):
"""Get last `limit` votes for requested MEP"""
r = requests.get(url.format(abs(mep_id), abs(limit)))
data = json.loads(r.text)
dates = [vote['mysql_data_text'] for vote in data['all_votes']]
domains = [domain['euro_domeniu_nume'] for domain in data['all_votes']]
absent = [vote['euro_vot_rol_euro_grup']['rol_af'] for vote in data['all_votes']]
for i, vote in enumerate(absent):
if vote == "Didn't vote":
absent[i] = "Absent"
return dates, domains, absent
def _to_date(self, dates):
"""Helper method to convert str to dates"""
y, m, d = dates.split('-')
return date(int(y), int(m), int(d))
def _convert_to_date(self, dates):
"""Convert retrieved str dates to date objects"""
return [self._to_date(date) for date in dates]
def _last_vote_period(self, dates):
"""Transform dates to three reference periods"""
vote_period = []
for vote in dates:
period = date.today() - vote
if period < timedelta(weeks=1):
vote_period.append("This week")
elif period < timedelta(weeks=4):
vote_period.append("This month")
else:
vote_period.append("More than one month")
return vote_period
def change_limit(self, limit=50):
print("Updating limit from {} to {}".format(self.limit, abs(limit)))
self.__init__(self.mep_id, abs(limit))
def print_attendance(self, summary=False):
"""Print retrieved data to stdout in a nice tabular
format.
Parameters
-----------
summary: bool
If True prints a count of votes by tipology and
period of time.
Return
--------
None
"""
if summary:
counts = Counter(zip(self.period, self.absent))
t = PrettyTable(['Period', 'Vote', 'Count'])
periods = ["This week", "This month", "More than one month"]
voting = ["Absent", "Loyal", "Rebel", "No political line"]
for per in [(i, j) for i in periods for j in voting]:
t.add_row([per[0], per[1], counts[per]])
print("\nCount of last {} votes by period for {}. Percentage of absenteeism: {:.1%}\n".format(
self.limit, self.name.title(), Counter(self.absent)[voting[0]] / self.limit))
print(t)
else:
print("Last {} votes attendance for {}".format(self.limit, self.name.title()))
t = PrettyTable(['Date', 'Vote', 'Topic'])
for row in zip(self.dates, self.absent, self.domains):
t.add_row(row)
print(t)
def data_(self, shape='json', limit=50):
"""Return retrieved data in various formats for
various possible uses.
Parameters
-----------
shape: ['json', 'list', 'df']
Decide the format of the returned data.
limit: int
Get only last x votes retrieved, default is 50.
Return
-------
json, list or DataFrame depending on `shape` parameter
"""
if shape == 'json':
js = []
for day, presence, topic in zip(self.dates[:limit], self.absent[:limit], self.domains[:limit]):
js.append({'Date': day.isoformat(), 'Vote': presence, 'Topic': topic})
return json.dumps(js)
elif shape == 'list':
ls = []
for day, presence, topic in zip(self.dates[:limit], self.absent[:limit], self.domains[:limit]):
ls.append([day.isoformat(), presence, topic])
return ls
elif shape == 'df':
import pandas as pd
df = | pd.DataFrame({'Date': self.dates[:limit], 'Vote': self.absent[:limit], 'Topic': self.domains[:limit]}) | pandas.DataFrame |
""" write to a SQLite database with forms, templates
add new record, delete a record, edit/update a record
"""
from flask import Flask, render_template, request, flash, send_file, make_response, jsonify, abort, session, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask_login import login_required, LoginManager, login_user, UserMixin, logout_user, current_user
from flask_wtf import FlaskForm
from wtforms import SubmitField, SelectField, RadioField, HiddenField, StringField, IntegerField, FloatField, PasswordField
from wtforms.fields.html5 import DateField
from wtforms.validators import InputRequired, Optional, DataRequired
from datetime import date
import csv
import sqlite3
from io import StringIO, BytesIO
import os
import pandas as pd
from sqlalchemy import create_engine
import plotly.express as px
from plotly.offline import plot
DB_VAR=os.environ.get('HEROKU_POSTGRESQL_PINK_URL', None)
OUT_DB_VAR=os.environ.get('DATABASE_URL', None)
GROUP_NAME=os.environ.get('GROUP_NAME', None)
app = Flask(__name__)
# Flask-WTF requires an encryption key - the string can be anything
app.config['SECRET_KEY'] = '<KEY>'
# Flask-Bootstrap requires this line
Bootstrap(app)
# the name of the database; add path if necessary
app.config['SQLALCHEMY_BINDS'] = {
"db1":DB_VAR,
"db2":OUT_DB_VAR}
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
###Login Setting###
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
# this variable, db, will be used for all SQLAlchemy commands
db = SQLAlchemy(app)
# each table in the database needs a class to be created for it
# db.Model is required - don't change it
# identify all columns by name and data type
class Emissions(db.Model):
__tablename__ = 'records'
__bind_key__= "db1"
id = db.Column(db.Integer, primary_key=True)
kms = db.Column(db.Float)
transport = db.Column(db.String)
fuel = db.Column(db.String)
date = db.Column(db.String)
co2= db.Column(db.Float)
ch4= db.Column(db.Float)
user_name= db.Column(db.String)
updated = db.Column(db.String)
def __init__(self, kms, transport, fuel, date, co2, ch4, user_name, updated):
self.kms = kms
self.transport = transport
self.fuel = fuel
self.date = date
self.co2 = co2
self.ch4 = ch4
self.user_name = user_name
self.updated = updated
engine_local = create_engine(DB_VAR)
engine_super =create_engine(OUT_DB_VAR)
### SupeUser DB
class SuperUser(UserMixin,db.Model):
__tablename__ = 'users'
__bind_key__= "db2"
id = db.Column(db.Integer, primary_key=True)
student = db.Column(db.String)
user_name= db.Column(db.String)##If it breaks change back to Integer for Mixin
password = db.Column(db.String)
group_name= db.Column(db.String)
def __init__(self, user_name):
self.user_name= user_name
####Everything is recorded. nothing removed
class SuperBackUp(db.Model):
__tablename__= 'backup'
__bind_key__="db2"
id = db.Column(db.Integer, primary_key=True)
kms = db.Column(db.Float)
transport = db.Column(db.String)
fuel = db.Column(db.String)
date = db.Column(db.String)
co2= db.Column(db.Float)
ch4= db.Column(db.Float)
user_name= db.Column(db.String)
updated = db.Column(db.String)
def __init__(self, kms, transport, fuel, date, co2, ch4, user_name, updated):
self.kms = kms
self.transport = transport
self.fuel = fuel
self.date = date
self.co2 = co2
self.ch4 = ch4
self.user_name = user_name
self.updated = updated
###Global DB dynamically updated from sessions.
class SuperGlobal(db.Model):
__tablename__= 'global'
__bind_key__="db2"
id = db.Column(db.Integer, primary_key=True)
kms = db.Column(db.Float)
transport = db.Column(db.String)
fuel = db.Column(db.String)
date = db.Column(db.String)
co2= db.Column(db.Float)
ch4= db.Column(db.Float)
user_name= db.Column(db.String)
updated = db.Column(db.String)
group_name = db.Column(db.String)
def __init__(self, kms, transport, fuel, date, co2, ch4, user_name, updated, group_name):
self.kms = kms
self.transport = transport
self.fuel = fuel
self.date = date
self.co2 = co2
self.ch4 = ch4
self.user_name = user_name
self.updated = updated
self.group_name = group_name
@app.before_first_request
def before_first_request():
db.create_all()
# +++++++++++++++++++++++
# forms with Flask-WTF
class LoginRecord(FlaskForm):
user= StringField("User",validators=[InputRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField("Submit")
# form for add_record and edit_or_delete
# each field includes validation requirements and messages
class AddRecord(FlaskForm):
id_field = HiddenField()
##Transport
kms = FloatField("Kilometers",[InputRequired()])
transport_type = SelectField("Type of Transport",
[InputRequired()],
choices=[
('Bus', 'Bus'),
('Car', 'Car'),
('Plane', 'Plane'),
('Ferry', 'Ferry'),
('Scooter', 'E-Scooter'),
('Bicycle', 'Bicycle'),
('Motorbike',"Motorbike"),
('Walk', 'Walk')
])
fuel_type = SelectField("Fuel Type",
validators=[InputRequired()],choices=[])
date=DateField("Date",[InputRequired()])
gas =FloatField("kg/passenger km",[Optional()],description='Add CO2 kg/passenger km if known. \
Otherwise, leave blank and a default corresponding to the fuel \
type and vehicle average from "UK Government GHG Conversion Factors for Company Reporting" will be used')
submit = SubmitField("Submit")
##Emissions factor per transport in kg per passenger km
##++++++++++++++++++++++
efco2={"Bus":{"Diesel":0.10231,"CNG":0.08,"Petrol":0.10231,"No Fossil Fuel":0},
"Car":{"Hybrid":0.10567,"Petrol":0.18592,"Diesel":0.16453,"No Fossil Fuel":0},
"Plane":{"Jet Fuel":0.24298,"No Fossil Fuel":0},
"Ferry":{"Diesel":0.11131,"HFO":0.1131,"No Fossil Fuel":0},
"Motorbike":{"Petrol":0.09816,"No Fossil Fuel":0},
"Scooter":{"No Fossil Fuel":0},
"Bicycle":{"No Fossil Fuel":0},
"Walk":{"No Fossil Fuel":0}}
efch4={"Bus":{"Diesel":2e-5,"CNG":2.5e-3,"Petrol":2e-5,"No Fossil Fuel":0},
"Car":{"Hybrid":1.5e-4,"Petrol":3.1e-4,"Diesel":3e-6,"No Fossil Fuel":0},
"Plane":{"Jet Fuel":1.1e-4,"No Fossil Fuel":0},
"Ferry":{"DO":3e-5,"HFO":3e-5,"No Fossil Fuel":0},
"Motorbike":{"Petrol":2.1e-3,"No Fossil Fuel":0},
"Scooter":{"No Fossil Fuel":0},
"Bicycle":{"No Fossil Fuel":0},
"Walk":{"No Fossil Fuel":0}}
#+++++++++++++++++++++++
# small form
class DeleteForm(FlaskForm):
id_field = HiddenField()
purpose = HiddenField()
submit = SubmitField('Delete This Record')
# +++++++++++++++++++++++
# get local date - does not account for time zone
# note: date was imported at top of script
def stringdate():
today = date.today()
date_list = str(today).split('-')
    # build string in format 2000-01-01 (year-month-day)
date_string = date_list[0] + "-" + date_list[1] + "-" + date_list[2]
return date_string
###routes
@login_manager.user_loader
def load_user(user_id):
return SuperUser(user_id)
@app.route('/login',methods=['GET', 'POST'])
def login():
formlog=LoginRecord(request.form)
if request.method =="POST" and formlog.validate_on_submit():
##check user
user=SuperUser.query.filter_by(user_name=formlog.user.data).first()
if user and formlog.password.data == user.password and GROUP_NAME==user.group_name:
login_user(user)
session.pop('_flashes', None)
return (redirect(url_for("index")))
else:
            # if the user or password is incorrect, re-render the login page with an error message
message = "User or password incorrect "
return render_template('login.html', formlog=formlog, message=message)
return render_template('login.html', formlog = formlog)
@app.route('/')
@login_required
def index():
    # get a list of unique values in the transport column
user_rec=SuperUser.query.filter_by(id=current_user.user_name).first().student
transport = Emissions.query.with_entities(Emissions.transport).distinct()
###Outer Plot
global_emissions=pd.read_sql("SELECT * FROM global",engine_super)
global_emissions["date"]= pd.to_datetime(global_emissions["date"],yearfirst=True)
global_emissions=global_emissions.sort_values(by="date")
global_emissions=global_emissions.groupby(["date","group_name"]).agg({"co2":sum})
global_emissions=global_emissions.reset_index()
if global_emissions.shape[0]!=0:
global_emissions["date"]=global_emissions["date"].dt.strftime('%Y-%m-%d %H:%M:%S')
fig_global = px.line(global_emissions, x="date", y="co2", color='group_name',
labels={
"co2": "CO2 kg/passenger km",
"date": "Date",
"group_name": "Group Name"
},
title="Emissions per Group")
fig_global.update_traces(mode='markers+lines')
plot_div_global = plot(fig_global, output_type='div', include_plotlyjs=False)
else:
plot_div_global = ""
if transport.first() is not None: ##To avoid crash when DB is empty
##Inner plot group
group_emissions= | pd.read_sql("SELECT * FROM records",engine_local) | pandas.read_sql |
import os
import pandas as pd
import pyspark
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.pipeline import Pipeline
from pyspark.version import __version__ as pyspark_version
import pytest
from sklearn import datasets
import shutil
from mlflow import pyfunc
from mlflow import spark as sparkm
from mlflow import tracking
from mlflow.utils.environment import _mlflow_conda_env
from tests.helper_functions import score_model_in_sagemaker_docker_container
from tests.pyfunc.test_spark import score_model_as_udf
def test_hadoop_filesystem(tmpdir):
# copy local dir to and back from HadoopFS and make sure the results match
from mlflow.spark import _HadoopFileSystem as FS
test_dir_0 = os.path.join(str(tmpdir), "expected")
test_file_0 = os.path.join(test_dir_0, "root", "file_0")
test_dir_1 = os.path.join(test_dir_0, "root", "subdir")
test_file_1 = os.path.join(test_dir_1, "file_1")
os.makedirs(os.path.dirname(test_file_0))
with open(test_file_0, "w") as f:
f.write("test0")
os.makedirs(os.path.dirname(test_file_1))
with open(test_file_1, "w") as f:
f.write("test1")
remote = "/tmp/mlflow/test0"
FS.copy_from_local_file(test_dir_0, remote, removeSrc=False)
local = os.path.join(str(tmpdir), "actual")
FS.copy_to_local_file(remote, local, removeSrc=True)
assert sorted(os.listdir(os.path.join(local, "root"))) == sorted([
"subdir", "file_0", ".file_0.crc"])
assert sorted(os.listdir(os.path.join(local, "root", "subdir"))) == sorted([
"file_1", ".file_1.crc"])
# compare the files
with open(os.path.join(test_dir_0, "root", "file_0")) as expected_f:
with open(os.path.join(local, "root", "file_0")) as actual_f:
assert expected_f.read() == actual_f.read()
with open(os.path.join(test_dir_0, "root", "subdir", "file_1")) as expected_f:
with open(os.path.join(local, "root", "subdir", "file_1")) as actual_f:
assert expected_f.read() == actual_f.read()
# make sure we cleanup
assert not os.path.exists(FS._remote_path(remote).toString()) # skip file: prefix
FS.copy_from_local_file(test_dir_0, remote, removeSrc=False)
assert os.path.exists(FS._remote_path(remote).toString()) # skip file: prefix
FS.delete(remote)
assert not os.path.exists(FS._remote_path(remote).toString()) # skip file: prefix
@pytest.mark.large
def test_model_export(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(conda_env, additional_pip_deps=["pyspark=={}".format(pyspark_version)])
iris = datasets.load_iris()
X = iris.data # we only take the first two features.
y = iris.target
pandas_df = | pd.DataFrame(X, columns=iris.feature_names) | pandas.DataFrame |
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.metrics import matthews_corrcoef, confusion_matrix
from sklearn.metrics import classification_report as class_re
from sklearn.preprocessing import MinMaxScaler
from openpyxl import load_workbook
from openpyxl import Workbook
import pandas as pd
from collections import namedtuple
import numpy as np
from os import path
def split_transform(X, Y, states=20):
"""Given X and Y returns a split and scaled version of them"""
scaling = MinMaxScaler()
esterase = ['EH51(22)', 'EH75(16)', 'EH46(23)', 'EH98(11)', 'EH49(23)']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20, random_state=states, stratify=Y)
X_train = X_train.loc[[x for x in X_train.index if x not in esterase]]
X_test = X_test.loc[[x for x in X_test.index if x not in esterase]]
Y_train = Y_train.loc[[x for x in Y_train.index if x not in esterase]]
Y_test = Y_test.loc[[x for x in Y_test.index if x not in esterase]]
transformed_x = scaling.fit_transform(X_train)
transformed_x = pd.DataFrame(transformed_x)
transformed_x.index = X_train.index
transformed_x.columns = X_train.columns
test_x = scaling.transform(X_test)
test_x = pd.DataFrame(test_x)
test_x.index = X_test.index
test_x.columns = X_test.columns
return transformed_x, test_x, Y_train, Y_test, X_train, X_test
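# Usage sketch (X and Y stand for the feature matrix and labels loaded elsewhere
# in this project): the MinMaxScaler is fit on the training split only, so the
# test split is transformed without information leakage.
#
#     transformed_x, test_x, Y_train, Y_test, X_train, X_test = split_transform(X, Y, states=20)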
def vote(pred1, pred2, pred3=None, pred4=None, pred5=None):
"""Hard voting for the ensembles"""
vote_ = []
index = []
if pred3 is None:
mean = np.mean([pred1, pred2], axis=0)
for s, x in enumerate(mean):
if x == 1 or x == 0:
vote_.append(int(x))
else:
vote_.append(pred2[s])
index.append(s)
elif pred4 is None and pred5 is None:
mean = np.mean([pred1, pred2, pred3], axis=0)
for s, x in enumerate(mean):
if x == 1 or x == 0:
vote_.append(int(x))
elif x > 0.5:
vote_.append(1)
index.append(s)
else:
vote_.append(0)
index.append(s)
elif pred5 is None:
mean = np.mean([pred1, pred2, pred3, pred4], axis=0)
for s, x in enumerate(mean):
if x == 1 or x == 0:
vote_.append(int(x))
elif x > 0.5:
vote_.append(1)
index.append(s)
elif x < 0.5:
vote_.append(0)
index.append(s)
else:
vote_.append(pred4[s])
index.append(s)
else:
mean = np.mean([pred1, pred2, pred3, pred4], axis=0)
for s, x in enumerate(mean):
if x == 1 or x == 0:
vote_.append(int(x))
elif x > 0.5:
vote_.append(1)
index.append(s)
else:
vote_.append(0)
index.append(s)
return vote_, index
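# Worked example of the hard vote with three classifiers (illustrative values):
# positions where all predictions agree keep that label, positions where they
# disagree are decided by majority and their indices are recorded.
#
#     pred1 = [1, 0, 0, 1]
#     pred2 = [1, 1, 0, 0]
#     pred3 = [1, 0, 1, 0]
#     vote(pred1, pred2, pred3)   # -> ([1, 0, 0, 0], [1, 2, 3])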
def print_score(Y_test, y_grid, train_predicted, Y_train, test_index=None, train_index=None, mode=None):
""" The function prints the scores of the models and the prediction performance """
score_tuple = namedtuple("scores", ["test_confusion", "tr_report", "te_report",
"train_mat", "test_mat", "train_confusion"])
target_names = ["class 0", "class 1"]
# looking at the scores of those predicted by al 3 of them
if mode:
Y_test = Y_test.iloc[[x for x in range(len(Y_test)) if x not in test_index]]
Y_train = Y_train.iloc[[x for x in range(len(Y_train)) if x not in train_index]]
y_grid = [y_grid[x] for x in range(len(y_grid)) if x not in test_index]
train_predicted = [train_predicted[x] for x in range(len(train_predicted)) if x not in train_index]
# Training scores
train_confusion = confusion_matrix(Y_train, train_predicted)
train_matthews = matthews_corrcoef(Y_train, train_predicted)
# print(f"Y_train : {Y_train}, predicted: {train_predicted}")
tr_report = class_re(Y_train, train_predicted, target_names=target_names, output_dict=True)
# Test metrics
test_confusion = confusion_matrix(Y_test, y_grid)
test_matthews = matthews_corrcoef(Y_test, y_grid)
te_report = class_re(Y_test, y_grid, target_names=target_names, output_dict=True)
all_scores = score_tuple(*[test_confusion, tr_report, te_report, train_matthews,
test_matthews, train_confusion])
return all_scores
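# Usage sketch (the predictions are placeholders produced by any fitted
# classifier): the returned namedtuple bundles confusion matrices, per-class
# reports and Matthews correlation coefficients for both splits.
#
#     scores = print_score(Y_test, test_pred, train_pred, Y_train)
#     scores.test_mat    # Matthews correlation on the test split
#     scores.te_report   # per-class precision/recall/F1 as a dict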
def to_dataframe(score_list, name):
""" A function that transforms the data into dataframes"""
matrix = namedtuple("confusion_matrix", ["true_n", "false_p", "false_n", "true_p"])
# Taking the confusion matrix
test_confusion = matrix(*score_list.test_confusion.ravel())
training_confusion = matrix(*score_list.train_confusion.ravel())
# Separating confusion matrix into individual elements
test_true_n = test_confusion.true_n
test_false_p = test_confusion.false_p
test_false_n = test_confusion.false_n
test_true_p = test_confusion.true_p
training_true_n = training_confusion.true_n
training_false_p = training_confusion.false_p
training_false_n = training_confusion.false_n
training_true_p = training_confusion.true_p
# constructing the dataframe
dataframe = pd.DataFrame([test_true_n, test_true_p, test_false_p, test_false_n, training_true_n,
training_true_p, training_false_p, training_false_n, score_list.test_mat,
score_list.train_mat])
dataframe = dataframe.transpose()
dataframe.columns = ["test_tn", "test_tp", "test_fp", "test_fn", "train_tn", "train_tp",
"train_fp", "train_fn", "test_Mat", "train_Mat", ]
dataframe.index = name
te_report = pd.DataFrame(score_list.te_report).transpose()
tr_report = | pd.DataFrame(score_list.tr_report) | pandas.DataFrame |
import numpy as np
import torch
import pandas as pd
from matplotlib import pyplot as plt
import sys
import dataloader
# TODO figsize argument
HALF_FIGSIZE = 113
def save_figures(X, y, savename):
X_, y_ = X.squeeze(dim=1).detach().cpu().numpy(), y.detach().cpu().numpy()
fig = plt.figure(figsize=(12, 12))
fig.subplots_adjust(
left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05
)
for i in range(16):
axis = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
axis.imshow(X_[i])
points = np.vstack(np.split(y_[i], 15)).T * HALF_FIGSIZE + HALF_FIGSIZE
axis.plot(points[0], points[1], 'o', color='red')
fig.savefig(savename)
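# Hypothetical usage sketch (loader and shapes are assumptions, not from the
# original repo): X is a batch of single-channel face crops shaped (N, 1, H, W)
# and y holds 30 keypoint coordinates per image, normalised so they can be
# rescaled by HALF_FIGSIZE above.
#   X_batch, y_batch = next(iter(train_loader))
#   save_figures(X_batch[:16], y_batch[:16], 'keypoints_preview.png')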
def generate_csv(output, filepath='data/IdLookupTable.csv'):
lookid_data = | pd.read_csv(filepath) | pandas.read_csv |
""" Sample dataframe for testing.
key: SQL data type
---
the SQL data type name, prefixed with an underscore
value: pd.Series([LowerLimit, UpperLimit, NULL, Truncation])
-----
LowerLimit: SQL lower limit or pandas lower limit if it is more restrictive
UpperLimit: SQL upper limit or pandas upper limit if it is more restrictive
NULL: SQL NULL / pandas <NA>
Truncation: truncated values due to SQL precision limit
"""
import pandas as pd
pd.options.mode.chained_assignment = "raise"
dataframe = pd.DataFrame(
{
"_bit": pd.Series([False, True, None, False], dtype="boolean"),
"_tinyint": | pd.Series([0, 255, None, None], dtype="UInt8") | pandas.Series |
import os
import warnings
from six import BytesIO
from six.moves import cPickle
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
import pandas as pd
import pandas.util.testing as tm
import pytest
from sm2 import datasets
from sm2.regression.linear_model import OLS
from sm2.tsa.arima_model import AR, ARMA, ARIMA
from sm2.tsa.arima_process import arma_generate_sample
from sm2.tools.sm_exceptions import MissingDataError
from .results import results_arma, results_arima
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
current_path = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(current_path, 'results', 'y_arma_data.csv')
y_arma = pd.read_csv(path, float_precision='high').values
cpi_dates = pd.PeriodIndex(start='1959q1', end='2009q3', freq='Q')
sun_dates = pd.PeriodIndex(start='1700', end='2008', freq='A')
cpi_predict_dates = pd.PeriodIndex(start='2009q3', end='2015q4', freq='Q')
sun_predict_dates = pd.PeriodIndex(start='2008', end='2033', freq='A')
@pytest.mark.not_vetted
@pytest.mark.skip(reason="fa, Arma not ported from upstream")
def test_compare_arma():
# dummies to avoid flake8 warnings until porting
fa = None
Arma = None
# import statsmodels.sandbox.tsa.fftarma as fa
# from statsmodels.tsa.arma_mle import Arma
# this is a preliminary test to compare
# arma_kf, arma_cond_ls and arma_cond_mle
# the results returned by the fit methods are incomplete
# for now without random.seed
np.random.seed(9876565)
famod = fa.ArmaFft([1, -0.5], [1., 0.4], 40)
x = famod.generate_sample(nsample=200, burnin=1000)
modkf = ARMA(x, (1, 1))
reskf = modkf.fit(trend='nc', disp=-1)
dres = reskf
modc = Arma(x)
resls = modc.fit(order=(1, 1))
rescm = modc.fit_mle(order=(1, 1), start_params=[0.4, 0.4, 1.], disp=0)
# decimal 1 corresponds to threshold of 5% difference
# still different sign corrected
assert_almost_equal(resls[0] / dres.params,
np.ones(dres.params.shape),
decimal=1)
# TODO: Is the next comment still accurate. It is retained from upstream
# where there was a commented-out assertion after the comment
# rescm also contains variance estimate as last element of params
assert_almost_equal(rescm.params[:-1] / dres.params,
np.ones(dres.params.shape),
decimal=1)
@pytest.mark.not_vetted
class CheckArmaResultsMixin(object):
"""
res2 are the results from gretl. They are in results/results_arma.
res1 are from sm2
"""
decimal_params = DECIMAL_4
def test_params(self):
assert_almost_equal(self.res1.params,
self.res2.params,
self.decimal_params)
decimal_aic = DECIMAL_4
def test_aic(self):
assert_almost_equal(self.res1.aic,
self.res2.aic,
self.decimal_aic)
decimal_bic = DECIMAL_4
def test_bic(self):
assert_almost_equal(self.res1.bic,
self.res2.bic,
self.decimal_bic)
decimal_arroots = DECIMAL_4
def test_arroots(self):
assert_almost_equal(self.res1.arroots,
self.res2.arroots,
self.decimal_arroots)
decimal_maroots = DECIMAL_4
def test_maroots(self):
assert_almost_equal(self.res1.maroots,
self.res2.maroots,
self.decimal_maroots)
decimal_bse = DECIMAL_2
def test_bse(self):
assert_almost_equal(self.res1.bse,
self.res2.bse,
self.decimal_bse)
decimal_cov_params = DECIMAL_4
def test_covparams(self):
assert_almost_equal(self.res1.cov_params(),
self.res2.cov_params,
self.decimal_cov_params)
decimal_hqic = DECIMAL_4
def test_hqic(self):
assert_almost_equal(self.res1.hqic,
self.res2.hqic,
self.decimal_hqic)
decimal_llf = DECIMAL_4
def test_llf(self):
assert_almost_equal(self.res1.llf,
self.res2.llf,
self.decimal_llf)
decimal_resid = DECIMAL_4
def test_resid(self):
assert_almost_equal(self.res1.resid,
self.res2.resid,
self.decimal_resid)
decimal_fittedvalues = DECIMAL_4
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues,
self.res2.fittedvalues,
self.decimal_fittedvalues)
decimal_pvalues = DECIMAL_2
def test_pvalues(self):
assert_almost_equal(self.res1.pvalues,
self.res2.pvalues,
self.decimal_pvalues)
decimal_t = DECIMAL_2 # only 2 decimal places in gretl output
def test_tvalues(self):
assert_almost_equal(self.res1.tvalues,
self.res2.tvalues,
self.decimal_t)
decimal_sigma2 = DECIMAL_4
def test_sigma2(self):
assert_almost_equal(self.res1.sigma2,
self.res2.sigma2,
self.decimal_sigma2)
@pytest.mark.smoke
def test_summary(self):
self.res1.summary()
@pytest.mark.not_vetted
class CheckForecastMixin(object):
decimal_forecast = DECIMAL_4
def test_forecast(self):
assert_almost_equal(self.res1.forecast_res,
self.res2.forecast,
self.decimal_forecast)
decimal_forecasterr = DECIMAL_4
def test_forecasterr(self):
assert_almost_equal(self.res1.forecast_err,
self.res2.forecasterr,
self.decimal_forecasterr)
@pytest.mark.not_vetted
class CheckDynamicForecastMixin(object):
decimal_forecast_dyn = 4
def test_dynamic_forecast(self):
assert_almost_equal(self.res1.forecast_res_dyn,
self.res2.forecast_dyn,
self.decimal_forecast_dyn)
#def test_forecasterr(self):
# assert_almost_equal(self.res1.forecast_err_dyn,
# self.res2.forecasterr_dyn,
# DECIMAL_4)
@pytest.mark.not_vetted
class CheckArimaResultsMixin(CheckArmaResultsMixin):
def test_order(self):
assert self.res1.k_diff == self.res2.k_diff
assert self.res1.k_ar == self.res2.k_ar
assert self.res1.k_ma == self.res2.k_ma
decimal_predict_levels = DECIMAL_4
def test_predict_levels(self):
assert_almost_equal(self.res1.predict(typ='levels'),
self.res2.linear,
self.decimal_predict_levels)
@pytest.mark.not_vetted
class Test_Y_ARMA11_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 0]
cls.res1 = ARMA(endog, order=(1, 1)).fit(trend='nc', disp=-1)
fc_res, fc_err, ci = cls.res1.forecast(10)
cls.res1.forecast_res = fc_res
cls.res1.forecast_err = fc_err
cls.res2 = results_arma.Y_arma11()
# TODO: share with test_ar? other test classes?
def test_pickle(self):
fh = BytesIO()
# test wrapped results load save pickle
self.res1.save(fh)
fh.seek(0, 0)
res_unpickled = self.res1.__class__.load(fh)
assert type(res_unpickled) is type(self.res1) # noqa:E721
# TODO: Test equality instead of just type equality?
@pytest.mark.not_vetted
class Test_Y_ARMA14_NoConst(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 1]
cls.res1 = ARMA(endog, order=(1, 4)).fit(trend='nc', disp=-1)
cls.res2 = results_arma.Y_arma14()
@pytest.mark.not_vetted
@pytest.mark.slow
class Test_Y_ARMA41_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
decimal_maroots = DECIMAL_3
@classmethod
def setup_class(cls):
endog = y_arma[:, 2]
cls.res1 = ARMA(endog, order=(4, 1)).fit(trend='nc', disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma41()
@pytest.mark.not_vetted
class Test_Y_ARMA22_NoConst(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 3]
cls.res1 = ARMA(endog, order=(2, 2)).fit(trend='nc', disp=-1)
cls.res2 = results_arma.Y_arma22()
@pytest.mark.not_vetted
class Test_Y_ARMA50_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 4]
cls.res1 = ARMA(endog, order=(5, 0)).fit(trend='nc', disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma50()
@pytest.mark.not_vetted
class Test_Y_ARMA02_NoConst(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 5]
cls.res1 = ARMA(endog, order=(0, 2)).fit(trend='nc', disp=-1)
cls.res2 = results_arma.Y_arma02()
@pytest.mark.not_vetted
class Test_Y_ARMA11_Const(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 6]
cls.res1 = ARMA(endog, order=(1, 1)).fit(trend="c", disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma11c()
@pytest.mark.not_vetted
class Test_Y_ARMA14_Const(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 7]
cls.res1 = ARMA(endog, order=(1, 4)).fit(trend="c", disp=-1)
cls.res2 = results_arma.Y_arma14c()
@pytest.mark.not_vetted
class Test_Y_ARMA41_Const(CheckArmaResultsMixin, CheckForecastMixin):
decimal_cov_params = DECIMAL_3
decimal_fittedvalues = DECIMAL_3
decimal_resid = DECIMAL_3
decimal_params = DECIMAL_3
@classmethod
def setup_class(cls):
endog = y_arma[:, 8]
cls.res2 = results_arma.Y_arma41c()
cls.res1 = ARMA(endog, order=(4, 1)).fit(trend="c", disp=-1,
start_params=cls.res2.params)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
@pytest.mark.not_vetted
class Test_Y_ARMA22_Const(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 9]
cls.res1 = ARMA(endog, order=(2, 2)).fit(trend="c", disp=-1)
cls.res2 = results_arma.Y_arma22c()
@pytest.mark.not_vetted
class Test_Y_ARMA50_Const(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 10]
cls.res1 = ARMA(endog, order=(5, 0)).fit(trend="c", disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma50c()
@pytest.mark.not_vetted
class Test_Y_ARMA02_Const(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 11]
cls.res1 = ARMA(endog, order=(0, 2)).fit(trend="c", disp=-1)
cls.res2 = results_arma.Y_arma02c()
# cov_params and tvalues are off still but not as much vs. R
@pytest.mark.not_vetted
class Test_Y_ARMA11_NoConst_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 0]
cls.res1 = ARMA(endog, order=(1, 1)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma11("css")
# better vs. R
@pytest.mark.not_vetted
class Test_Y_ARMA14_NoConst_CSS(CheckArmaResultsMixin):
decimal_fittedvalues = DECIMAL_3
decimal_resid = DECIMAL_3
decimal_t = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 1]
cls.res1 = ARMA(endog, order=(1, 4)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma14("css")
# bse, etc. better vs. R
# maroot is off because maparams is off a bit (adjust tolerance?)
@pytest.mark.not_vetted
class Test_Y_ARMA41_NoConst_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
decimal_pvalues = 0
decimal_cov_params = DECIMAL_3
decimal_maroots = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 2]
cls.res1 = ARMA(endog, order=(4, 1)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma41("css")
# same notes as above
@pytest.mark.not_vetted
class Test_Y_ARMA22_NoConst_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
decimal_resid = DECIMAL_3
decimal_pvalues = DECIMAL_1
decimal_fittedvalues = DECIMAL_3
@classmethod
def setup_class(cls):
endog = y_arma[:, 3]
cls.res1 = ARMA(endog, order=(2, 2)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma22("css")
# NOTE: gretl just uses least squares for AR CSS
# so BIC, etc. is
# -2*res1.llf + np.log(nobs)*(res1.q+res1.p+res1.k)
# with no adjustment for p and no extra sigma estimate
# NOTE: so our tests use x-12 arima results which agree with us and are
# consistent with the rest of the models
@pytest.mark.not_vetted
class Test_Y_ARMA50_NoConst_CSS(CheckArmaResultsMixin):
decimal_t = 0
decimal_llf = DECIMAL_1 # looks like rounding error?
@classmethod
def setup_class(cls):
endog = y_arma[:, 4]
cls.res1 = ARMA(endog, order=(5, 0)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma50("css")
@pytest.mark.not_vetted
class Test_Y_ARMA02_NoConst_CSS(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 5]
cls.res1 = ARMA(endog, order=(0, 2)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma02("css")
# NOTE: our results are close to --x-12-arima option and R
@pytest.mark.not_vetted
class Test_Y_ARMA11_Const_CSS(CheckArmaResultsMixin):
decimal_params = DECIMAL_3
decimal_cov_params = DECIMAL_3
decimal_t = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 6]
cls.res1 = ARMA(endog, order=(1, 1)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma11c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA14_Const_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
decimal_pvalues = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 7]
cls.res1 = ARMA(endog, order=(1, 4)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma14c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA41_Const_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
decimal_cov_params = DECIMAL_1
decimal_maroots = DECIMAL_3
decimal_bse = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 8]
cls.res1 = ARMA(endog, order=(4, 1)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma41c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA22_Const_CSS(CheckArmaResultsMixin):
decimal_t = 0
decimal_pvalues = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 9]
cls.res1 = ARMA(endog, order=(2, 2)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma22c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA50_Const_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
decimal_params = DECIMAL_3
decimal_cov_params = DECIMAL_2
@classmethod
def setup_class(cls):
endog = y_arma[:, 10]
cls.res1 = ARMA(endog, order=(5, 0)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma50c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA02_Const_CSS(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 11]
cls.res1 = ARMA(endog, order=(0, 2)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma02c("css")
@pytest.mark.not_vetted
class Test_ARIMA101(CheckArmaResultsMixin):
# just make sure this works
@classmethod
def setup_class(cls):
endog = y_arma[:, 6]
cls.res1 = ARIMA(endog, (1, 0, 1)).fit(trend="c", disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma11c()
cls.res2.k_diff = 0
cls.res2.k_ar = 1
cls.res2.k_ma = 1
@pytest.mark.not_vetted
class Test_ARIMA111(CheckArimaResultsMixin, CheckForecastMixin,
CheckDynamicForecastMixin):
decimal_llf = 3
decimal_aic = 3
decimal_bic = 3
decimal_cov_params = 2 # this used to be better?
decimal_t = 0
@classmethod
def setup_class(cls):
cpi = datasets.macrodata.load_pandas().data['cpi'].values
cls.res1 = ARIMA(cpi, (1, 1, 1)).fit(disp=-1)
cls.res2 = results_arima.ARIMA111()
# make sure endog names changes to D.cpi
(cls.res1.forecast_res,
cls.res1.forecast_err,
conf_int) = cls.res1.forecast(25)
# TODO: fix the indexing for the end here, I don't think this is right
# if we're going to treat it like indexing
# the forecast from 2005Q1 through 2009Q4 is indices
# 184 through 227 not 226
# note that the first one counts in the count so 164 + 64 is 65
# predictions
cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=164 + 63,
typ='levels',
dynamic=True)
def test_freq(self):
assert_almost_equal(self.res1.arfreq, [0.0000], 4)
assert_almost_equal(self.res1.mafreq, [0.0000], 4)
@pytest.mark.not_vetted
class Test_ARIMA111CSS(CheckArimaResultsMixin, CheckForecastMixin,
CheckDynamicForecastMixin):
decimal_forecast = 2
decimal_forecast_dyn = 2
decimal_forecasterr = 3
decimal_arroots = 3
decimal_cov_params = 3
decimal_hqic = 3
decimal_maroots = 3
decimal_t = 1
decimal_fittedvalues = 2 # because of rounding when copying
decimal_resid = 2
decimal_predict_levels = DECIMAL_2
@classmethod
def setup_class(cls):
cpi = datasets.macrodata.load_pandas().data['cpi'].values
cls.res1 = ARIMA(cpi, (1, 1, 1)).fit(disp=-1, method='css')
cls.res2 = results_arima.ARIMA111(method='css')
cls.res2.fittedvalues = - cpi[1:-1] + cls.res2.linear
# make sure endog names changes to D.cpi
(fc_res, fc_err, conf_int) = cls.res1.forecast(25)
cls.res1.forecast_res = fc_res
cls.res1.forecast_err = fc_err
cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=164 + 63,
typ='levels',
dynamic=True)
@pytest.mark.not_vetted
class Test_ARIMA112CSS(CheckArimaResultsMixin):
decimal_llf = 3
decimal_aic = 3
decimal_bic = 3
decimal_arroots = 3
decimal_maroots = 2
decimal_t = 1
decimal_resid = 2
decimal_fittedvalues = 3
decimal_predict_levels = DECIMAL_3
@classmethod
def setup_class(cls):
cpi = datasets.macrodata.load_pandas().data['cpi'].values
cls.res1 = ARIMA(cpi, (1, 1, 2)).fit(disp=-1, method='css',
start_params=[.905322, -.692425,
1.07366, 0.172024])
cls.res2 = results_arima.ARIMA112(method='css')
cls.res2.fittedvalues = - cpi[1:-1] + cls.res2.linear
# make sure endog names changes to D.cpi
#(cls.res1.forecast_res,
# cls.res1.forecast_err,
# conf_int) = cls.res1.forecast(25)
#cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=226,
# typ='levels',
# dynamic=True)
# TODO: fix the indexing for the end here, I don't think this is right
# if we're going to treat it like indexing
# the forecast from 2005Q1 through 2009Q4 is indices
# 184 through 227 not 226
# note that the first one counts in the count so 164 + 64 is 65
# predictions
#cls.res1.forecast_res_dyn = self.predict(start=164, end=164+63,
# typ='levels', dynamic=True)
# since we got from gretl don't have linear prediction in differences
def test_freq(self):
assert_almost_equal(self.res1.arfreq, [0.5000], 4)
assert_almost_equal(self.res1.mafreq, [0.5000, 0.5000], 4)
#class Test_ARIMADates(CheckArmaResults, CheckForecast, CheckDynamicForecast):
# @classmethod
# def setup_class(cls):
# cpi = datasets.macrodata.load_pandas().data['cpi'].values
# dates = pd.date_range('1959', periods=203, freq='Q')
# cls.res1 = ARIMA(cpi, dates=dates, freq='Q').fit(order=(1, 1, 1),
# disp=-1)
# cls.res2 = results_arima.ARIMA111()
# # make sure endog names changes to D.cpi
# cls.decimal_llf = 3
# cls.decimal_aic = 3
# cls.decimal_bic = 3
# (cls.res1.forecast_res,
# cls.res1.forecast_err,
# conf_int) = cls.res1.forecast(25)
@pytest.mark.not_vetted
@pytest.mark.slow
def test_start_params_bug():
data = np.array([
1368., 1187, 1090, 1439, 2362, 2783, 2869, 2512, 1804,
1544, 1028, 869, 1737, 2055, 1947, 1618, 1196, 867, 997, 1862, 2525,
3250, 4023, 4018, 3585, 3004, 2500, 2441, 2749, 2466, 2157, 1847,
1463, 1146, 851, 993, 1448, 1719, 1709, 1455, 1950, 1763, 2075, 2343,
3570, 4690, 3700, 2339, 1679, 1466, 998, 853, 835, 922, 851, 1125,
1299, 1105, 860, 701, 689, 774, 582, 419, 846, 1132, 902, 1058, 1341,
1551, 1167, 975, 786, 759, 751, 649, 876, 720, 498, 553, 459, 543,
447, 415, 377, 373, 324, 320, 306, 259, 220, 342, 558, 825, 994,
1267, 1473, 1601, 1896, 1890, 2012, 2198, 2393, 2825, 3411, 3406,
2464, 2891, 3685, 3638, 3746, 3373, 3190, 2681, 2846, 4129, 5054,
5002, 4801, 4934, 4903, 4713, 4745, 4736, 4622, 4642, 4478, 4510,
4758, 4457, 4356, 4170, 4658, 4546, 4402, 4183, 3574, 2586, 3326,
3948, 3983, 3997, 4422, 4496, 4276, 3467, 2753, 2582, 2921, 2768,
2789, 2824, 2482, 2773, 3005, 3641, 3699, 3774, 3698, 3628, 3180,
3306, 2841, 2014, 1910, 2560, 2980, 3012, 3210, 3457, 3158, 3344,
3609, 3327, 2913, 2264, 2326, 2596, 2225, 1767, 1190, 792, 669,
589, 496, 354, 246, 250, 323, 495, 924, 1536, 2081, 2660, 2814, 2992,
3115, 2962, 2272, 2151, 1889, 1481, 955, 631, 288, 103, 60, 82, 107,
185, 618, 1526, 2046, 2348, 2584, 2600, 2515, 2345, 2351, 2355,
2409, 2449, 2645, 2918, 3187, 2888, 2610, 2740, 2526, 2383, 2936,
2968, 2635, 2617, 2790, 3906, 4018, 4797, 4919, 4942, 4656, 4444,
3898, 3908, 3678, 3605, 3186, 2139, 2002, 1559, 1235, 1183, 1096,
673, 389, 223, 352, 308, 365, 525, 779, 894, 901, 1025, 1047, 981,
902, 759, 569, 519, 408, 263, 156, 72, 49, 31, 41, 192, 423, 492,
552, 564, 723, 921, 1525, 2768, 3531, 3824, 3835, 4294, 4533, 4173,
4221, 4064, 4641, 4685, 4026, 4323, 4585, 4836, 4822, 4631, 4614,
4326, 4790, 4736, 4104, 5099, 5154, 5121, 5384, 5274, 5225, 4899,
5382, 5295, 5349, 4977, 4597, 4069, 3733, 3439, 3052, 2626, 1939,
1064, 713, 916, 832, 658, 817, 921, 772, 764, 824, 967, 1127, 1153,
824, 912, 957, 990, 1218, 1684, 2030, 2119, 2233, 2657, 2652, 2682,
2498, 2429, 2346, 2298, 2129, 1829, 1816, 1225, 1010, 748, 627, 469,
576, 532, 475, 582, 641, 605, 699, 680, 714, 670, 666, 636, 672,
679, 446, 248, 134, 160, 178, 286, 413, 676, 1025, 1159, 952, 1398,
1833, 2045, 2072, 1798, 1799, 1358, 727, 353, 347, 844, 1377, 1829,
2118, 2272, 2745, 4263, 4314, 4530, 4354, 4645, 4547, 5391, 4855,
4739, 4520, 4573, 4305, 4196, 3773, 3368, 2596, 2596, 2305, 2756,
3747, 4078, 3415, 2369, 2210, 2316, 2263, 2672, 3571, 4131, 4167,
4077, 3924, 3738, 3712, 3510, 3182, 3179, 2951, 2453, 2078, 1999,
2486, 2581, 1891, 1997, 1366, 1294, 1536, 2794, 3211, 3242, 3406,
3121, 2425, 2016, 1787, 1508, 1304, 1060, 1342, 1589, 2361, 3452,
2659, 2857, 3255, 3322, 2852, 2964, 3132, 3033, 2931, 2636, 2818, 3310,
3396, 3179, 3232, 3543, 3759, 3503, 3758, 3658, 3425, 3053, 2620, 1837,
923, 712, 1054, 1376, 1556, 1498, 1523, 1088, 728, 890, 1413, 2524,
3295, 4097, 3993, 4116, 3874, 4074, 4142, 3975, 3908, 3907, 3918, 3755,
3648, 3778, 4293, 4385, 4360, 4352, 4528, 4365, 3846, 4098, 3860, 3230,
2820, 2916, 3201, 3721, 3397, 3055, 2141, 1623, 1825, 1716, 2232, 2939,
3735, 4838, 4560, 4307, 4975, 5173, 4859, 5268, 4992, 5100, 5070, 5270,
4760, 5135, 5059, 4682, 4492, 4933, 4737, 4611, 4634, 4789, 4811, 4379,
4689, 4284, 4191, 3313, 2770, 2543, 3105, 2967, 2420, 1996, 2247, 2564,
2726, 3021, 3427, 3509, 3759, 3324, 2988, 2849, 2340, 2443, 2364, 1252,
623, 742, 867, 684, 488, 348, 241, 187, 279, 355, 423, 678, 1375, 1497,
1434, 2116, 2411, 1929, 1628, 1635, 1609, 1757, 2090, 2085, 1790, 1846,
2038, 2360, 2342, 2401, 2920, 3030, 3132, 4385, 5483, 5865, 5595, 5485,
5727, 5553, 5560, 5233, 5478, 5159, 5155, 5312, 5079, 4510, 4628, 4535,
3656, 3698, 3443, 3146, 2562, 2304, 2181, 2293, 1950, 1930, 2197, 2796,
3441, 3649, 3815, 2850, 4005, 5305, 5550, 5641, 4717, 5131, 2831, 3518,
3354, 3115, 3515, 3552, 3244, 3658, 4407, 4935, 4299, 3166, 3335, 2728,
2488, 2573, 2002, 1717, 1645, 1977, 2049, 2125, 2376, 2551, 2578, 2629,
2750, 3150, 3699, 4062, 3959, 3264, 2671, 2205, 2128, 2133, 2095, 1964,
2006, 2074, 2201, 2506, 2449, 2465, 2064, 1446, 1382, 983, 898, 489,
319, 383, 332, 276, 224, 144, 101, 232, 429, 597, 750, 908, 960, 1076,
951, 1062, 1183, 1404, 1391, 1419, 1497, 1267, 963, 682, 777, 906,
1149, 1439, 1600, 1876, 1885, 1962, 2280, 2711, 2591, 2411])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ARMA(data, order=(4, 1)).fit(start_ar_lags=5, disp=-1)
@pytest.mark.not_vetted
def test_arima_predict_mle_dates():
cpi = datasets.macrodata.load_pandas().data['cpi'].values
res1 = ARIMA(cpi, (4, 1, 1), dates=cpi_dates, freq='Q').fit(disp=-1)
path = os.path.join(current_path, 'results',
'results_arima_forecasts_all_mle.csv')
arima_forecasts = pd.read_csv(path).values
fc = arima_forecasts[:, 0]
fcdyn = arima_forecasts[:, 1]
fcdyn2 = arima_forecasts[:, 2]
start, end = 2, 51
fv = res1.predict('1959Q3', '1971Q4', typ='levels')
assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
tm.assert_index_equal(res1.data.predict_dates,
cpi_dates[start:end + 1])
start, end = 202, 227
fv = res1.predict('2009Q3', '2015Q4', typ='levels')
assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
tm.assert_index_equal(res1.data.predict_dates,
cpi_predict_dates)
# make sure dynamic works
start, end = '1960q2', '1971q4'
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn[5:51 + 1], DECIMAL_4)
start, end = '1965q1', '2015q4'
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[24:227 + 1], DECIMAL_4)
@pytest.mark.not_vetted
def test_arma_predict_mle_dates():
sunspots = datasets.sunspots.load_pandas().data['SUNACTIVITY'].values
mod = ARMA(sunspots, (9, 0), dates=sun_dates, freq='A')
mod.method = 'mle'
with pytest.raises(ValueError):
mod._get_prediction_index('1701', '1751', True)
start, end = 2, 51
mod._get_prediction_index('1702', '1751', False)
tm.assert_index_equal(mod.data.predict_dates, sun_dates[start:end + 1])
start, end = 308, 333
mod._get_prediction_index('2008', '2033', False)
tm.assert_index_equal(mod.data.predict_dates, sun_predict_dates)
@pytest.mark.not_vetted
def test_arima_predict_css_dates():
cpi = datasets.macrodata.load_pandas().data['cpi'].values
res1 = ARIMA(cpi, (4, 1, 1), dates=cpi_dates, freq='Q').fit(disp=-1,
method='css',
trend='nc')
params = np.array([1.231272508473910,
-0.282516097759915,
0.170052755782440,
-0.118203728504945,
-0.938783134717947])
path = os.path.join(current_path, 'results',
'results_arima_forecasts_all_css.csv')
arima_forecasts = pd.read_csv(path).values
fc = arima_forecasts[:, 0]
fcdyn = arima_forecasts[:, 1]
fcdyn2 = arima_forecasts[:, 2]
start, end = 5, 51
fv = res1.model.predict(params, '1960Q2', '1971Q4', typ='levels')
assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
tm.assert_index_equal(res1.data.predict_dates, cpi_dates[start:end + 1])
start, end = 202, 227
fv = res1.model.predict(params, '2009Q3', '2015Q4', typ='levels')
assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
tm.assert_index_equal(res1.data.predict_dates, cpi_predict_dates)
# make sure dynamic works
start, end = 5, 51
fv = res1.model.predict(params, '1960Q2', '1971Q4', typ='levels',
dynamic=True)
assert_almost_equal(fv, fcdyn[start:end + 1], DECIMAL_4)
start, end = '1965q1', '2015q4'
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[24:227 + 1], DECIMAL_4)
@pytest.mark.not_vetted
def test_arma_predict_css_dates():
# TODO: GH reference?
sunspots = datasets.sunspots.load_pandas().data['SUNACTIVITY'].values
mod = ARMA(sunspots, (9, 0), dates=sun_dates, freq='A')
mod.method = 'css'
with pytest.raises(ValueError):
mod._get_prediction_index('1701', '1751', False)
def test_arima_wrapper():
# test that names get attached to res.params correctly
# TODO: GH reference?
cpi = datasets.macrodata.load_pandas().data['cpi']
cpi.index = pd.Index(cpi_dates)
res = ARIMA(cpi, (4, 1, 1), freq='Q').fit(disp=-1)
expected_index = pd.Index(['const', 'ar.L1.D.cpi', 'ar.L2.D.cpi',
'ar.L3.D.cpi', 'ar.L4.D.cpi',
'ma.L1.D.cpi'])
assert expected_index.equals(res.params.index)
tm.assert_index_equal(res.params.index, expected_index)
assert res.model.endog_names == 'D.cpi'
@pytest.mark.not_vetted
@pytest.mark.smoke
def test_1dexog():
# smoke test, this will raise an error if broken
dta = datasets.macrodata.load_pandas().data
endog = dta['realcons'].values
exog = dta['m1'].values.squeeze()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod = ARMA(endog, (1, 1), exog).fit(disp=-1)
mod.predict(193, 203, exog[-10:])
# check for dynamic is true and pandas Series see GH#2589
mod.predict(193, 202, exog[-10:], dynamic=True)
dta.index = pd.Index(cpi_dates)
mod = ARMA(dta['realcons'], (1, 1), dta['m1'])
res = mod.fit(disp=-1)
res.predict(dta.index[-10], dta.index[-1],
exog=dta['m1'][-10:], dynamic=True)
mod = ARMA(dta['realcons'], (1, 1), dta['m1'])
res = mod.fit(trend='nc', disp=-1)
res.predict(dta.index[-10], dta.index[-1],
exog=dta['m1'][-10:], dynamic=True)
@pytest.mark.not_vetted
def test_arima_predict_bug():
# predict_start_date wasn't getting set on start = None
# TODO: GH reference?
dta = datasets.sunspots.load_pandas().data['SUNACTIVITY']
dta.index = pd.DatetimeIndex(start='1700', end='2009', freq='A')[:309]
arma_mod20 = ARMA(dta, (2, 0)).fit(disp=-1)
arma_mod20.predict(None, None)
# test prediction with time stamp, see GH#2587
predict = arma_mod20.predict(dta.index[-20], dta.index[-1])
assert predict.index.equals(dta.index[-20:])
predict = arma_mod20.predict(dta.index[-20], dta.index[-1], dynamic=True)
assert predict.index.equals(dta.index[-20:])
# partially out of sample
predict_dates = pd.DatetimeIndex(start='2000', end='2015', freq='A')
predict = arma_mod20.predict(predict_dates[0], predict_dates[-1])
assert predict.index.equals(predict_dates)
@pytest.mark.not_vetted
def test_arima_predict_q2():
# bug with q > 1 for arima predict
# TODO: GH reference?
inv = datasets.macrodata.load().data['realinv']
arima_mod = ARIMA(np.log(inv), (1, 1, 2)).fit(start_params=[0, 0, 0, 0],
disp=-1)
fc, stderr, conf_int = arima_mod.forecast(5)
# values copy-pasted from gretl
assert_almost_equal(fc,
[7.306320, 7.313825, 7.321749, 7.329827, 7.337962],
5)
@pytest.mark.not_vetted
def test_arima_predict_pandas_nofreq():
# GH#712
dates = ["2010-01-04", "2010-01-05", "2010-01-06", "2010-01-07",
"2010-01-08", "2010-01-11", "2010-01-12", "2010-01-11",
"2010-01-12", "2010-01-13", "2010-01-17"]
close = [626.75, 623.99, 608.26, 594.1, 602.02, 601.11, 590.48, 587.09,
589.85, 580.0, 587.62]
data = pd.DataFrame(close, index= | pd.DatetimeIndex(dates) | pandas.DatetimeIndex |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
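# Sketch (illustrative, not in the original module): the returned parser turns a
# log timestamp string into a datetime object.
#   parse = TransformMetaData.getDateParser()
#   parse("2020-06-09 19:14:00.000")  # -> datetime.datetime(2020, 6, 9, 19, 14)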
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
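# Sketch of the resulting INI layout (illustrative section name and values,
# assuming a two-row frame with columns from _getDataFormat below):
#   [drive_stats]
#   Serial_Number = ['SN001', 'SN002']
#   Temperature = ['35', '36']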
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean with a weights-based (multiplication) method, since a direct division can produce infinity or NaN.
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
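# Sketch (illustrative, not in the original code): for x = numpy.array([1., 2., 3.]),
# numpy.average(x, weights=numpy.ones_like(x) / x.size) returns 2.0, the same as
# x.mean(), but the 1/n factor is applied through the weights rather than by a
# final division by x.size.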
def _calculateStd(self, data):
"""
Calculates the standard deviation with a multiplication-based method, since a direct division can produce infinity or NaN.
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(0.0)  # accumulator for the sum of squared deviations
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(sigmaValue):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(sigmaRangeValue):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
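# Worked example (illustrative numbers, not real drive data): with
# meanValue = 100.0, sigmaValue = 5.0 and multiplierSigma = 3.0, the band is
# sigmaRangeValue = 15.0 and topValue = 115.0, so the cleanup step below keeps
# rows whose values lie between 1 and 115.0.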
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the column dtype mapping for the CSV file generated from the server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": | pandas.StringDtype() | pandas.StringDtype |
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import numpy as np
import pandas as pd
from rich.table import Table
from rich import print
""""""""" Single Asset Graphing """""""""""
def graph(price_array, time_array, graphtitle = "Price of asset over time", yaxistitle = 'Price (USD)', xaxistitle = 'Time (Months)'):
""" First parameter is for the price array and the second is for the time array"""
#Creates the figure under the graphtitle name
fig = plt.figure(graphtitle)
# sets the background of the plot to transparent
fig.patch.set_alpha(0.0)
ax = plt.axes()
ax.patch.set_alpha(0.0)
#sets up the graph and displays it to the screen in the figure
plt.title(graphtitle)
plt.plot(time_array, price_array)
plt.ylabel(yaxistitle)
plt.xlabel(xaxistitle)
print("[bold purple][Displaying\t][/bold purple] graph")
print(f"[bold yellow][Title:\t\t][/bold yellow] {graphtitle}")
plt.show()
print("[bold red][Exiting\t][/bold red] graph\n")
""""""""" Multiple Assets Graphing """""""""""
def subcompare(assets_array, subplot_title = "The prices over time:", yaxistitle = 'Price (USD)', xaxistitle = 'Time (Months)'):
"""Compares multiple assets in one price over time graph. (Parameter: Expects a Matrix)"""
number_of_assets = len(assets_array[0])
#Dynamically creates the title and adds in all the assets to it
title_array = [subplot_title]
for assets_name in assets_array[0]:
title_array.append(assets_name)
title = | pd.Series(title_array) | pandas.Series |
import pandas as pd
import numpy as np
import re
from law.utils import *
import jieba.posseg as pseg
import datetime
import mysql.connector
class case_reader:
def __init__(self, user, password, n=1000, preprocessing=False):
'''
n: number of rows to read from the database (a negative value reads all rows).
preprocessing: whether to run the preprocessing steps.
'''
# old version: use file_path
# self.file_path = file_path
# self.data = pd.read_csv(self.file_path, encoding='utf-8', engine='python')
# new version: directly reading data
# connect database
self.n = n
self.preprocessing = preprocessing
print("Connecting to Server...")
cnx = mysql.connector.connect(user=user, password=password,
host="cdb-74dx1ytr.gz.tencentcdb.com",
port=10008,
database='law')
cursor = cnx.cursor(buffered=True)
print("Server Connected.")
# read database
if n>=0:
query = 'SELECT * FROM Civil LIMIT ' + str(self.n) + ';'
else:
query = 'SELECT * FROM Civil;'
print("Start Reading Data...")
self.data = pd.read_sql(query,con=cnx)
print("Read Data Successful...")
self.data_len = len(self.data)
print("This dataset has ", self.data_len, "rows of data.")
# replace missing values with np.nan
self.data = self.data.fillna(np.nan)
def return_data(self):
if self.preprocessing:
self.preprocess()
return self.data
def number2(self):
'''
This function changes '庭审程序' (trial procedure) into one-hot encodings.
-- Klaus
'''
xingfabiangeng = np.zeros(self.data_len)
yishen = np.zeros(self.data_len)
ershen = np.zeros(self.data_len)
fushen = np.zeros(self.data_len)
qita = np.zeros(self.data_len)
for i in range(self.data_len):
if self.data['proc'][i] == "刑罚变更":
xingfabiangeng[i] += 1
if self.data['proc'][i] == "一审":
yishen[i] += 1
if self.data['proc'][i] == "二审":
ershen[i] += 1
if self.data['proc'][i] == "复核":
fushen[i] += 1
if self.data['proc'][i] == "其他" :
qita[i] += 1
self.data['proc_是否_刑罚变更'] = xingfabiangeng
self.data['proc_是否_一审'] = yishen
self.data['proc_是否_二审'] = ershen
self.data['proc_是否_复核'] = fushen
self.data['proc_是否_其他'] = qita
#print(xingfabiangeng)
#print(yishen)
#print(ershen)
#print(qita)
del xingfabiangeng, yishen, ershen, fushen, qita
def number3(self):
'''
This function maps '案由' (cause of action) to a 1-based integer code stored in 'class_index'.
'''
reasons = ['机动车交通事故责任纠纷' ,'物件损害责任纠纷' ,'侵权责任纠纷', '产品责任纠纷', '提供劳务者受害责任纠纷' ,'医疗损害责任纠纷',
'地面施工、地下设施损害责任纠纷', '饲养动物损害责任纠纷' ,'产品销售者责任纠纷', '因申请诉中财产保全损害责任纠纷', '教育机构责任纠纷',
'违反安全保障义务责任纠纷' , '网络侵权责任纠纷' ,'因申请诉前财产保全损害责任纠纷' ,'物件脱落、坠落损害责任纠纷',
'因申请诉中证据保全损害责任纠纷' ,'建筑物、构筑物倒塌损害责任纠纷' ,'提供劳务者致害责任纠纷' ,'产品生产者责任纠纷',
'公共场所管理人责任纠纷', '公证损害责任纠纷', '用人单位责任纠纷' ,'触电人身损害责任纠纷', '义务帮工人受害责任纠纷',
'高度危险活动损害责任纠纷', '噪声污染责任纠纷' ,'堆放物倒塌致害责任纠纷', '公共道路妨碍通行损害责任纠纷' ,'见义勇为人受害责任纠纷',
'医疗产品责任纠纷' ,'监护人责任纠纷', '水上运输人身损害责任纠纷', '环境污染责任纠纷', '因申请先予执行损害责任纠纷',
'铁路运输人身损害责任纠纷' ,'水污染责任纠纷', '林木折断损害责任纠纷', '侵害患者知情同意权责任纠纷' ,'群众性活动组织者责任纠纷',
'土壤污染责任纠纷']
mreason = np.zeros(self.data_len)
for i in range(self.data_len):
for j,reason in enumerate(reasons):
if self.data['class'][i] == reasons[j]:
mreason[i] +=j+1
self.data['class_index'] = mreason
del mreason
def number4(self):
'''
This function changes '文书类型' (document type) into one-hot encodings.
'''
panjueshu = np.zeros(self.data_len)
caidingshu = np.zeros(self.data_len)
for i in range(self.data_len):
if self.data['doc_type'][i] == "判决书":
panjueshu[i] += 1
if self.data['doc_type'][i] == "裁定书":
caidingshu[i] += 1
self.data['doc_type_是否_判决书'] = panjueshu
self.data['doc_type_是否_裁定书'] = caidingshu
del panjueshu, caidingshu
def number5(self):
'''
Split court_name into province, city, and court level.
-- <NAME>
'''
level = [] # court level
distinct = [] # province
block = [] # city
for x in self.data['court_name']:
if pd.isna(x):#if empty
level.append(None)
distinct.append(None)
block.append(None)
else:
# search “省”(province)
a = re.compile(r'.*省')
b = a.search(x)
if b == None:
distinct.append(None)
else:
distinct.append(b.group(0))
x = re.sub(b.group(0), '', x)
# search "市"(city)
a = re.compile(r'.*市')
b = a.search(x)
if b == None:
block.append(None)
else:
block.append(b.group(0))
# search“级”(level)
a = re.compile(r'.级')
b = a.search(x)
if b == None:
level.append(None)
else:
level.append(b.group(0))
newdict = {
'法院所在省': distinct,
'法院所在市': block,
'法院等级': level
}
# DataFrame
newdata = pd.DataFrame(newdict)
self.data = pd.concat([self.data, newdata], axis=1)
del newdata, level, distinct, block
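# Worked example (hypothetical court name): '广东省深圳市中级人民法院' is split into
# 法院所在省 = '广东省', 法院所在市 = '深圳市', 法院等级 = '中级'.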
def number6(self):
'''
Split the judgment date into year, month, and day.
:return:
'''
year = []
month = []
day = []
for x in self.data['date']:
# year
a = re.compile(r'.*年')
b = a.search(str(x))
if b == None:
year.append(None)
else:
year.append(b.group(0))
x = re.sub(b.group(0), '', x)
# month
a1 = re.compile(r'.*月')
b1 = a1.search(str(x))
if b1 == None:
month.append(None)
else:
month.append(b1.group(0))
x = re.sub(b1.group(0), '', x)
# day
a2 = re.compile(r'.*日')
b2 = a2.search(str(x))
if b2 == None:
day.append(None)
else:
day.append(b2.group(0))
newdict = {
'判决年份': year,
'判决月份': month,
'判决日期': day
}
# DataFrame
newdata = pd.DataFrame(newdict)
self.data = pd.concat([self.data, newdata], axis=1)
del year, month, day
def number7(self):  # four one-hot columns: 检察院 (procuratorate), 法人 (legal person), 自然人 (natural person), 其他 (other)
'''
--<NAME>
'''
self.data['原告_是否_检察院'] = 0
self.data['原告_是否_法人'] = 0
self.data['原告_是否_自然人'] = 0
self.data['原告_是否_其他'] = 0
pattern = r'(?::|:|。|、|\s|,|,)\s*'
jcy_pattern = re.compile(r'.*检察院')
gs_pattern = re.compile(r'.*公司')
for i in range(len(self.data['plantiff'])):
if pd.isna(self.data['plantiff'][i]):
continue
self.data['plantiff'][i] = re.sub(' ', '', self.data['plantiff'][i])
result_list = re.split(pattern, self.data['plantiff'][i])
for x in result_list:
temp1 = jcy_pattern.findall(x)
temp2 = gs_pattern.findall(x)
if len(temp1) != 0:
self.data['原告_是否_检察院'][i] = 1
if (0 < len(x) <= 4):
self.data['原告_是否_自然人'][i] = 1
if ((len(temp1) != 0) or len(temp2) != 0):
self.data['原告_是否_法人'][i] = 1
if (len(x) > 4 and len(temp1) == 0 and len(temp2) == 0):
self.data['原告_是否_其他'][i] = 1
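        # Example (hypothetical input): plantiff='张三、某某科技有限公司' sets
        # 原告_是否_自然人=1 (a name of four characters or fewer) and 原告_是否_法人=1
        # (contains 公司); a longer party without 检察院/公司 sets 原告_是否_其他=1 instead.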
def number8(self):
# http://www.sohu.com/a/249531167_656612
company = re.compile(r'.*?公司')
natural_person = np.zeros(self.data_len)
legal_person = np.zeros(self.data_len)
other_person = np.zeros(self.data_len)
for i in range(self.data_len):
            # show progress
#if i % 100 == 0:
# print(i)
if pd.isna(self.data['defendant'][i]):
continue
if re.search(company, self.data['defendant'][i]) is not None:
legal_person[i] = 1
l = re.split('、', self.data['defendant'][i])
l1 = list(filter(lambda s: len(s) <= 4, l))
l2 = list(filter(lambda s: (re.search(company, s)) is None, l1))
if len(l2) > 0:
natural_person[i] = 1
l3 = list(filter(lambda s: len(s) > 4, l))
l4 = list(filter(lambda s: (re.search(company, s)) is None, l3))
if len(l4) > 0:
other_person[i] = 1
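                # part-of-speech check via jieba: if a long non-company entry contains a verb,
                # treat it as a sentence fragment rather than a party name and clear the flag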
for mes in l4:
words = pseg.cut(mes)
#verbs = []
for word, flag in words:
if flag == 'v':
other_person[i] = 0
break
self.data['被告_是否_自然人'] = natural_person
self.data['被告_是否_法人'] = legal_person
self.data['被告_是否_其他'] = other_person
del natural_person, legal_person, other_person
def number9(self):
'''
--<NAME>'''
self.data['第三人_有无自然人'] = 0
pattern = r'(?::|:|。|、|\s|,|,)\s*'
for i in range(len(self.data['third_party'])):
if pd.isna(self.data['third_party'][i]):
continue
result_list = re.split(pattern, self.data['third_party'][i])
for x in result_list:
if (0 < len(x) <= 4):
self.data['第三人_有无自然人'][i] = 1
break
def number10(self):
information = []
for i in range(self.data_len):
#if i % 100 == 0:
#print(i)
            if pd.isna(self.data['party'][i]):
                information.append({})
                continue
            information.append(ADBinfo(self.data, i))
        self.data['party_one_hot'] = information
        del information
def number11(self):
types = []
money = []
for x in self.data['procedure']:
#print(x)
if str(x)=='nan' or re.search('[0-9]+元',x)==None:
money.append(0)
else:
money.append(1)
if str(x)=='nan':
types.append('空白')
elif not(re.search('不宜在互联网公布|涉及国家秘密的|未成年人犯罪的',x)==None):
types.append('不公开')
elif not(re.search('以调解方式结案的',x)==None):
types.append('调解结案')
elif not(re.search('一案.*本院.*简易程序.*(因|转为)',x)==None):
types.append('已审理(简易转普通)')
elif not(re.search('一案.*(小额诉讼程序|简易程序).*审理(。$|终结。$|.*到庭参加诉讼|.*到庭应诉|.*参加诉讼)',x)==None):
types.append('已审理(简易)')
elif not(re.search('(一案.*本院.*(审理。$|审理终结。$|公开开庭进行了审理。$|公开开庭进行.?审理.*到庭参加.?诉讼))',x)==None):
types.append('已审理')
#elif not(re.search('一案.*本院.*(受理|立案).*简易程序.*(因|转为)',x)==None):
#types.append('已受理/立案(简易转普通)')
                # this case occurs too rarely for now to be given its own category
elif not(re.search('一案.*本院.*(受理|立案).*(小额诉讼程序|简易程序)(。$|.*由.*审判。$)',x)==None):
types.append('已受理/立案(简易)')
elif not(re.search('一案.*本院.*(立案。$|立案受理。$|立案后。$)',x)==None):
types.append('已受理/立案')
elif not(re.search('一案.*(调解.*原告|原告.*调解).*撤',x)==None):
types.append('调解撤诉')
elif (re.search('调解',x)==None) and not(re.search('一案.*原告.*撤',x)==None):
types.append('其他撤诉')
elif not(re.search('一案.*原告.*((未|不).*(受理|诉讼)费|(受理|诉讼)费.*(未|不))',x)==None):
types.append('未交费')
elif not(re.search('一案.*本院.*依法追加.*被告',x)==None):
types.append('追加被告')
elif not(re.search('上诉人.*不服.*上诉。$',x)==None):
types.append('上诉')
elif not(re.search('再审.*一案.*不服.*再审。$',x)==None):
types.append('要求再审')
elif not(re.search('一案.*申请财产保全.*符合法律规定。$',x)==None):
types.append('同意诉前财产保全')
elif not(re.search('申请.*(请求|要求).*(查封|冻结|扣押|保全措施)',x)==None):
types.append('申请财产保全')
elif not(re.search('一案.*(缺席|拒不到庭|未到庭)',x)==None):
types.append('缺席审判')
elif not(re.search('一案.*申请.*解除(查封|冻结|扣押|保全措施).*符合法律规定。$',x)==None):
types.append('同意解除冻结')
else:
types.append('其他/错误')
#newdict={'庭审程序分类':types,'money':money}
newdict={'庭审程序分类':types}
newdata = pd.DataFrame(newdict)
self.data = pd.concat([self.data, newdata], axis=1)
del types
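        # Example (hypothetical input): a missing 'procedure' value is labelled '空白' and text
        # containing '不宜在互联网公布' is labelled '不公开'. Note that `money` is computed above
        # but, with the newdict line as written, is not stored in the DataFrame.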
def number12(self):
#if cancel
repeal_pattern = re.compile(r'撤诉')
yes = np.zeros(self.data_len)
no = np.zeros(self.data_len)
dk = np.zeros(self.data_len)
al = np.zeros(self.data_len)
for i in range(self.data_len):
if not pd.isna(self.data['process'][i]):
temp = repeal_pattern.findall(str(self.data['process'][i]))
if len(temp) == 0:
no[i] += 1
al[i] = 0
else:
yes[i] += 1
al[i] = 1
else:
dk[i] += 1
al[i] = -1
self.data['庭审过程_是否撤诉_是'] = yes
self.data['庭审过程_是否撤诉_未知'] = dk
self.data['庭审过程_是否撤诉_否'] = no
self.data['庭审过程_是否撤诉_汇总'] = al
del yes, no, dk, al
#if hurt
situation_pattern = re.compile(r'受伤|死亡|伤残|残疾|致残')
yes = np.zeros(self.data_len)
no = np.zeros(self.data_len)
dk = np.zeros(self.data_len)
al = np.zeros(self.data_len)
for i in range(self.data_len):
if not pd.isna(self.data['process'][i]):
temp = situation_pattern.findall(str(self.data['process'][i]))
if len(temp) == 0:
no[i] += 1
al[i] = 0
else:
yes[i] += 1
al[i] = 1
else:
dk[i] += 1
al[i] = -1
self.data['庭审过程_是否受伤_是'] = yes
self.data['庭审过程_是否受伤_否'] = no
self.data['庭审过程_是否受伤_未知'] = dk
self.data['庭审过程_是否受伤_汇总'] = al
del yes, no, dk, al
#if money
        money_pattern = re.compile(r'[0-9]+元|[0-9]+万元|[0-9]+万+[0-9]+千元|[0-9]+千+[0-9]+百元|'
                                   r'[0-9]+万+[0-9]+千+[0-9]+百元|[0-9]+,+[0-9]+元|[0-9]+,+[0-9]+,+[0-9]+元')
        '''
        Matches amounts written as xxx元, xxx万元, xxx万xxx千元, xxx万xxx千xxx百元,
        xxx千xxx百元, xxx,xxx元 or xxx,xxx,xxx元.
        '''
yes = np.zeros(self.data_len)
no = np.zeros(self.data_len)
dk = np.zeros(self.data_len)
al = np.zeros(self.data_len)
for i in range(self.data_len):
if not pd.isna(self.data['process'][i]):
temp = money_pattern.findall(str(self.data['process'][i]))
if len(temp) == 0:
no[i] += 1
al[i] = 0
else:
yes[i] += 1
al[i] = 1
else:
dk[i] += 1
al[i] = -1
self.data['庭审过程_是否涉及金钱_是'] = yes
self.data['庭审过程_是否涉及金钱_否'] = no
self.data['庭审过程_是否涉及金钱_未知'] = dk
self.data['庭审过程_是否涉及金钱_汇总'] = al
del yes, no, dk, al
#if on purpose
intent_pattern = re.compile(r'有意|故意')
yes = np.zeros(self.data_len)
no = np.zeros(self.data_len)
dk = np.zeros(self.data_len)
al = np.zeros(self.data_len)
for i in range(self.data_len):
if not pd.isna(self.data['process'][i]):
temp = intent_pattern.findall(str(self.data['process'][i]))
if len(temp) == 0:
no[i] += 1
al[i] = 0
else:
yes[i] += 1
al[i] = 1
else:
dk[i] += 1
al[i] = -1
self.data['庭审过程_是否故意_是'] = yes
self.data['庭审过程_是否故意_否'] = no
self.data['庭审过程_是否故意_未知'] = dk
self.data['庭审过程_是否故意_汇总'] = al
del yes, no, dk, al
#if moral reparation
mental_pattern = re.compile(r'精神损失|精神赔偿|精神抚慰')
yes = np.zeros(self.data_len)
no = np.zeros(self.data_len)
dk = np.zeros(self.data_len)
al = np.zeros(self.data_len)
for i in range(self.data_len):
if not pd.isna(self.data['process'][i]):
temp = mental_pattern.findall(str(self.data['process'][i]))
if len(temp) == 0:
no[i] += 1
al[i] = 0
else:
yes[i] += 1
al[i] = 1
else:
dk[i] += 1
al[i] = -1
self.data['庭审过程_是否要求精神赔偿_是'] = yes
self.data['庭审过程_是否要求精神赔偿_否'] = no
self.data['庭审过程_是否要求精神赔偿_未知'] = dk
self.data['庭审过程_是否要求精神赔偿_汇总'] = al
del yes, no, dk, al
        # if a party refused to appear in court
        absent_pattern = re.compile(r'拒不到庭')
yes = np.zeros(self.data_len)
no = np.zeros(self.data_len)
dk = np.zeros(self.data_len)
al = np.zeros(self.data_len)
for i in range(self.data_len):
if not pd.isna(self.data['process'][i]):
temp = absent_pattern.findall(str(self.data['process'][i]))
if len(temp) == 0:
no[i] += 1
al[i] = 0
else:
yes[i] += 1
al[i] = 1
else:
dk[i] += 1
al[i] = -1
self.data['庭审过程_是否拒不到庭_是'] = yes
self.data['庭审过程_是否拒不到庭_否'] = no
self.data['庭审过程_是否拒不到庭_未知'] = dk
self.data['庭审过程_是否拒不到庭_汇总'] = al
del yes, no, dk, al
        # if an objection was raised
        objection_pattern = re.compile(r'有异议|重新鉴定|判决异议')
yes = np.zeros(self.data_len)
no = np.zeros(self.data_len)
dk = np.zeros(self.data_len)
al = np.zeros(self.data_len)
for i in range(self.data_len):
if not pd.isna(self.data['process'][i]):
temp = objection_pattern.findall(str(self.data['process'][i]))
if len(temp) == 0:
no[i] += 1
al[i] = 0
else:
yes[i] += 1
al[i] = 1
else:
dk[i] += 1
al[i] = -1
self.data['庭审过程_是否有异议_是'] = yes
self.data['庭审过程_是否有异议_否'] = no
self.data['庭审过程_是否有异议_未知'] = dk
self.data['庭审过程_是否有异议_汇总'] = al
del yes, no, dk, al
#length
length = np.zeros(self.data_len)
for i in range(self.data_len):
if type(self.data['process'][i]) == str:
length[i] = len(self.data['process'][i])
else:
length[i] = 0
self.data['庭审过程_长度'] = length
del length
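    # The seven flag blocks above repeat one yes/no/unknown/summary pattern. A helper along
    # these lines could replace them (hypothetical sketch, not called anywhere in this pipeline):
    def _flag_from_pattern(self, source_column, pattern, prefix):
        yes = np.zeros(self.data_len)
        no = np.zeros(self.data_len)
        dk = np.zeros(self.data_len)
        al = np.zeros(self.data_len)
        for i in range(self.data_len):
            text = self.data[source_column][i]
            if pd.isna(text):
                dk[i] = 1
                al[i] = -1
            elif len(pattern.findall(str(text))) > 0:
                yes[i] = 1
                al[i] = 1
            else:
                no[i] = 1
                al[i] = 0
        self.data[prefix + '_是'] = yes
        self.data[prefix + '_否'] = no
        self.data[prefix + '_未知'] = dk
        self.data[prefix + '_汇总'] = al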
def number13(self):
        '''“法院意见”(court opinion):
        - whether an amount of money is mentioned
        - number of statutes cited
        - number of articles (条) cited
        - number of clauses (款) cited
        --<NAME>'''
self.data['法院意见_是否涉及金额'] = 0
self.data['法院意见_涉及的法数'] = 0
self.data['法院意见_涉及的条数'] = 0
self.data['法院意见_涉及的款数'] = 0
money_pattern = re.compile(r'[0-9]+元')
for i in range(len(self.data['opinion'])):
if not pd.isna(self.data['opinion'][i]):
try:
temp = find_law_tiao_kuan_in_text(self.data['opinion'][i])
except:
print('法院意见无法处理的案件案号:'+self.data['id'][i])
else:
if len(temp) > 0:
self.data['法院意见_涉及的法数'][i] = len(temp)
sum_tiao = 0
sum_kuan = 0
for j in range(len(temp)):
sum_tiao += len(temp[j][1])
sum_kuan += len(temp[j][2])
self.data['法院意见_涉及的条数'][i] = sum_tiao
self.data['法院意见_涉及的款数'][i] = sum_kuan
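        # The counting above assumes find_law_tiao_kuan_in_text returns a list of
        # (law, [条 ...], [款 ...]) tuples, one per statute found in the text.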
# money
for i in range(len(self.data['opinion'])):
            if not pd.isna(self.data['opinion'][i]):
                if len(money_pattern.findall(self.data['opinion'][i])) > 0:
                    self.data['法院意见_是否涉及金额'][i] = 1
import os
opj = os.path.join
import numpy as np
import pandas as pd
import glob
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
color_cycle = ['dodgerblue', 'olivedrab', 'darkorange', 'magenta']
plt.ioff()
plt.rcParams.update({'font.family': 'Times New Roman',
'font.size': 16, 'axes.labelsize': 20,
'mathtext.default': 'regular',
'xtick.minor.visible': True,
'xtick.major.size': 5,
'ytick.minor.visible': True,
'ytick.major.size': 5,
'axes.prop_cycle': plt.cycler('color', color_cycle)})
import lmfit as lm
import RTxploitation as rtx
import study_cases.vsf as vsf
import pffit
fit = pffit.phase_function_models.inversion()
m = pffit.phase_function_models.models()
dir = pffit.__path__[0]
dirdata = opj(dir, 'data')
dirfig = opj(dir, 'fig')
# -------------------
# fitting section
# -------------------
models = (fit.FF_fit, fit.RM_fit, fit.FFRM_fit, fit.TTRM_fit)
file = '/home/harmel/Dropbox/work/git/vrtc/RTxploitation/RTxploitation/../study_cases/vsf/data/petzold_data.txt'
df = pd.read_csv(file, skiprows=3, sep='\s+', index_col=0, skipinitialspace=True, na_values='inf')
#!/usr/bin/env python3
##########################################################
## <NAME> ##
## Copyright (C) 2019 <NAME>, IGTP, Spain ##
##########################################################
"""
Generates sample identification using KMA software and MLSTar.
Looks for similar entries on GenBank and retrieves them.
"""
## useful imports
import time
import io
import os
import re
import sys
import concurrent.futures
from termcolor import colored
import pandas as pd
## import my modules
from BacterialTyper.scripts import species_identification_KMA
from BacterialTyper.scripts import database_generator
from BacterialTyper.scripts import MLSTar
from BacterialTyper.scripts import edirect_caller
from BacterialTyper.modules import help_info
from BacterialTyper.config import set_config
from BacterialTyper import __version__ as pipeline_version
import HCGB
from HCGB import sampleParser
import HCGB.functions.aesthetics_functions as HCGB_aes
import HCGB.functions.time_functions as HCGB_time
import HCGB.functions.main_functions as HCGB_main
import HCGB.functions.files_functions as HCGB_files
####################################
def run_ident(options):
"""
Main function acting as an entry point to the module *ident*.
Arguments:
.. seealso:: Additional information to PubMLST available datasets.
- :doc:`PubMLST datasets<../../../data/PubMLST_datasets>`
"""
##################################
### show help messages if desired
##################################
if (options.help_format):
## help_format option
sampleParser.help_format()
exit()
elif (options.help_project):
## information for project
help_info.project_help()
exit()
elif (options.help_KMA):
## information for KMA Software
species_identification_KMA.help_kma_database()
exit()
elif (options.help_MLSTar):
## information for KMA Software
MLSTar.help_MLSTar()
exit()
## init time
start_time_total = time.time()
## debugging messages
global Debug
if (options.debug):
Debug = True
else:
Debug = False
### set as default paired_end mode
if (options.single_end):
options.pair = False
else:
options.pair = True
### species_identification_KMA -> most similar taxa
HCGB_aes.pipeline_header("BacterialTyper", ver=pipeline_version)
HCGB_aes.boxymcboxface("Species identification")
print ("--------- Starting Process ---------")
HCGB_time.print_time()
## absolute path for in & out
input_dir = os.path.abspath(options.input)
outdir=""
## Project mode as default
global Project
if (options.detached):
options.project = False
project_mode=False
outdir = os.path.abspath(options.output_folder)
Project=False
else:
options.project = True
outdir = input_dir
Project=True
## get files
pd_samples_retrieved = sampleParser.files.get_files(options, input_dir, "trim", ['_trim'], options.debug)
## debug message
if (Debug):
print (colored("**DEBUG: pd_samples_retrieve **", 'yellow'))
print (pd_samples_retrieved)
## generate output folder, if necessary
print ("\n+ Create output folder(s):")
if not options.project:
HCGB_files.create_folder(outdir)
## for each sample
outdir_dict = HCGB_files.outdir_project(outdir, options.project, pd_samples_retrieved, "ident", options.debug)
## let's start the process
    print ("+ Generate a species typification for each sample retrieved using:")
print ("(1) Kmer alignment (KMA) software.")
print ("(2) Pre-defined databases by KMA or user-defined databases.")
## get databases to check
retrieve_databases = get_options_db(options)
## time stamp
start_time_partial = HCGB_time.timestamp(start_time_total)
## debug message
if (Debug):
print (colored("**DEBUG: retrieve_database **", 'yellow'))
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)
print (retrieve_databases)
######## KMA identification
dataFrame_kma = KMA_ident(options, pd_samples_retrieved, outdir_dict, retrieve_databases, start_time_partial)
## functions.timestamp
start_time_partial = HCGB_time.timestamp(start_time_partial)
## debug message
if (Debug):
print (colored("**DEBUG: retrieve results to summarize **", 'yellow'))
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)
print ("dataframe_kma")
print (dataFrame_kma)
## exit if viral search
skip=False
if (len(options.kma_dbs) == 1):
for i in options.kma_dbs:
if (i == 'viral'):
print ()
MLST_results = ''
options.fast = True
skip=True
## what if only plasmids?
## do edirect and MLST if bacteria
if (not skip):
dataFrame_edirect = pd.DataFrame()
######## EDirect identification
#dataFrame_edirect = edirect_ident(dataFrame_kma, outdir_dict, Debug)
## functions.timestamp
start_time_partial = HCGB_time.timestamp(start_time_partial)
## debug message
if (Debug):
print (colored("**DEBUG: retrieve results from NCBI **", 'yellow'))
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)
print ("dataFrame_edirect")
print (dataFrame_edirect)
######## MLST identification
MLST_results = MLST_ident(options, dataFrame_kma, outdir_dict, dataFrame_edirect, retrieve_databases)
## functions.timestamp
start_time_partial = HCGB_time.timestamp(start_time_partial)
## debug message
if (Debug):
print (colored("**DEBUG: retrieve results to summarize **", 'yellow'))
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)
print ("MLST_results")
print (MLST_results)
## generate summary for sample: all databases
## MLST, plasmids, genome, etc
HCGB_aes.boxymcboxface("Results Summary")
#####################################
## Summary identification results ##
#####################################
## parse results
if options.project:
final_dir = os.path.join(outdir, 'report', 'ident')
HCGB_files.create_folder(final_dir)
else:
final_dir = outdir
###
excel_folder = HCGB_files.create_subfolder("samples", final_dir)
print ('+ Print summary results in folder: ', final_dir)
print ('+ Print sample results in folder: ', excel_folder)
# Group dataframe results summary by sample name
sample_results_summary = dataFrame_kma.groupby(["Sample"])
## debug message
if (Debug):
print (colored("**DEBUG: sample_results_summary **", 'yellow'))
print (sample_results_summary)
##
results_summary_KMA = pd.DataFrame()
MLST_all = pd.DataFrame()
for name, grouped in sample_results_summary:
## create a excel and txt for sample
name_sample_excel = excel_folder + '/' + name + '_ident.xlsx'
name_sample_csv = outdir_dict[name] + '/ident_summary.csv' ## check in detached mode
        writer_sample = pd.ExcelWriter(name_sample_excel, engine='xlsxwriter')
def get_samples_metadata_columns():
import pandas as pd
samples_meta_df = pd.read_csv('Belly_Button_Biodiversity_Metadata.csv')
return list(samples_meta_df.columns)
def get_samples_metadata_values():
import pandas as pd
samples_meta_df = pd.read_csv('Belly_Button_Biodiversity_Metadata.csv')
return samples_meta_df.values.tolist()
def get_otu_pie_labels():
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect
from sqlalchemy import func, desc
from matplotlib.ticker import NullFormatter
import matplotlib.dates as mdates
from datetime import datetime, timedelta
import seaborn as sns
from flask import Flask, jsonify
import datetime as dt
engine = create_engine("sqlite:///belly_button_biodiversity.sqlite", echo=False)
Base = automap_base()
Base.prepare(engine, reflect=True)
Otu = Base.classes.otu
session = Session(engine)
otu_id_list = session.query(Otu.otu_id).all()
otu_taxonomic_list = session.query(Otu.lowest_taxonomic_unit_found).all()
otu_id = pd.DataFrame(otu_id_list)
otu_taxonomic = pd.DataFrame(otu_taxonomic_list)
otu_df = otu_id.join(otu_taxonomic)
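    # count how many OTU rows fall under each lowest_taxonomic_unit_found, keep the ten most
    # frequent taxa, and use each taxon's (max) otu_id as the pie-chart label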
new_otu_df = otu_df.groupby('lowest_taxonomic_unit_found').count().sort_values(by=['otu_id'], ascending=False).reset_index()
final_new_otu_df = new_otu_df.rename(columns={'otu_id' : "count"})
otu_id_df = otu_df.groupby('lowest_taxonomic_unit_found').max().reset_index()
final_otu_df = final_new_otu_df.merge(otu_id_df, how='inner', on='lowest_taxonomic_unit_found')
named_labels = list(final_otu_df[:10]['otu_id'])
return named_labels
def get_otu_pie_values():
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect
from sqlalchemy import func, desc
from matplotlib.ticker import NullFormatter
import matplotlib.dates as mdates
from datetime import datetime, timedelta
import seaborn as sns
from flask import Flask, jsonify
import datetime as dt
engine = create_engine("sqlite:///belly_button_biodiversity.sqlite", echo=False)
Base = automap_base()
Base.prepare(engine, reflect=True)
Otu = Base.classes.otu
session = Session(engine)
otu_id_list = session.query(Otu.otu_id).all()
otu_taxonomic_list = session.query(Otu.lowest_taxonomic_unit_found).all()
otu_id = pd.DataFrame(otu_id_list)
    otu_taxonomic = pd.DataFrame(otu_taxonomic_list)
    otu_df = otu_id.join(otu_taxonomic)
    # mirror get_otu_pie_labels, but return the occurrence counts of the ten most common taxa
    new_otu_df = otu_df.groupby('lowest_taxonomic_unit_found').count().sort_values(by=['otu_id'], ascending=False).reset_index()
    final_otu_df = new_otu_df.rename(columns={'otu_id': "count"})
    pie_values = list(final_otu_df[:10]['count'])
    return pie_values
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
date_range(start=pd.NaT, end="2016-01-01", freq="D")
def test_date_range_multiplication_overflow(self):
# GH#24255
# check that overflows in calculating `addend = periods * stride`
# are caught
with tm.assert_produces_warning(None):
# we should _not_ be seeing a overflow RuntimeWarning
dti = date_range(start="1677-09-22", periods=213503, freq="D")
assert dti[0] == Timestamp("1677-09-22")
assert len(dti) == 213503
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("1969-05-04", periods=200000000, freq="30000D")
def test_date_range_unsigned_overflow_handling(self):
# GH#24255
# case where `addend = periods * stride` overflows int64 bounds
# but not uint64 bounds
dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
dti2 = date_range(start=dti[0], periods=len(dti), freq="D")
assert dti2.equals(dti)
dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")
assert dti3.equals(dti)
def test_date_range_int64_overflow_non_recoverable(self):
# GH#24255
# case with start later than 1970-01-01, overflow int64 but not uint64
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start="1970-02-01", periods=106752 * 24, freq="H")
# case with end before 1970-01-01, overflow int64 but not uint64
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1969-11-14", periods=106752 * 24, freq="H")
def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
# cases where stride * periods overflow int64 and stride/endpoint
# have different signs
start = Timestamp("2262-02-23")
end = Timestamp("1969-11-14")
expected = date_range(start=start, end=end, freq="-1H")
assert expected[0] == start
assert expected[-1] == end
dti = date_range(end=end, periods=len(expected), freq="-1H")
tm.assert_index_equal(dti, expected)
start2 = Timestamp("1970-02-01")
end2 = Timestamp("1677-10-22")
expected2 = date_range(start=start2, end=end2, freq="-1H")
assert expected2[0] == start2
assert expected2[-1] == end2
dti2 = date_range(start=start2, periods=len(expected2), freq="-1H")
tm.assert_index_equal(dti2, expected2)
def test_date_range_out_of_bounds(self):
# GH#14187
msg = "Cannot generate range"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("2016-01-01", periods=100000, freq="D")
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1763-10-12", periods=100000, freq="D")
def test_date_range_gen_error(self):
rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")
assert len(rng) == 4
@pytest.mark.parametrize("freq", ["AS", "YS"])
def test_begin_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
freq=freq,
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["A", "Y"])
def test_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["BA", "BY"])
def test_business_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq(self):
# GH 11018
rng = date_range("2011-12-31", freq="-2A", periods=3)
exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2A"
rng = date_range("2011-01-31", freq="-2M", periods=3)
exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2M"
def test_date_range_bms_bug(self):
# #1645
rng = date_range("1/1/2000", periods=10, freq="BMS")
ex_first = Timestamp("2000-01-03")
assert rng[0] == ex_first
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq="2D")
offset = timedelta(2)
values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)
tm.assert_index_equal(rng, values)
rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")
the_time = time(8, 15)
for val in rng:
assert val.time() == the_time
def test_date_range_fy5252(self):
dr = date_range(
start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"),
)
assert dr[0] == Timestamp("2013-01-31")
assert dr[1] == Timestamp("2014-01-30")
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start, end, periods=10, freq="s")
def test_date_range_convenience_periods(self):
# GH 20808
result = date_range("2018-04-24", "2018-04-27", periods=3)
expected = DatetimeIndex(
["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],
freq=None,
)
tm.assert_index_equal(result, expected)
# Test if spacing remains linear if tz changes to dst in range
result = date_range(
"2018-04-01 01:00:00",
"2018-04-01 04:00:00",
tz="Australia/Sydney",
periods=3,
)
expected = DatetimeIndex(
[
Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),
Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),
Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),
]
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start,end,result_tz",
[
["20180101", "20180103", "US/Eastern"],
[datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],
[Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
"US/Eastern",
],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
None,
],
],
)
def test_date_range_linspacing_tz(self, start, end, result_tz):
# GH 20983
result = date_range(start, end, periods=3, tz=result_tz)
expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_date_range_businesshour(self):
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH")
rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
"2014-07-08 11:00",
"2014-07-08 12:00",
"2014-07-08 13:00",
"2014-07-08 14:00",
"2014-07-08 15:00",
"2014-07-08 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
def test_range_misspecified(self):
# GH #1095
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(periods=10)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(periods=10, freq="H")
with pytest.raises(ValueError, match=msg):
date_range()
def test_compat_replace(self):
# https://github.com/statsmodels/statsmodels/issues/3349
# replace should take ints/longs for compat
result = date_range(
Timestamp("1960-04-01 00:00:00", freq="QS-JAN"), periods=76, freq="QS-JAN"
)
assert len(result) == 76
def test_catch_infinite_loop(self):
offset = offsets.DateOffset(minute=5)
# blow up, don't loop forever
msg = "Offset <DateOffset: minute=5> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)
@pytest.mark.parametrize("periods", (1, 2))
def test_wom_len(self, periods):
# https://github.com/pandas-dev/pandas/issues/20517
res = date_range(start="20110101", periods=periods, freq="WOM-1MON")
assert len(res) == periods
def test_construct_over_dst(self):
# GH 20854
pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=True
)
pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=False
)
expect_data = [
Timestamp("2010-11-07 00:00:00", tz="US/Pacific"),
pre_dst,
pst_dst,
]
expected = DatetimeIndex(expect_data, freq="H")
result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific")
tm.assert_index_equal(result, expected)
def test_construct_with_different_start_end_string_format(self):
# GH 12064
result = date_range(
"2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H"
)
expected = DatetimeIndex(
[
Timestamp("2013-01-01 00:00:00+09:00"),
Timestamp("2013-01-01 01:00:00+09:00"),
Timestamp("2013-01-01 02:00:00+09:00"),
],
freq="H",
)
tm.assert_index_equal(result, expected)
def test_error_with_zero_monthends(self):
msg = r"Offset <0 \* MonthEnds> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0))
def test_range_bug(self):
# GH #770
offset = DateOffset(months=3)
result = date_range("2011-1-1", "2012-1-31", freq=offset)
start = datetime(2011, 1, 1)
expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset)
tm.assert_index_equal(result, expected)
def test_range_tz_pytz(self):
# see gh-2906
tz = timezone("US/Eastern")
start = tz.localize(datetime(2011, 1, 1))
end = tz.localize(datetime(2011, 1, 3))
dr = date_range(start=start, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize(
"start, end",
[
[
Timestamp(datetime(2014, 3, 6), tz="US/Eastern"),
Timestamp(datetime(2014, 3, 12), tz="US/Eastern"),
],
[
Timestamp(datetime(2013, 11, 1), tz="US/Eastern"),
Timestamp(datetime(2013, 11, 6), tz="US/Eastern"),
],
],
)
def test_range_tz_dst_straddle_pytz(self, start, end):
dr = date_range(start, end, freq="D")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(start, end, freq="D", tz="US/Eastern")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(
start.replace(tzinfo=None),
end.replace(tzinfo=None),
freq="D",
tz="US/Eastern",
)
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
def test_range_tz_dateutil(self):
# see gh-2906
# Use maybe_get_tz to fix filename in tz under dateutil.
from pandas._libs.tslibs.timezones import maybe_get_tz
tz = lambda x: maybe_get_tz("dateutil/" + x)
start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern"))
end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern"))
dr = date_range(start=start, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"])
def test_range_closed(self, freq):
begin = datetime(2011, 1, 1)
end = datetime(2014, 1, 1)
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
def test_range_closed_with_tz_aware_start_end(self):
# GH12409, GH12684
begin = Timestamp("2011/1/1", tz="US/Eastern")
end = Timestamp("2014/1/1", tz="US/Eastern")
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
        begin = Timestamp("2011/1/1")
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series( | pd.timedelta_range("1 days 1 min", periods=5, freq="H") | pandas.timedelta_range |
import pandas as pd
import numpy as np
from pandas import to_datetime
from datetime import timedelta
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima_model import ARIMA
from arch import arch_model
import matplotlib.pyplot as plt
import os
import datetime
import time
import anomalies.config as config
import anomalies.anomaly_identification as ano
import plotly.plotly.plotly as py
import json
def evaluate(threshold, nneighbours, year, currency):
root_evaluate = "D:/coursework/L4S2/GroupProject/repo/TeamFxPortal/anomalies/evaluator/"
root_static_anomalies = "D:/coursework/L4S2/GroupProject/repo/TeamFxPortal/static/anomalies/"
black_regions = pd.read_csv('black_regions_hours/' + currency +'_'+str(year)+ '_true_anomalies.csv')
black_regions['hour'] = black_regions['true_anomalies'].apply(lambda x: to_datetime(x))
black_regions.index = black_regions['hour']
black_regions['label'] = 1
black_regions = black_regions.drop(['hour','true_anomalies' ], axis=1)
results = pd.read_csv(root_static_anomalies+"detected_black_regions/"+str(threshold)+'_'+str(nneighbours)+'_'+ currency + '_'+str(year)+'_all_anomalies.csv')
results['hour'] = results['DateHour'].apply(lambda x: to_datetime(x))
results.index = results['hour']
results['result'] = 1
results = results.drop(['hour','DateHour','Count','Average_lof', 'Ranking_Factor'], axis=1)
time_list = pd.read_csv('data/' + currency + '/DAT_MT_'+currency+'_M1_'+str(year)+'.csv')
time_list['Time'] = time_list[['Date', 'Time']].apply(lambda x: ' '.join(x), axis=1)
time_list = list(set(time_list['Time'].apply(lambda x: to_datetime(x).replace(minute=0, second=0, microsecond=0)).values))
time_df = pd.DataFrame(time_list)
time_df.index = time_df[0]
time_df['sample_loc']=0
time_df = time_df.drop([0], axis=1)
#Shape of passed values is (2, 78), indices imply (2, 56)
joined_table = pd.concat([time_df,black_regions, results], axis=1).fillna(0).sort_index().astype(int)
joined_table = joined_table.drop(['sample_loc'], axis=1)
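    # Note (added): concatenating on the shared hourly index and filling missing
    # entries with 0 turns `label` and `result` into 0/1 columns covering every
    # trading hour of the year, which the confusion-matrix slices below rely on.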
actual_black_regions = joined_table.loc[joined_table['label'] == 1]
actual_non_black_regions = joined_table.loc[joined_table['result'] == 0]
true_positive = joined_table.loc[(joined_table['label'] == 1) & (joined_table['result'] == 1)]
true_negative = joined_table.loc[(joined_table['label'] == 0) & (joined_table['result'] == 0)]
false_positive = joined_table.loc[(joined_table['label'] == 0) & (joined_table['result'] == 1)]
false_negative = joined_table.loc[(joined_table['label'] == 1) & (joined_table['result'] == 0)]
accuracy = (len(true_positive) + len(true_negative)) / len(joined_table)
precision = len(true_positive) / (len(true_positive)+len(false_positive))
recall = len(true_positive) / (len(true_positive)+len(false_negative))
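    # Note (added): precision and recall as computed above will raise
    # ZeroDivisionError for a year with no detected or no labelled black regions;
    # if needed, an F1 score could be derived as
    # 2 * precision * recall / (precision + recall) under the same caveat.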
store_df = | pd.DataFrame() | pandas.DataFrame |
from os import sep
import pandas as pd
import requests
import pyorcid
from nameparser import HumanName
import logging
import time
import json
from collections.abc import Iterable
# Log file to track the correct processing of the ORCID API
logging.basicConfig(filename="logfileORCIDAPI.log", level=logging.INFO)
# AUTHOR NAME DISAMBIGUATION
# read input database
data1 = pd.read_csv('TRYDB1-50.csv', sep=',', engine='python')
data2 = pd.read_csv('TRYDB51-100.csv', sep=',', engine='python')
df1 = | pd.DataFrame(data1, columns=['Authors','Title']) | pandas.DataFrame |
from __future__ import absolute_import
import os
import sys
import gzip
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
from data_utils import get_file
seed = 2016
def get_p1_file(link):
fname = os.path.basename(link)
return get_file(fname, origin=link, cache_subdir='Pilot1')
def load_data(shuffle=True, n_cols=None):
train_path = get_p1_file('http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B2/P1B2.train.csv')
test_path = get_p1_file('http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B2/P1B2.test.csv')
usecols = list(range(n_cols)) if n_cols else None
df_train = | pd.read_csv(train_path, engine='c', usecols=usecols) | pandas.read_csv |
import pandas as pd
import os
import seaborn as sns
from matplotlib import pyplot as plt
from slc_hunger_risk.config import data_dir, plot_dir
from icecream import ic
pd.set_option("max_rows", 200)
| pd.set_option("max_seq_items", 200) | pandas.set_option |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
import pandas as pd
import math
import six
from six.moves import range
from scipy.stats import rankdata
import sys
import time
from sklearn import preprocessing
from scipy.stats import spearmanr
import random
import copy
import xgboost as xgb
from patsy import dmatrices
from numpy import asarray
from numpy import savetxt
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# @@@@ Functions
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
def dcg_at_k(r, k):
"""
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
Returns:
Discounted cumulative gain
"""
r = np.asfarray(r)[:k]
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
def ndcg_at_k(r, k):
dcg_max = dcg_at_k(sorted(r, reverse=True), k)
if not dcg_max:
return 0.
return dcg_at_k(r, k) / dcg_max
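# Worked example (added for clarity; not used by the pipeline): for relevance
# scores r = [3, 2, 3, 0, 1, 2], dcg_at_k(r, 6) is about 6.861, the ideal ordering
# [3, 3, 2, 2, 1, 0] scores about 7.141, so ndcg_at_k(r, 6) is about 0.961.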
def Performance_Metrics(S_in):
RANKS=S_in[S_in['RANK_True']==1]
ACC_All=S_in[S_in['dockq']>0.23]
Med_All=S_in[S_in['dockq']>0.49]
# Spearman
Spearman=spearmanr(S_in['RANK_Pred'], S_in['RANK_True'])[0]
Spearman_C=spearmanr(S_in['RANK_ClusPro'], S_in['RANK_True'])[0]
# Highest Quality
T_1_star= int(RANKS['RANK_Pred']<=1)
T_5_star= int(RANKS['RANK_Pred']<=5)
T_10_star= int(RANKS['RANK_Pred']<=10)
CT_1_star= int(RANKS['RANK_ClusPro']<=1)
CT_5_star= int(RANKS['RANK_ClusPro']<=5)
CT_10_star= int(RANKS['RANK_ClusPro']<=10)
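    # Note (added): the int(...) conversions above assume RANKS holds exactly one
    # row (a single model with RANK_True == 1 per case); with zero or multiple
    # such rows the Series-to-int conversion raises.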
# ACC
if ACC_All.empty:
ACCT_1=ACCT_5=ACCT_10=ACCCT_1=ACCCT_5=ACCCT_10=0
Have_Acc=0
else:
Have_Acc=1
ACCT_1=int(np.where( sum(x<=1 for x in ACC_All['RANK_Pred']) > 0.5 , 1, 0))
ACCT_5=int(np.where( sum(x<=5 for x in ACC_All['RANK_Pred']) > 0.5 , 1, 0))
ACCT_10= int(np.where( sum(x<=10 for x in ACC_All['RANK_Pred']) > 0.5 , 1, 0))
ACCCT_1=int(np.where( sum(x<=1 for x in ACC_All['RANK_ClusPro']) > 0.5 , 1, 0))
ACCCT_5=int(np.where( sum(x<=5 for x in ACC_All['RANK_ClusPro']) > 0.5 , 1, 0))
ACCCT_10=int(np.where( sum(x<=10 for x in ACC_All['RANK_ClusPro']) > 0.5 , 1, 0))
# Med
if Med_All.empty:
MedT_1=MedT_5=MedT_10=MedCT_1=MedCT_5=MedCT_10=0
Have_Med=0
else:
Have_Med=1
MedT_1=int(np.where( sum(x<=1 for x in Med_All['RANK_Pred']) > 0.5 , 1, 0))
MedT_5=int(np.where( sum(x<=5 for x in Med_All['RANK_Pred']) > 0.5 , 1, 0))
MedT_10=int(np.where( sum(x<=10 for x in Med_All['RANK_Pred']) > 0.5 , 1, 0))
MedCT_1=int(np.where( sum(x<=1 for x in Med_All['RANK_ClusPro']) > 0.5 , 1, 0))
MedCT_5=int(np.where( sum(x<=5 for x in Med_All['RANK_ClusPro']) > 0.5 , 1, 0))
MedCT_10=int(np.where( sum(x<=10 for x in Med_All['RANK_ClusPro']) > 0.5 , 1, 0))
return [Spearman,Spearman_C,T_1_star,CT_1_star,T_5_star,CT_5_star,T_10_star,CT_10_star,ACCT_1,ACCCT_1,ACCT_5,ACCCT_5,ACCT_10,ACCCT_10,Have_Acc,MedT_1,MedCT_1,MedT_5,MedCT_5,MedT_10,MedCT_10,Have_Med]
# =========-------------------------------------------------------------------
# Data Preprocessing -
# =========-------------------------------------------------------------------
Which_Type='ANTIBODY' # 'ANTIBODY' 'ENZYME' 'OTHERS'
Output_Type ='dockq' # ['dockq','fnat','lrms','irms']
Num_Features= 30 # Total number of features
if Which_Type == 'ANTIBODY':
Test_Proteins= ['3rvw','4dn4','4g6j','3hi6','3l5w','2vxt','2w9e','4g6m','3g6d','3eo1','3v6z','3hmx','3eoa','3mxw']
Train_Proteins=['1mlc','1iqd','2jel','1nca','1ahw','1e6j','1kxq','1wej','1dqj','2fd6','2i25','1jps','2hmi','1k4c',
'1i9r','1bj1','1bgx','1qfw','2vis','1nsn','1bvk','1fsk','1vfb']
Features_Ordered= ['mincomp_fa_rep','mincomp_score','mincomp_total_score','piper_fa_rep','piper_score','piper_total_score',
'movedcomp_total_score','movedcomp_score','var_mem_Elec','var_mem_Born','kurt_mem_Avdw','SPPS','skew_mem_Rvdw',
'kurt_mem_Born','avg_mem_Elec','mincomp_fa_dun','PREMIN_COMP_Torsions','movedcomp_fa_dun','POSTMIN_COMP_Torsions',
'PREMIN_SING_Torsions','POSTMIN_SING_Torsions','mincomp_rama_prepro','cen_Elec','movedcomp_rama_prepro','size',
'piper_fa_dun','mincomp_fa_atr','cen_Born','avg_mem_Born','piper_rama_prepro','mincomp_fa_sol',
'piper_fa_intra_sol_xover4','mincomp_omega','POSTMIN_SING_Impropers','piper_yhh_planarity','movedcomp_omega',
'movedcomp_fa_intra_rep','mincomp_pro_close','var_mem_Avdw','POSTMIN_SING_Bonded','piper_omega',
'movedcomp_pro_close','PREMIN_COMP_Impropers','mincomp_fa_intra_rep','PREMIN_SING_Impropers','POSTMIN_COMP_Impropers',
'PREMIN_COMP_Bonded','PREMIN_SING_Bonded','POSTMIN_COMP_Bonded','PREMIN_COMP_Angles','piper_pro_close',
'POSTMIN_SING_Angles','movedcomp_fa_intra_sol_xover4','PREMIN_SING_Angles','POSTMIN_COMP_Angles',
'mincomp_fa_intra_sol_xover4','skew_mem_Born','avg_mem_dist','movedcomp_yhh_planarity','cen_Avdw',
'mincomp_yhh_planarity','avg_mem_Avdw','var_mem_dist','movedcomp_ref','mincomp_ref','piper_fa_atr',
'movedcomp_fa_atr','var_mem_DARS','movedcomp_fa_sol','avg_mem_DARS','piper_fa_sol','var_mem_Rvdw','cen_DARS',
'piper_dslf_fa13','piper_ref','SPAR','var_mem_Teng','movedcomp_dslf_fa13','cen_Rvdw','movedcomp_hbond_lr_bb',
'mincomp_hbond_lr_bb','movedcomp_hbond_bb_sc','piper_hbond_bb_sc','movedcomp_hbond_sr_bb','mincomp_hbond_sr_bb',
'avg_mem_Rvdw','mincomp_hbond_bb_sc','piper_hbond_sr_bb','movedcomp_hbond_sc','mincomp_fa_elec','piper_hbond_sc',
'movedcomp_fa_elec','SPAS','mincomp_hbond_sc','cen_Teng','piper_fa_elec','avg_mem_Teng','piper_hbond_lr_bb','piper_time']
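    # Note (added): this list appears to be ordered by feature importance, and the
    # Num_Features setting above presumably selects only its leading entries downstream.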
DATA_ALL = | pd.read_csv('/home/Final_DATA_Antibody.csv') | pandas.read_csv |
"""
Authors: <NAME>, <NAME>
Feature Selection module for chi2, ANOVA, and mutual information.
The main objective is to take X and y as input and output a DataFrame of features and their scores.
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import f_classif,chi2, mutual_info_classif, SelectKBest
class Filter_Algorithms(object):
def __init__(self, X, y, test_size, seed=0):
"""
Parameters
----------
input:
X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores
y: array-like {n_samples}
Training labels
output:
        R: Ranked features according to the particular algorithm
-------
"""
self.X = X # Feature values
self.y = y # Target values
self.seed = seed # Fixed seed
self.test_size = test_size # Split for train and test
def fit_Chi2(self):
scores_Chi2 = []
X_train, X_val, y_train, y_val = train_test_split(self.X, self.y, stratify=self.y, test_size=self.test_size, random_state=self.seed)
X_train = pd.DataFrame(data=X_train, columns=self.X.columns)
scores, pvalues = chi2(X_train, y_train)
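        # Note (added): sklearn's chi2 requires non-negative feature values, so X is
        # assumed to be count-like or already scaled to a non-negative range upstream.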
for i in range(X_train.shape[1]):
scores_Chi2.append((scores[i], X_train.columns[i]))
df_Chi2 = | pd.DataFrame(data=scores_Chi2, columns=('score', 'feature')) | pandas.DataFrame |
import json
import pandas as pd
from utils.fileutl import save_obj
from utils.graph_util import *
from utils.trans import *
# read config
with open("conf.json",'r',encoding="utf-8") as f:
conf = json.load(f)
# merchants (POIs)
pois_path = conf["base_path"] + conf["poi_name"]
# dishes (SPUs)
spus_path = conf["base_path"] + conf["spus_name"]
# users
user_path = conf["base_path"] + conf["user_name"]
# training data
spus_train_data_path = conf["base_path"] + conf["spus_train_name"]
# training labels
spus_train_label_path = conf["base_path"] + conf["spus_train_label"]
# test data
spus_test_data_path = conf["base_path"] + conf["spus_test_name"]
# test labels
spus_test_label_path = conf["test_path"] + conf["spus_test_label"]
# load the data
# merchants (POIs)
pois = pd.read_csv(pois_path,sep="\t")
# dishes (SPUs)
spus = pd.read_csv(spus_path,sep="\t")
# users
user = pd.read_csv(user_path,sep="\t")
# training data
train_data = pd.read_csv(spus_train_data_path,sep="\t")
# training labels
train_label = pd.read_csv(spus_train_label_path,sep="\t")
# test data
test_data = | pd.read_csv(spus_test_data_path,sep="\t") | pandas.read_csv |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
)
import pandas._testing as tm
class TestDataFrameShift:
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
)
def test_shift_non_writable_array(self, input_data, output_data, frame_or_series):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
result = frame_or_series(input_data).shift(1)
if frame_or_series is not Series:
# need to explicitly specify columns in the empty case
expected = frame_or_series(
output_data,
index=range(len(output_data)),
columns=range(1),
dtype="float64",
)
else:
expected = frame_or_series(output_data, dtype="float64")
tm.assert_equal(result, expected)
def test_shift_mismatched_freq(self, frame_or_series):
ts = frame_or_series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
@pytest.mark.parametrize(
"obj",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, obj, shift_size, frame_or_series):
# GH#22397
if frame_or_series is not Series:
obj = obj.to_frame()
assert obj.shift(shift_size) is not obj
def test_shift_object_non_scalar_fill(self):
# shift requires scalar fill_value except for object dtype
ser = Series(range(3))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
ser.shift(1, fill_value=[])
df = ser.to_frame()
with pytest.raises(ValueError, match="fill_value must be a scalar"):
df.shift(1, fill_value=np.arange(3))
obj_ser = ser.astype(object)
result = obj_ser.shift(1, fill_value={})
assert result[0] == {}
obj_df = obj_ser.to_frame()
result = obj_df.shift(1, fill_value={})
assert result.iloc[0, 0] == {}
def test_shift_int(self, datetime_frame, frame_or_series):
ts = tm.get_obj(datetime_frame, frame_or_series).astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_equal(shifted, expected)
def test_shift_32bit_take(self, frame_or_series):
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
arr = np.arange(5, dtype=dtype)
s1 = frame_or_series(arr, index=index)
p = arr[1]
result = s1.shift(periods=p)
expected = frame_or_series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods, frame_or_series):
# GH#21275
obj = frame_or_series(
range(periods),
index=date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
)
result = obj.shift(1, "2H")
expected = frame_or_series(
range(periods),
index=date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
tm.assert_equal(result, expected)
def test_shift_dst(self, frame_or_series):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
obj = frame_or_series(dates)
res = obj.shift(0)
tm.assert_equal(res, obj)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = obj.shift(ex)
exp = frame_or_series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
def test_shift_by_zero(self, datetime_frame, frame_or_series):
# shift by 0
obj = tm.get_obj(datetime_frame, frame_or_series)
unshifted = obj.shift(0)
tm.assert_equal(unshifted, obj)
def test_shift(self, datetime_frame):
# naive shift
ser = datetime_frame["A"]
shifted = datetime_frame.shift(5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(5)
tm.assert_series_equal(shifted["A"], shifted_ser)
shifted = datetime_frame.shift(-5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(-5)
tm.assert_series_equal(shifted["A"], shifted_ser)
unshifted = datetime_frame.shift(5).shift(-5)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_frame.values[:-5]
)
unshifted_ser = ser.shift(5).shift(-5)
tm.assert_numpy_array_equal(unshifted_ser.dropna().values, ser.values[:-5])
def test_shift_by_offset(self, datetime_frame, frame_or_series):
# shift by DateOffset
obj = tm.get_obj(datetime_frame, frame_or_series)
offset = offsets.BDay()
shifted = obj.shift(5, freq=offset)
assert len(shifted) == len(obj)
unshifted = shifted.shift(-5, freq=offset)
tm.assert_equal(unshifted, obj)
shifted2 = obj.shift(5, freq="B")
tm.assert_equal(shifted, shifted2)
unshifted = obj.shift(0, freq=offset)
tm.assert_equal(unshifted, obj)
d = obj.index[0]
shifted_d = d + offset * 5
if frame_or_series is DataFrame:
tm.assert_series_equal(obj.xs(d), shifted.xs(shifted_d), check_names=False)
else:
tm.assert_almost_equal(obj.at[d], shifted.at[shifted_d])
def test_shift_with_periodindex(self, frame_or_series):
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
if frame_or_series is DataFrame:
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
else:
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_equal(shifted2, shifted3)
tm.assert_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=offsets.BDay())
tm.assert_equal(shifted5, shifted4)
def test_shift_other_axis(self):
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
def test_shift_named_axis(self):
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical1(self, frame_or_series):
# GH#9416
obj = frame_or_series(["a", "b", "c", "d"], dtype="category")
rt = obj.shift(1).shift(-1)
tm.assert_equal(obj.iloc[:-1], rt.dropna())
def get_cat_values(ndframe):
# For Series we could just do ._values; for DataFrame
# we may be able to do this if we ever have 2D Categoricals
return ndframe._mgr.arrays[0]
cat = get_cat_values(obj)
sp1 = obj.shift(1)
tm.assert_index_equal(obj.index, sp1.index)
assert np.all(get_cat_values(sp1).codes[:1] == -1)
assert np.all(cat.codes[:-1] == get_cat_values(sp1).codes[1:])
sn2 = obj.shift(-2)
tm.assert_index_equal(obj.index, sn2.index)
assert np.all(get_cat_values(sn2).codes[-2:] == -1)
assert np.all(cat.codes[2:] == get_cat_values(sn2).codes[:-2])
tm.assert_index_equal(cat.categories, get_cat_values(sp1).categories)
tm.assert_index_equal(cat.categories, get_cat_values(sn2).categories)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_categorical_fill_value(self, frame_or_series):
ts = frame_or_series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
expected = frame_or_series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
)
tm.assert_equal(res, expected)
# check for incorrect fill_value
msg = r"Cannot setitem on a Categorical with a new category \(f\)"
with pytest.raises(TypeError, match=msg):
ts.shift(1, fill_value="f")
def test_shift_fill_value(self, frame_or_series):
# GH#24128
dti = date_range("1/1/2000", periods=5, freq="H")
ts = frame_or_series([1.0, 2.0, 3.0, 4.0, 5.0], index=dti)
exp = frame_or_series([0.0, 1.0, 2.0, 3.0, 4.0], index=dti)
# check that fill value works
result = ts.shift(1, fill_value=0.0)
tm.assert_equal(result, exp)
exp = frame_or_series([0.0, 0.0, 1.0, 2.0, 3.0], index=dti)
result = ts.shift(2, fill_value=0.0)
tm.assert_equal(result, exp)
ts = frame_or_series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert tm.get_dtype(res) == tm.get_dtype(ts)
# retain integer dtype
obj = frame_or_series([1, 2, 3, 4, 5], index=dti)
exp = frame_or_series([0, 1, 2, 3, 4], index=dti)
result = obj.shift(1, fill_value=0)
tm.assert_equal(result, exp)
exp = frame_or_series([0, 0, 1, 2, 3], index=dti)
result = obj.shift(2, fill_value=0)
tm.assert_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self, using_array_manager):
# GH#35488
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(2, axis=1)
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because `take` call above consolidated
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) axis=1 support
def test_shift_axis1_multiple_blocks_with_int_fill(self):
# GH#42719
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([-1, -1, 0, 1], axis=1)
expected.iloc[:, :2] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(-2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([2, 3, -1, -1], axis=1)
expected.iloc[:, -2:] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame, frame_or_series):
# TODO(2.0): remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
tm.assert_equal(shifted, shifted3)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.tshift(freq="M")
# DatetimeIndex
dtobj = tm.get_obj(datetime_frame, frame_or_series)
shifted = dtobj.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_equal(dtobj, unshifted)
shifted2 = dtobj.tshift(freq=dtobj.index.freq)
tm.assert_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
inferred_ts = tm.get_obj(inferred_ts, frame_or_series)
shifted = inferred_ts.tshift(1)
expected = dtobj.tshift(1)
expected.index = expected.index._with_freq(None)
tm.assert_equal(shifted, expected)
unshifted = shifted.tshift(-1)
tm.assert_equal(unshifted, inferred_ts)
no_freq = dtobj.iloc[[0, 5, 7]]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
def test_tshift_deprecated(self, datetime_frame, frame_or_series):
# GH#11631
dtobj = tm.get_obj(datetime_frame, frame_or_series)
with tm.assert_produces_warning(FutureWarning):
dtobj.tshift()
def test_period_index_frame_shift_with_freq(self, frame_or_series):
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_equal(unshifted, ps)
shifted2 = ps.shift(freq="B")
tm.assert_equal(shifted, shifted2)
shifted3 = ps.shift(freq=offsets.BDay())
tm.assert_equal(shifted, shifted3)
def test_datetime_frame_shift_with_freq(self, datetime_frame, frame_or_series):
dtobj = tm.get_obj(datetime_frame, frame_or_series)
shifted = dtobj.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_equal(dtobj, unshifted)
shifted2 = dtobj.shift(freq=dtobj.index.freq)
tm.assert_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
inferred_ts = tm.get_obj(inferred_ts, frame_or_series)
shifted = inferred_ts.shift(1, freq="infer")
expected = dtobj.shift(1, freq="infer")
expected.index = expected.index._with_freq(None)
tm.assert_equal(shifted, expected)
unshifted = shifted.shift(-1, freq="infer")
tm.assert_equal(unshifted, inferred_ts)
def test_period_index_frame_shift_with_freq_error(self, frame_or_series):
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="M")
def test_datetime_frame_shift_with_freq_error(
self, datetime_frame, frame_or_series
):
dtobj = tm.get_obj(datetime_frame, frame_or_series)
no_freq = dtobj.iloc[[0, 5, 7]]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.shift(freq="infer")
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) axis=1 support
def test_shift_dt64values_int_fill_deprecated(self):
# GH#31971
ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
with tm.assert_produces_warning(FutureWarning):
result = ser.shift(1, fill_value=0)
expected = Series([pd.Timestamp(0), ser[0]])
tm.assert_series_equal(result, expected)
df = ser.to_frame()
with tm.assert_produces_warning(FutureWarning):
result = df.shift(1, fill_value=0)
expected = expected.to_frame()
tm.assert_frame_equal(result, expected)
# axis = 1
df2 = DataFrame({"A": ser, "B": ser})
df2._consolidate_inplace()
with tm.assert_produces_warning(FutureWarning):
result = df2.shift(1, axis=1, fill_value=0)
expected = DataFrame({"A": [pd.Timestamp(0), pd.Timestamp(0)], "B": df2["A"]})
tm.assert_frame_equal(result, expected)
# same thing but not consolidated
# This isn't great that we get different behavior, but
# that will go away when the deprecation is enforced
df3 = DataFrame({"A": ser})
df3["B"] = ser
assert len(df3._mgr.arrays) == 2
result = df3.shift(1, axis=1, fill_value=0)
expected = DataFrame({"A": [0, 0], "B": df2["A"]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"as_cat",
[
pytest.param(
True,
marks=pytest.mark.xfail(
reason="_can_hold_element incorrectly always returns True"
),
),
False,
],
)
@pytest.mark.parametrize(
"vals",
[
date_range("2020-01-01", periods=2),
date_range("2020-01-01", periods=2, tz="US/Pacific"),
pd.period_range("2020-01-01", periods=2, freq="D"),
pd.timedelta_range("2020 Days", periods=2, freq="D"),
pd.interval_range(0, 3, periods=2),
pytest.param(
pd.array([1, 2], dtype="Int64"),
marks=pytest.mark.xfail(
reason="_can_hold_element incorrectly always returns True"
),
),
pytest.param(
pd.array([1, 2], dtype="Float32"),
marks=pytest.mark.xfail(
reason="_can_hold_element incorrectly always returns True"
),
),
],
ids=lambda x: str(x.dtype),
)
# TODO(2.0): remove filtering
@pytest.mark.filterwarnings("ignore:Index.ravel.*:FutureWarning")
def test_shift_dt64values_axis1_invalid_fill(self, vals, as_cat, request):
# GH#44564
ser = Series(vals)
if as_cat:
ser = ser.astype("category")
df = DataFrame({"A": ser})
result = df.shift(-1, axis=1, fill_value="foo")
expected = DataFrame({"A": ["foo", "foo"]})
tm.assert_frame_equal(result, expected)
# same thing but multiple blocks
df2 = DataFrame({"A": ser, "B": ser})
df2._consolidate_inplace()
result = df2.shift(-1, axis=1, fill_value="foo")
expected = DataFrame({"A": df2["B"], "B": ["foo", "foo"]})
tm.assert_frame_equal(result, expected)
# same thing but not consolidated
df3 = DataFrame({"A": ser})
df3["B"] = ser
assert len(df3._mgr.arrays) == 2
result = df3.shift(-1, axis=1, fill_value="foo")
tm.assert_frame_equal(result, expected)
def test_shift_axis1_categorical_columns(self):
# GH#38434
ci = CategoricalIndex(["a", "b", "c"])
df = DataFrame(
{"a": [1, 3], "b": [2, 4], "c": [5, 6]}, index=ci[:-1], columns=ci
)
result = df.shift(axis=1)
expected = DataFrame(
{"a": [np.nan, np.nan], "b": [1, 3], "c": [2, 4]}, index=ci[:-1], columns=ci
)
tm.assert_frame_equal(result, expected)
# periods != 1
result = df.shift(2, axis=1)
expected = DataFrame(
{"a": [np.nan, np.nan], "b": [np.nan, np.nan], "c": [1, 3]},
index=ci[:-1],
columns=ci,
)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
#!/usr/bin/env python3
import os
import pickle
import pandas as pd
import numpy as np
from data_preprocessing import preprocess_data, preprocess_data_4_catboost
from models import fit_train_test_cv, gboosting_train_test, catboost_pred, gboosting_pred, linear_regression_pred, linear_ridge_pred, linear_ridge_cv_pred, display_metrics
from ML_AP import ApplicantProfile
from constants import SCHOOLS_REVERSED, TARGET_LABELS
from sklearn.linear_model import LinearRegression, SGDRegressor, ElasticNet, Lasso, Ridge, RidgeCV
from sklearn.ensemble import GradientBoostingRegressor
OUT_DIR = os.path.join(os.path.dirname(__file__), 'data_out')
IN_FILE_NAME = 'pq_data_4_24_18.csv'
IN_FILE_NAME2 = 'pq_data_10_20_17.csv'
IN_FILE_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data_out', IN_FILE_NAME)
IN_FILE_PATH2 = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data_out', IN_FILE_NAME2)
OUT_FILE_PATH = os.path.join(OUT_DIR, "{}_processed.csv".format(IN_FILE_NAME.replace('.csv', '')))
input_data_df = pd.read_csv(IN_FILE_PATH)
other_data_df = | pd.read_csv(IN_FILE_PATH2) | pandas.read_csv |
# <NAME>, <NAME>, <NAME>, <NAME>
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
from textblob import TextBlob
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import nltk
#This function creates the attributes "polarity" and "subjectivity" for a given dataframe.
def sentiment_analysis(df):
x = df.to_numpy()
list_of_subjectivity = []
list_of_polarity = []
for item in x:
string = TextBlob(item)
list_of_subjectivity.append(string.sentiment.subjectivity)
list_of_polarity.append(string.sentiment.polarity)
df2 = pd.DataFrame(data=list_of_subjectivity)
df3 = | pd.DataFrame(data=list_of_polarity) | pandas.DataFrame |
import re
from flaski import app
from flask_login import current_user
from flask_caching import Cache
from flaski.routines import check_session_app
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from ._utils import handle_dash_exception, parse_table, protect_dashviews, validate_user_access, \
make_navbar, make_footer, make_options, make_table, META_TAGS, make_min_width, \
change_table_minWidth, change_fig_minWidth
from ._aadatalake import read_results_files, read_gene_expression, read_genes, read_significant_genes, \
filter_samples, filter_genes, filter_gene_expression, nFormat, read_dge,\
make_volcano_plot, make_ma_plot, make_pca_plot, make_annotated_col
import uuid
from werkzeug.utils import secure_filename
import json
from flask import session
import pandas as pd
import os
CURRENTAPP="aadatalake"
navbar_title="RNAseq data lake"
dashapp = dash.Dash(CURRENTAPP,url_base_pathname=f'/{CURRENTAPP}/' , meta_tags=META_TAGS, server=app, external_stylesheets=[dbc.themes.BOOTSTRAP], title="FLASKI", assets_folder="/flaski/flaski/static/dash/")
protect_dashviews(dashapp)
cache = Cache(dashapp.server, config={
'CACHE_TYPE': 'redis',
'CACHE_REDIS_URL': 'redis://:%s@%s' %( os.environ.get('REDIS_PASSWORD'), os.environ.get('REDIS_ADDRESS') ) #'redis://localhost:6379'),
})
controls = [
html.H5("Filters", style={"margin-top":10}),
html.Label('Data sets'), dcc.Dropdown( id='opt-datasets', multi=True),
html.Label('Groups',style={"margin-top":10}), dcc.Dropdown( id='opt-groups', multi=True),
html.Label('Samples',style={"margin-top":10}), dcc.Dropdown( id='opt-samples', multi=True),
html.Label('Gene names',style={"margin-top":10}), dcc.Dropdown( id='opt-genenames', multi=True),
html.Label('Gene IDs',style={"margin-top":10}), dcc.Dropdown( id='opt-geneids', multi=True),
html.Label('Download file prefix',style={"margin-top":10}), dcc.Input(id='download_name', value="data.lake", type='text') ]
side_bar=[ dbc.Card(controls, body=True),
html.Button(id='submit-button-state', n_clicks=0, children='Submit', style={"width": "100%","margin-top":4, "margin-bottom":4} )
]
# Define Layout
dashapp.layout = html.Div( [ html.Div(id="navbar"), dbc.Container(
fluid=True,
children=[
html.Div(id="app_access"),
html.Div(id="redirect-pca"),
html.Div(id="redirect-volcano"),
html.Div(id="redirect-ma"),
dcc.Store(data=str(uuid.uuid4()), id='session-id'),
dbc.Row(
[
dbc.Col( dcc.Loading(
id="loading-output-1",
type="default",
children=html.Div(id="side_bar"),
style={"margin-top":"0%"}
),
md=3, style={"height": "100%",'overflow': 'scroll'} ),
dbc.Col( dcc.Loading(
id="loading-output-2",
type="default",
children=[ html.Div(id="my-output")],
style={"margin-top":"50%","height": "100%"} ),
md=9, style={"height": "100%","width": "100%",'overflow': 'scroll'})
],
style={"min-height": "87vh"}),
] )
] + make_footer()
)
## all callback elements with `State` will be updated only once submit is pressed
## all callback elements with `Input` will be updated every time the value gets changed
@dashapp.callback(
Output(component_id='my-output', component_property='children'),
Input('session-id', 'data'),
Input('submit-button-state', 'n_clicks'),
State("opt-datasets", "value"),
State("opt-groups", "value"),
State("opt-samples", "value"),
State("opt-genenames", "value"),
State("opt-geneids", "value"),
State(component_id='download_name', component_property='value'),
)
def update_output(session_id, n_clicks, datasets, groups, samples, genenames, geneids, download_name):
if not validate_user_access(current_user,CURRENTAPP):
return None
selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=samples, cache=cache)
## samples
results_files=selected_results_files[["Set","Group","Reps"]]
results_files.columns=["Set","Group","Sample"]
results_files=results_files.drop_duplicates()
results_files_=make_table(results_files,"results_files")
# results_files_ = dbc.Table.from_dataframe(results_files, striped=True, bordered=True, hover=True)
download_samples=html.Div(
[
html.Button(id='btn-samples', n_clicks=0, children='Download', style={"margin-top":4, 'background-color': "#5474d8", "color":"white"}),
dcc.Download(id="download-samples")
]
)
## gene expression
if datasets or groups or samples or genenames or geneids :
gene_expression=filter_gene_expression(ids2labels,genenames,geneids,cache)
gene_expression_=make_table(gene_expression,"gene_expression")#,fixed_columns={'headers': True, 'data': 2} )
# gene_expression_ = dbc.Table.from_dataframe(gene_expression, striped=True, bordered=True, hover=True)
download_geneexp=html.Div(
[
html.Button(id='btn-geneexp', n_clicks=0, children='Download', style={"margin-top":4, 'background-color': "#5474d8", "color":"white"}),
dcc.Download(id="download-geneexp")
]
)
gene_expression_bol=True
else:
gene_expression_bol=False
## PCA
selected_sets=list(set(selected_results_files["Set"]))
if len(selected_sets) == 1 :
pca_data=filter_gene_expression(ids2labels,None,None,cache)
pca_plot, pca_pa, pca_df=make_pca_plot(pca_data,selected_sets[0])
pca_config={ 'toImageButtonOptions': { 'format': 'svg', 'filename': download_name+".pca" }}
pca_plot=dcc.Graph(figure=pca_plot, config=pca_config, style={"width":"100%","overflow-x":"auto"})
iscatter_pca=html.Div(
[
html.Button(id='btn-iscatter_pca', n_clicks=0, children='iScatterplot',
style={"margin-top":4, \
"margin-left":4,\
"margin-right":4,\
'background-color': "#5474d8", \
"color":"white"})
])
pca_bol=True
else:
pca_bol=False
## differential gene expression
dge_bol=False
volcano_plot=None
if not samples:
if len(selected_sets) == 1 :
dge_groups=list(set(selected_results_files["Group"]))
if len(dge_groups) == 2:
dge=read_dge(selected_sets[0], dge_groups, cache)
dge_plots=dge.copy()
if genenames:
dge=dge[dge["gene name"].isin(genenames)]
if geneids:
dge=dge[dge["gene id"].isin(geneids)]
dge_=make_table(dge,"dge")
download_dge=html.Div(
[
html.Button(id='btn-dge', n_clicks=0, children='Download', style={"margin-top":4, 'background-color': "#5474d8", "color":"white"}),
dcc.Download(id="download-dge")
]
)
annotate_genes=[]
if genenames:
genenames_=dge[dge["gene name"].isin(genenames)]["gene name"].tolist()
annotate_genes=annotate_genes+genenames_
if geneids:
genenames_=dge[dge["gene id"].isin(geneids)]["gene name"].tolist()
annotate_genes=annotate_genes+genenames_
volcano_config={ 'toImageButtonOptions': { 'format': 'svg', 'filename': download_name+".volcano" }}
volcano_plot, volcano_pa, volcano_df=make_volcano_plot(dge_plots, selected_sets[0], annotate_genes)
volcano_plot.update_layout(clickmode='event+select')
volcano_plot=dcc.Graph(figure=volcano_plot, config=volcano_config, style={"width":"100%","overflow-x":"auto"}, id="volcano_plot")
iscatter_volcano=html.Div(
[
html.Button(id='btn-iscatter_volcano', n_clicks=0, children='iScatterplot',
style={"margin-top":4, \
"margin-left":4,\
"margin-right":4,\
'background-color': "#5474d8", \
"color":"white"})
])
ma_config={ 'toImageButtonOptions': { 'format': 'svg', 'filename': download_name+".ma" }}
ma_plot, ma_pa, ma_df=make_ma_plot(dge_plots, selected_sets[0],annotate_genes )
ma_plot.update_layout(clickmode='event+select')
ma_plot=dcc.Graph(figure=ma_plot, config=ma_config, style={"width":"100%","overflow-x":"auto"}, id="ma_plot")
iscatter_ma=html.Div(
[
html.Button(id='btn-iscatter_ma', n_clicks=0, children='iScatterplot',
style={"margin-top":4, \
"margin-left":4,\
"margin-right":4,\
'background-color': "#5474d8", \
"color":"white"})
])
dge_bol=True
if ( dge_bol ) & ( pca_bol ) :
minwidth=["Samples","Expression", "PCA", "DGE","Volcano","MA"]
minwidth=len(minwidth) * 150
minwidth = str(minwidth) + "px"
results_files_=change_table_minWidth(results_files_,minwidth)
gene_expression_=change_table_minWidth(gene_expression_,minwidth)
dge_=change_table_minWidth(dge_,minwidth)
pca_plot=change_fig_minWidth(pca_plot,minwidth)
out=dcc.Tabs( [
dcc.Tab([ results_files_, download_samples],
label="Samples", id="tab-samples",
style={"margin-top":"0%"}),
dcc.Tab( [ pca_plot, iscatter_pca ],
label="PCA", id="tab-pca",
style={"margin-top":"0%"}),
dcc.Tab( [ gene_expression_, download_geneexp],
label="Expression", id="tab-geneexpression",
style={"margin-top":"0%"}),
dcc.Tab( [ dge_, download_dge],
label="DGE", id="tab-dge",
style={"margin-top":"0%"}),
dcc.Tab( [ dbc.Row( [
dbc.Col(volcano_plot),
dbc.Col( [ html.Div(id="volcano-plot-table") ]
) ],
style={"minWidth":minwidth}),
dbc.Row([iscatter_volcano,html.Div(id="volcano-bt")]),
],
label="Volcano", id="tab-volcano",
style={"margin-top":"0%"}),
dcc.Tab( [ dbc.Row( [
dbc.Col(ma_plot),
dbc.Col( [ html.Div(id="ma-plot-table") ]
) ],
style={"minWidth":minwidth}),
dbc.Row([iscatter_ma,html.Div(id="ma-bt")]),
] ,
label="MA", id="tab-ma",
style={"margin-top":"0%"})
],
mobile_breakpoint=0,
style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )
elif pca_bol :
minwidth=["Samples","Expression", "PCA"]
minwidth=len(minwidth) * 150
minwidth = str(minwidth) + "px"
results_files_=change_table_minWidth(results_files_,minwidth)
gene_expression_=change_table_minWidth(gene_expression_,minwidth)
pca_plot=change_fig_minWidth(pca_plot,minwidth)
out=dcc.Tabs( [
dcc.Tab([ results_files_, download_samples],
label="Samples", id="tab-samples",
style={"margin-top":"0%"}),
dcc.Tab( [ pca_plot, iscatter_pca ],
label="PCA", id="tab-pca",
style={"margin-top":"0%"}),
dcc.Tab( [ gene_expression_, download_geneexp],
label="Expression", id="tab-geneexpression",
style={"margin-top":"0%"}),
],
mobile_breakpoint=0,
style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )
elif gene_expression_bol:
minwidth=["Samples","Expression"]
minwidth=len(minwidth) * 150
minwidth = str(minwidth) + "px"
results_files_=change_table_minWidth(results_files_,minwidth)
gene_expression_=change_table_minWidth(gene_expression_,minwidth)
out=dcc.Tabs( [
dcc.Tab([ results_files_, download_samples],
label="Samples", id="tab-samples",
style={"margin-top":"0%"}),
dcc.Tab( [ gene_expression_, download_geneexp],
label="Expression", id="tab-geneexpression",
style={"margin-top":"0%"}),
],
mobile_breakpoint=0,
style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )
else:
minwidth=["Samples"]
minwidth=len(minwidth) * 150
minwidth = str(minwidth) + "px"
results_files_=change_table_minWidth(results_files_,minwidth)
out=dcc.Tabs( [
dcc.Tab([ results_files_, download_samples],
label="Samples", id="tab-samples",
style={"margin-top":"0%"}),
],
mobile_breakpoint=0,
style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )
return out
@dashapp.callback(
Output('volcano-plot-table', 'children'),
Output('volcano-bt', 'children'),
Input('volcano_plot', 'selectedData')
)
def display_volcano_data(selectedData):
if selectedData:
selected_genes=selectedData["points"]
selected_genes=[ s["text"] for s in selected_genes ]
df= | pd.DataFrame({"Selected genes":selected_genes}) | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib.ticker as mtick
from matplotlib.collections import LineCollection
###############################################################################
#Non-Standard Imports
###############################################################################
import addpath
import dunlin as dn
import dunlin.simulate as sim
import dunlin.curvefit as cf
import dunlin.dataparser as dp
import dunlin.traceanalysis as ta
add = lambda x, y: x + y
minus = lambda x, y: x - y
mul = lambda x, y: x * y
div = lambda x, y: x / y
def apply2data(data, name, func, *states, **kwargs):
if type(func) == str:
if func == '+':
func_ = add
elif func == '-':
func_ = minus
elif func == '*':
func_ = mul
elif func == '/':
func_ = div
elif isnum(func):
func_ = lambda x: x*func
else:
func_ = func
vectors = [data[s] for s in states]
new = func_(*vectors, **kwargs)
result = join(data, name, new)
return result
def join(data, name, new):
temp = pd.concat( {name:new}, axis=1 )
result = data.join(temp)
return result
def isnum(x):
try:
float(x)
return True
except:
return False
def dai(mu):
gradient = 5.78656638987421
intercept = 0.03648482880435973
return mu*gradient + intercept
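# Minimal usage sketch of apply2data (illustrative only; the column names 'OD' and
# 'FL' are placeholders, not columns of data_MCr_AU.csv):
def _demo_apply2data():
    toy = pd.DataFrame({'OD': [0.1, 0.2, 0.4], 'FL': [1.0, 3.0, 8.0]})
    # Divide FL by OD and append the result as a new column named 'FL_per_OD'.
    return apply2data(toy, 'FL_per_OD', '/', 'FL', 'OD')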
plt.close('all')
plt.style.use(dn.styles['light_style_multi'])
# plt.style.use(dn.styles['dark_style_multi'])
df = | pd.read_csv('data_MCr_AU.csv', header=[0, 1]) | pandas.read_csv |
from citrination_client import (
CitrinationClient,
ChemicalFieldQuery,
ChemicalFilter,
FieldQuery,
PropertyQuery,
Filter,
ReferenceQuery,
PifSystemQuery,
DatasetQuery,
DataQuery,
PifSystemReturningQuery,
)
import os
import time
import pandas as pd
from matminer.data_retrieval.retrieve_base import BaseDataRetrieval
from tqdm import tqdm
from pandas.io.json import json_normalize
import numpy as np
from collections import Counter
__author__ = [
"<NAME> <<EMAIL>>",
"<NAME> <<EMAIL>>",
]
def parse_scalars(scalars):
return get_value(scalars[0])
def get_value(dict_item):
# TODO: deal with rest of formats in a scalar object
if "value" in dict_item:
return dict_item["value"]
elif "minimum" in dict_item and "maximum" in dict_item:
return "Minimum = {}, Maximum = {}".format(dict_item["minimum"], dict_item["maximum"])
class CitrineDataRetrieval(BaseDataRetrieval):
"""
CitrineDataRetrieval is used to retrieve data from the Citrination database
See API client docs at api_link below.
"""
def __init__(self, api_key=None):
"""
Args:
api_key: (str) Your Citrine API key, or None if
you've set the CITRINE_KEY environment variable
"""
if api_key:
api_key = api_key
elif "CITRINATION_API_KEY" in os.environ:
api_key = os.environ["CITRINATION_API_KEY"]
elif "CITRINE_KEY" in os.environ:
api_key = os.environ["CITRINE_KEY"]
else:
raise AttributeError(
"""Citrine API key not found.
You need to get an API key from Citrination, and either supply it as an argument to
            this class or set it as the value of the CITRINATION_API_KEY environment variable
See https://citrineinformatics.github.io/api-documentation/quickstart/index.html
for details on how to get an API key"""
)
self.client = CitrinationClient(api_key, "https://citrination.com")
def api_link(self):
return "https://citrineinformatics.github.io/python-citrination-client/"
def get_dataframe(
self,
criteria,
properties=None,
common_fields=None,
secondary_fields=False,
print_properties_options=True,
):
"""
Gets a Pandas dataframe object from data retrieved from
the Citrine API.
Args:
criteria (dict): see get_data method for supported keys except
prop; prop should be included in properties.
properties ([str]): requested properties/fields/columns.
For example, ["Seebeck coefficient", "Band gap"]. If unsure
about the exact words, capitalization, etc try something like
["gap"] and "max_results": 3 and print_properties_options=True
to see the exact options for this field
common_fields ([str]): fields that are common to all the requested
properties. Common example can be "chemicalFormula". Look for
suggested common fields after a quick query for more info
secondary_fields (bool): if True, fields not included in properties
may be added to the output (e.g. references). Recommended only
if len(properties)==1
print_properties_options (bool): whether to print available options
for "properties" and "common_fields" arguments.
Returns: (object) Pandas dataframe object containing the results
"""
common_fields = common_fields or []
properties = properties or [None]
if criteria.get("prop"):
properties.append(criteria.pop("prop"))
properties = list(set(properties))
all_fields = []
for prop_counter, requested_prop in enumerate(properties):
jsons = self.get_data(prop=requested_prop, **criteria)
non_prop_df = pd.DataFrame() # df w/o measurement column
prop_df = pd.DataFrame() # df containing only measurement column
counter = 0 # variable to keep count of sample hit and set indexes
for hit in tqdm(jsons):
counter += 1
if "system" in hit.keys(): # Check if 'system' key exists, else skip
system_value = hit["system"]
system_normdf = json_normalize(system_value)
non_prop_cols = [cols for cols in system_normdf.columns if "properties" not in cols]
non_prop_row = pd.DataFrame()
for col in non_prop_cols:
non_prop_row[col] = system_normdf[col]
non_prop_row.index = [counter] * len(system_normdf)
non_prop_df = non_prop_df.append(non_prop_row)
if "properties" in system_value:
p_df = | pd.DataFrame() | pandas.DataFrame |
from sklearn import metrics
from math import sqrt
import pandas as pd
#---------------------------------------------------------------------------------------------------------------------#
def get_model_metrics(model,label_data,task, X_test, Y_test):
'''
Returns the dictionary cotaining metrics for the given data.
Parameters:
model (trained ml model)
label_data(dataframe) : to check number of classes
task (string): ml task prediction or classification
X test (dataframe): test data
Y test (dataframe): test labels
Returns:
stats (dictionary): contains the metrics for given data
'''
try:
number_of_classes = len( | pd.unique(label_data) | pandas.unique |
#! /urs/bin/env python
from __future__ import division
import click
import os
from HTSeq import GFF_Reader
import pandas as pd
import sys
INTERGENIC_INF = ['-', '-', '-', '-', '-', '-', '-', 'intergenic']
LNC_ORDER = ['divergent', 'sense_intronic',
'other_sense_overlap', 'antisense', 'intergenic']
OVERLAP_CUTOFF = 0.5
def iterintrons(tss, exon_sizes, exon_starts):
tss = int(tss)
exon_sizes_list = [int(each) for each in exon_sizes.split(',') if each]
exon_starts_list = [int(each) for each in exon_starts.split(',') if each]
exon_list = list()
for n, each_size in enumerate(exon_sizes_list):
each_start = exon_starts_list[n]
exon_list.append((tss + each_start,
tss + each_start + each_size))
e1 = exon_list[0]
for n in range(1, len(exon_list)):
e2 = exon_list[n]
yield e1[1], e2[0]
e1 = e2
def tss_in_interval(tss, iter_interval):
for each in iter_interval:
if tss >= each[0] and tss < each[1]:
return True
else:
return False
def overlap_portion(inter_rd):
overlap_len = inter_rd[24]
tr1_len = inter_rd[2] - inter_rd[1]
tr2_len = inter_rd[14] - inter_rd[13]
return tr1_len / overlap_len, tr2_len / overlap_len
def compare_class(lnc_class1, lnc_class2):
compare1 = lnc_class1[:]
compare2 = lnc_class2[:]
compare1[0] = list(reversed(LNC_ORDER)).index(compare1[0])
compare2[0] = list(reversed(LNC_ORDER)).index(compare2[0])
compare1[1] *= -1
compare2[1] *= -1
compare_list = [compare1, compare2]
compare_list.sort()
return compare_list.index(compare1)
@click.command()
@click.option(
'-g',
'--gtf',
type=click.Path(exists=True, dir_okay=False),
required=True,
help='lncRNA candidates gtf.')
@click.option(
'-f',
'--feelnc_classify',
type=click.Path(exists=True, dir_okay=False),
required=True,
help='FEElnc classify result.')
@click.option(
'-b',
'--bed_intersect',
type=click.Path(exists=True, dir_okay=False),
required=True,
help='bedtools intersect for lncRNA bed and mRNA bed.')
@click.option(
'-o',
'--out_dir',
type=click.Path(file_okay=False),
default=os.getcwd(),
help='output directory.')
def main(gtf, feelnc_classify, bed_intersect, out_dir):
feelnc_df = pd.read_table(feelnc_classify, index_col=2)
intersect_df = pd.read_table(bed_intersect, index_col=[3, 15], header=None)
lnc_class_list = []
out_header = list(feelnc_df.columns[1:])
out_header.insert(0, 'lncRNA_transcript')
out_header.append('lncRNA_class')
def get_class(fee_rd, intersect_df):
if fee_rd.type == 'intergenic':
if fee_rd.subtype == 'divergent':
return 'divergent', 0, 0
else:
return 'intergenic', 0, 0
else:
inter_index = (fee_rd.name, fee_rd.partnerRNA_transcript)
inter_rd = intersect_df.loc[inter_index]
overlap1, overlap2 = overlap_portion(inter_rd)
if fee_rd.direction == 'sense':
if fee_rd.subtype == 'containing':
return 'other_sense_overlap', overlap1, overlap2
elif fee_rd.subtype == 'nested':
return 'sense_intronic', overlap1, overlap2
elif fee_rd.subtype == 'overlapping':
if overlap1 >= OVERLAP_CUTOFF:
introns = iterintrons(inter_rd[13], inter_rd[22],
inter_rd[23])
lnc_can_start = inter_rd[1]
if tss_in_interval(lnc_can_start, introns):
return 'sense_intronic', overlap1, overlap2
return 'other_sense_overlap', overlap1, overlap2
else:
                    sys.exit('unknown type [{t.subtype}]'.format(t=fee_rd))
else:
if fee_rd.subtype == 'nested':
return 'antisense', overlap1, overlap2
else:
if overlap1 >= OVERLAP_CUTOFF:
return 'antisense', overlap1, overlap2
else:
return 'intergenic', overlap1, overlap2
def lnc_classify(tr_id, feelnc_df, intersect_df):
tr_detail_df = feelnc_df.loc[tr_id]
out_inf = []
class_inf = []
if tr_detail_df.index[0] == tr_id:
for n in range(len(tr_detail_df)):
class_value = list(get_class(tr_detail_df.ix[n], intersect_df))
dis = tr_detail_df.ix[n].distance
tmp_class_inf = class_value[:]
tmp_class_inf.insert(1, dis)
tmp_out_inf = list(tr_detail_df.ix[n][1:])
if not out_inf:
out_inf = tmp_out_inf
class_inf = tmp_class_inf
else:
if compare_class(tmp_class_inf, class_inf):
out_inf = tmp_out_inf
class_inf = tmp_class_inf
else:
class_value = list(get_class(tr_detail_df, intersect_df))
out_inf = list(tr_detail_df[1:])
class_inf = class_value
out_inf.insert(0, tr_id)
out_inf.append(class_inf[0])
return out_inf
for eachline in GFF_Reader(gtf):
if eachline.type == 'transcript':
tr_id = eachline.attr['transcript_id']
gene_id = eachline.attr['gene_id']
if tr_id not in feelnc_df.index:
out_inf = [tr_id, gene_id]
out_inf.extend(INTERGENIC_INF)
else:
out_inf = lnc_classify(tr_id, feelnc_df, intersect_df)
out_inf_series = pd.Series(out_inf, index=out_header)
lnc_class_list.append(out_inf_series)
out_df = | pd.concat(lnc_class_list, axis=1) | pandas.concat |
import os
import sys
import datetime
import numpy as np
import pandas as pd
from random import randrange, randint
from statistics import mean, median
import re
class Aggregator():
def __init__(self):
pass
def many_to_one(self, func, values=[]):
#print(values)
if func == 'max':
return max(values)
elif func == 'min':
return min(values)
elif func == 'mean' or func == 'avg':
return mean(values)
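# Minimal usage sketch of Aggregator (illustrative values only): it collapses a list
# of values with the requested reduction.
def _demo_aggregator():
    agg = Aggregator()
    assert agg.many_to_one('max', [3, 1, 2]) == 3
    assert agg.many_to_one('mean', [2, 4, 6]) == 4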
class TimeManage():
"""
    Deals only with dataframes that contain a column dedicated to a date/time series.
    The input can be in string format.
    This sets the platform for the date-based Aggregator.
"""
df = pd.DataFrame()
def __init__(self, df):
"""
constructor expecting a dataframe with valid dates/times
"""
self.df = df
self.min_interval_in_seconds = 99999999999
def keyword_based_date_range_selection(self, keyword,keyword_value, aggfunc={},date_column=None, date_column_format="%Y-%m-%d %H:%M:%S", custom=[],grouping_colums=[]):
"""
this will create a subset of df
# TODO:
"""
expected_interval_for_aggregation_in_seconds = 0
        # Working code with conversion of date limits; the section below is commented out while testing the pivot_table and Grouper approach further down.
        # Need to use a regular expression, but there is a problem with separating keyword_value, e.g. "10min" should be separated into "10" and "min".
# if keyword == 'custom':
# #print("Currently not supported")
# exit()
#
# elif 'min' in keyword:
# expected_seconds = 60
# expected_interval_for_aggregation_in_seconds = expected_seconds*keyword_value
# elif 'hour' in keyword:
# expected_seconds = 60*60
# expected_interval_for_aggregation_in_seconds = expected_seconds*keyword_value
# elif 'day' in keyword:
# expected_seconds = 60*60*24
# expected_interval_for_aggregation_in_seconds = expected_seconds*keyword_value
# elif 'week' in keyword:
# expected_seconds = 60*60*24*7
# expected_interval_for_aggregation_in_seconds = expected_seconds*keyword_value
# elif 'month' in keyword:
# expected_seconds = 60*60*24*30
# expected_interval_for_aggregation_in_seconds = expected_seconds*keyword_value
#uniquify the date column from the dataframe
# #now get the min_interval_in_seconds of the user
# min_seconds = self.get_min_interval_in_seconds(date_column=date_column,format_of_date=date_column_format)
#
# #print("the minimum interval seconds is", min_seconds)
# #print("expected_interval_for_aggregation_in_seconds", expected_interval_for_aggregation_in_seconds)
        # #compare min_seconds with expected_interval_for_aggregation_in_seconds; if min_seconds is greater than the expected interval then, for now, the result is an error.
#
# if expected_interval_for_aggregation_in_seconds > min_seconds:
# #calculating the range to split the dataframe
# range = int(expected_interval_for_aggregation_in_seconds/min_seconds)
        # #split the dataframe into multiple dataframes based on range
# splited_dfs = self.split_df_to_many(range)
#
# date_value = []
# aggregation_value = []
# #here we get splited df according to range
# for df in splited_dfs:
# #print("splited dfs ",df)
# value_df = df.iloc[:,value_column]
# # #print("the value list is ",value_df)
# aggregation = Aggregator()
        # #apply aggregation on each chunk of the divided dataframe
# aggregation_result = aggregation.many_to_one(func,value_df)
# d = self.df.iloc[:,date_column]
# date_name = d.name
# #print("the date name",date_name)
        # #append the first value of the date field to the date_value list
# date_value.append(df[date_name].iloc[0])
# #append the result of aggregation class into aggregation_value list
# aggregation_value.append(aggregation_result)
# d = self.df.iloc[:,date_column]
# date_name = d.name
# v = self.df.iloc[:,value_column]
# value_name = v.name
#
# #generate the dict from both date_value list and aggregation_value list
# frame = {date_name:date_value,value_name:aggregation_value}
# #create a result dataframe
# result_df = pd.DataFrame(frame)
# #print("the results dataframe is ", result_df)
#
# #print("the expected range is",range)
#
# else:
# #print("-F- the interval range supporting is not found")
# exit()
# todo
# use self.df
##print(self.df.iloc[0:range,1])
# resulted_array = []
# for v in self.df.iloc[0:range,value_column]:
# resulted_array.append(v)
#
#
# agg = Aggregator()
# return agg.many_to_one(func, resulted_array)
        # creating the section below for testing the pivot_table and Grouper methods.
df = self.df
if aggfunc:
if len(aggfunc)>0:
for column, value in aggfunc.items():
# #print("the converting column name is", column)
try:
df[column] = df[column].astype(float)
except:
result_df="Error"
# #print("the converted column name is",df.dtypes)
        # TODO: should convert the numerical columns to numeric dtypes
        # for testing purposes we manually converted them above
# #print("the keyword is ",keyword)
# #print("the date column is ",date_column)
# #print("the grouping_colums is ",grouping_colums)
# #print("the value column is ",value_column)
# #print("the aggrigation function is ",aggfunc)
# #print("in project query frequency",keyword)
if keyword:
if keyword == 'custom':
# #print("Currently not supported")
exit()
elif 'min' in keyword:
expected_freq = 'M'
# #print("the date column is ",date_column)
if aggfunc and grouping_colums :
try:
result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)
except:
result_df="Error"
elif aggfunc and not grouping_colums:
try:
result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)
# #print("new type of query",result_df)
except:
result_df="Error"
elif grouping_colums and not aggfunc:
try:
# #print("year just grouping")
grouping_colums.append(date_column)
grouped_df =df.groupby(grouping_colums)
result_df = pd.DataFrame(grouped_df.size().reset_index(name = "Count"))
except:
result_df="Error"
elif expected_freq:
try:
# #print("only frequency")
s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))
result_df = pd.DataFrame(s_df.size().reset_index(name = "Count"))
except:
result_df="Error"
elif 'hour' in keyword:
expected_freq = 'H'
# #print("the date column is ",date_column)
if aggfunc and grouping_colums :
try:
result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)
except:
result_df="Error"
elif aggfunc and not grouping_colums:
try:
result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)
# #print("new type of query",result_df)
except:
result_df="Error"
elif grouping_colums and not aggfunc:
try:
# #print("year just grouping")
grouping_colums.append(date_column)
grouped_df =df.groupby(grouping_colums)
result_df = pd.DataFrame(grouped_df.size().reset_index(name = "Count"))
except:
result_df="Error"
elif expected_freq:
try:
s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))
result_df = pd.DataFrame(s_df.size().reset_index(name = "Count"))
except:
result_df="Error"
elif 'day' in keyword:
expected_freq = 'D'
# #print("the date column is ",date_column)
if aggfunc and grouping_colums :
try:
result_df = df.pivot_table(index= grouping_colums,columns = | pd.Grouper(freq=expected_freq,key=date_column) | pandas.Grouper |
""" This file originated from the online analysis project at:
https://github.com/OlafHaag/UCM-WebApp
"""
import itertools
import pandas as pd
import pingouin as pg
import numpy as np
from scipy.stats import wilcoxon
from sklearn.decomposition import PCA
from sklearn.covariance import EllipticEnvelope
def preprocess_data(users, blocks, trials):
""" Clean data.
:param users: Data from users table
:type users: pandas.DataFrame
:param blocks: Data from circletask_blocks table.
:type blocks: pandas.DataFrame
:param trials: Data from circletask_trials table.
:type trials: pandas.DataFrame
:returns: Joined and recoded DataFrame. Number of erroneous blocks. Number of sessions removed as a consequence.
Number of removed trials.
:rtype: tuple[pandas.DataFrame, int, int, int]
"""
blocks, n_errors, invalid_sessions = remove_erroneous_blocks(blocks)
# Merge to 1 table.
df = join_data(users, blocks, trials)
# Remove invalid trials.
cleaned, n_trials_removed = get_valid_trials(df)
return cleaned, n_errors, len(invalid_sessions), n_trials_removed
def remove_erroneous_blocks(blocks, delta_time=2.0, n_blocks=3):
""" Remove sessions with erroneous data due to a NeuroPsy Research App malfunction.
The error causes block data to be duplicated and the values for df1 & df2 multiplied again by 100.
The duplicated blocks are identified by comparing their time stamps to the previous block (less than 2 seconds
difference). If the error caused the session to end early, the whole session is removed.
NeuroPsyResearchApp issue #1.
:param pandas.DataFrame blocks: Data about blocks.
:param float delta_time: Threshold in seconds for which a consecutive block in a session is considered invalid
if it was completed within this period after the previous. Default is 2.0 seconds.
:param int n_blocks: Required number of blocks per session. If a session doesn't have this many blocks,
it gets removed.
:returns: Cleaned block data. Number of errors found. List of sessions that were removed as a consequence.
:rtype: tuple[pandas.DataFrame, int, list]
"""
# Identify duplicated blocks. Consecutive time stamps are usually less than 2 seconds apart.
mask = blocks.groupby(['session_uid'])['time'].diff() < delta_time
try:
n_errors = mask.value_counts()[True]
except KeyError:
n_errors = 0
blocks = blocks.loc[~mask, :]
# Now, after removal of erroneous data a session might not have all 3 blocks we expect. Exclude whole session.
invalid_sessions = blocks['session_uid'].value_counts() != n_blocks
invalid_sessions = invalid_sessions.loc[invalid_sessions].index.to_list()
blocks = blocks.loc[~blocks['session_uid'].isin(invalid_sessions), :]
return blocks, n_errors, invalid_sessions
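def _demo_remove_erroneous_blocks():
    """ Minimal sketch with toy data (not data from the app's database): the third block
    repeats the second one 0.5 s later, so it is dropped as a duplicate, while the session
    keeps its required 3 blocks and is therefore not removed.
    """
    toy_blocks = pd.DataFrame({'session_uid': ['s1'] * 4,
                               'time': [0.0, 30.0, 30.5, 60.0]})
    cleaned, n_errors, invalid = remove_erroneous_blocks(toy_blocks)
    assert n_errors == 1 and len(cleaned) == 3 and invalid == []
    return cleaned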
def join_data(users, blocks, trials):
""" Take data from different database tables and join them to a single DataFrame. Some variables are renamed and
recoded in the process, some are dropped.
:param users: Data from users table
:type users: pandas.DataFrame
:param blocks: Data from circletask_blocks table.
:type blocks: pandas.DataFrame
:param trials: Data from circletask_trials table.
:type trials: pandas.DataFrame
:return: Joined and recoded DataFrame.
:rtype: pandas.DataFrame
"""
# Use users' index instead of id for obfuscation and shorter display.
users_inv_map = pd.Series(users.index, index=users.id)
# Remove trials that don't belong to any block. Those have been excluded.
trials = trials.loc[trials['block_id'].isin(blocks.index), :]
# Start a new table for trials and augment with data from other tables.
df = pd.DataFrame(index=trials.index)
df['user'] = trials.user_id.map(users_inv_map).astype('category')
df['session'] = trials['block_id'].map(blocks['nth_session']).astype('category')
# Map whole sessions to the constraint in the treatment block as a condition for easier grouping during analysis.
df['condition'] = trials['block_id'].map(blocks[['session_uid', 'treatment']].replace(
{'treatment': {'': np.nan}}).groupby('session_uid')['treatment'].ffill().bfill()).astype('category')
df['block'] = trials['block_id'].map(blocks['nth_block']).astype('category')
# Add pre and post labels to trials for each block. Name it task instead of treatment.
# Theoretically, one could have changed number of blocks and order of treatment, but we assume default order here.
df['task'] = trials['block_id'].map(blocks['treatment'].replace('', np.nan).where(~blocks['treatment'].isna(),
blocks['nth_block'].map(
{1: 'pre',
3: 'post'
})
)
).astype('category')
#df['task'] = trials['block_id'].map(blocks['treatment'].replace(to_replace={r'\w+': 1, r'^\s*$': 0}, regex=True)
# ).astype('category')
df = pd.concat((df, trials), axis='columns')
# Add columns for easier filtering.
df['grab_diff'] = (df['df2_grab'] - df['df1_grab']).abs()
df['duration_diff'] = (df['df2_duration'] - df['df1_duration']).abs()
# Exclude columns.
df.drop(columns=['user_id'], inplace=True)
return df
def get_valid_trials(dataframe):
""" Remove trials where sliders where not grabbed concurrently or grabbed at all.
:param dataframe: Trial data.
:type dataframe: pandas.DataFrame
:returns: Filtered trials. Number of removed trials.
:rtype: tuple[pandas.DataFrame, int]
"""
# Remove trials with missing values. This means at least one slider wasn't grabbed.
df = dataframe.dropna(axis='index', how='any')
# Remove trials where sliders where not grabbed concurrently.
mask = ~((df['df1_release'] <= df['df2_grab']) | (df['df2_release'] <= df['df1_grab']))
df = df.loc[mask, :]
n_removed = len(dataframe) - len(df)
return df, n_removed
def get_outlyingness(data, contamination=0.1):
""" Outlier detection from covariance estimation in a Gaussian distributed dataset.
:param data: Data in which to detect outliers. Take care that n_samples > n_features ** 2 .
:type data: pandas.DataFrame
:param contamination: The amount of contamination of the data set, i.e. the proportion of outliers in the data set.
Range is (0, 0.5).
:type contamination: float
:returns: Decision on each row if it's an outlier. And contour array for drawing ellipse in graph.
:rtype: tuple[numpy.ndarray, numpy.ndarray]
"""
robust_cov = EllipticEnvelope(support_fraction=1., contamination=contamination)
outlyingness = robust_cov.fit_predict(data)
decision = (outlyingness-1).astype(bool)
# Visualisation.
xx, yy = np.meshgrid(np.linspace(0, 100, 101),
np.linspace(0, 100, 101))
z = robust_cov.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
return decision, z
#ToDo: remove blocks/sessions with sum mean way off.
#ToDo: remove sessions with less than 10 trials in any block.
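def _demo_get_outlyingness():
    """ Minimal usage sketch with synthetic slider data on the 0-100 scale assumed by the
    contour grid; rows flagged True in the decision are treated as outliers.
    """
    rng = np.random.default_rng(0)
    toy = pd.DataFrame(rng.normal(loc=[40.0, 60.0], scale=5.0, size=(60, 2)),
                       columns=['df1', 'df2'])
    decision, contour = get_outlyingness(toy, contamination=0.1)
    return toy.loc[decision]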
def get_performance_data(dataframe):
    """ Compute the mean df1 and df2 values per user, block and task.
    :param dataframe: Trial data.
    :type dataframe: pandas.DataFrame
    :return: Mean df1/df2 values for each user, block and task combination.
    :rtype: pandas.DataFrame
    """
    return dataframe.groupby(['user', 'block', 'task'])[['df1', 'df2']].mean().dropna().sort_index(level=['user', 'block'])
def get_pca_data(dataframe):
""" Conduct Principal Component Analysis on 2D dataset.
:param dataframe: Data holding 'df1' and 'df2' values as columns.
:type dataframe: pandas.DataFrame
:return: Explained variance, components and means.
:rtype: pandas.DataFrame
"""
# We don't reduce dimensionality, but overlay the 2 principal components in 2D.
pca = PCA(n_components=2)
x = dataframe[['df1', 'df2']].values
try:
# df1 and df2 have the same scale. No need to standardize. Standardizing might actually distort PCA here.
pca.fit(x)
except ValueError:
# Return empty.
df = pd.DataFrame(columns=['var_expl', 'var_expl_ratio', 'x', 'y', 'meanx', 'meany'])
else:
df = pd.DataFrame({'var_expl': pca.explained_variance_.T,
'var_expl_ratio': pca.explained_variance_ratio_.T * 100, # In percent
'x': pca.components_[:, 0],
'y': pca.components_[:, 1],
'meanx': pca.mean_[0],
'meany': pca.mean_[1],
},
index=[1, 2] # For designating principal components.
)
df.index.rename('PC', inplace=True)
return df
def get_pca_vectors(dataframe):
""" Get principal components as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Tabular PCA data.
:type dataframe: pandas.DataFrame
:return: Principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
# Use the "components" to define the direction of the vectors,
# and the "explained variance" to define the squared-length of the vectors.
directions = dataframe[['x', 'y']] * np.sqrt(dataframe[['var_expl']].values) * 3
# Move the directions by the mean, so we get vectors pointing to the start and vectors pointing to the destination.
vector2 = directions + dataframe[['meanx', 'meany']].values
vectors = list(zip(dataframe[['meanx', 'meany']].values, vector2.values))
return vectors
def get_pca_vectors_by(dataframe, by=None):
""" Get principal components for each group as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Data holding 'df1' and 'df2' values as columns.
:type dataframe: pandas.DataFrame
:param by: Column to group data by and return 2 vectors for each group.
:type by: str|list
:return: list of principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
vector_pairs = list()
if by is None:
pca_df = get_pca_data(dataframe)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
else:
grouped = dataframe.groupby(by, observed=True) # With categorical groupers we want only non-empty groups.
for group, data in grouped:
pca_df = get_pca_data(data)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
# ToDo: Augment by groupby criteria.
return vector_pairs
def get_interior_angle(vec0, vec1):
""" Get the smaller angle between vec0 and vec1 in degrees.
:param vec0: Vector 0
:type vec0: numpy.ndarray
:param vec1: Vector 1
:type vec1: numpy.ndarray
:return: Interior angle between vector0 and vector1 in degrees.
:rtype: float
"""
angle = np.math.atan2(np.linalg.det([vec0, vec1]), np.dot(vec0, vec1))
degrees = abs(np.degrees(angle))
    # Min and max should be between 0° and 90°.
degrees = min(degrees, 180.0 - degrees)
return degrees
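# Worked example (illustrative only): the interior angle is always reported as the
# smaller of the two, e.g. 45 degrees for (1, 0) vs (1, 1), and also 45 degrees for
# (1, 0) vs (-1, 1) rather than 135.
def _demo_interior_angle():
    assert np.isclose(get_interior_angle(np.array([1, 0]), np.array([1, 1])), 45.0)
    assert np.isclose(get_interior_angle(np.array([1, 0]), np.array([-1, 1])), 45.0)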
def get_ucm_vec(p0=None, p1=None):
""" Returns 2D unit vector in direction of uncontrolled manifold. """
if p0 is None:
p0 = np.array([25, 100])
if p1 is None:
p1 = np.array([100, 25])
parallel = p1 - p0
parallel = parallel / np.linalg.norm(parallel) # Normalize.
return parallel
def get_orthogonal_vec2d(vec):
""" Get a vector that is orthogonal to vec and has same length.
:param vec: 2D Vector
:return: 2D Vector orthogonal to vec.
:rtype: numpy.ndarray
"""
ortho = np.array([-vec[1], vec[0]])
return ortho
def get_pc_ucm_angles(dataframe, vec_ucm):
""" Computes the interior angles between pca vectors and ucm parallel/orthogonal vectors.
:param dataframe: PCA data.
:type dataframe: pandas.DataFrame
:param vec_ucm: Vector parallel to UCM.
:type vec_ucm: numpy.ndarray
:return: Each angle between principal components and UCM parallel and orthogonal vector.
:rtype: pandas.DataFrame
"""
df_angles = dataframe[['x', 'y']].transform(lambda x: (a:=get_interior_angle(vec_ucm, x), 90.0 - a),
axis='columns').rename(columns={'x': 'parallel', 'y': 'orthogonal'})
df_angles = pd.concat((dataframe[['task', 'PC']], df_angles), axis='columns')
return df_angles
def get_projections(points, vec_ucm):
""" Returns coefficients a and b in x = a*vec_ucm + b*vec_ortho with x being the difference of a data point and
the mean.
Projection is computed using a transformation matrix with ucm parallel and orthogonal vectors as basis.
:param points: Data of 2D points.
:type points: pandas.Dataframe
:param vec_ucm: Unit vector parallel to uncontrolled manifold.
:type vec_ucm: numpy.ndarray
:return: Array with projected lengths onto vector parallel to UCM as 'a', onto vector orthogonal to UCM as 'b'.
:rtype: pandas.Dataframe
"""
# Get the vector orthogonal to the UCM.
vec_ortho = get_orthogonal_vec2d(vec_ucm)
# Build a transformation matrix with vec_ucm and vec_ortho as new basis vectors.
A = np.vstack((vec_ucm, vec_ortho)).T # A is not an orthogonal projection matrix (A=A.T), but this works.
# Centralize the data. Analogous to calculating across trials deviation from average for each time step.
diffs = points - points.mean()
# For computational efficiency we shortcut the projection calculation with matrix multiplication.
# The actual math behind it:
# coeffs = vec_ucm.T@diff/np.sqrt(vec_ucm.T@vec_ucm), vec_ortho.T@diff/np.sqrt(vec_ortho.T@vec_ortho)
# Biased variance (normalized by (n-1)) of projection onto UCM vector:
# var_ucm = [email protected](diffs, bias=True, rowvar=False)@vec_ucm/(vec_ucm.T@vec_ucm) # Rayleigh fraction.
coeffs = diffs@A
coeffs.columns = ['parallel', 'orthogonal']
return coeffs
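def _demo_get_projections():
    """ Minimal sketch with toy points that all satisfy df1 + df2 = 100: they deviate from
    their mean only along the UCM, so the orthogonal coefficients are numerically zero.
    """
    toy = pd.DataFrame({'df1': [30.0, 40.0, 50.0], 'df2': [70.0, 60.0, 50.0]})
    coeffs = get_projections(toy, get_ucm_vec())
    assert np.allclose(coeffs['orthogonal'], 0.0)
    return coeffs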
def get_synergy_indices(variances, n=2, d=1):
"""
n: Number of degrees of freedom. In our case 2.
d: Dimensionality of performance variable. In our case a scalar (1).
Vucm = 1/N * 1/(n - d) * sum(ProjUCM**2)
Vort = 1/N * 1/(d) * sum(ProjORT**2)
Vtotal = 1/n * (d * Vort + (n-d) * Vucm) # Anull the weights on Vucm and Vort for the sum.
dV = (Vucm - Vort) / Vtotal
dV = n*(Vucm - Vort) / ((n - d)*Vucm + d*Vort)
Zhang (2008) without weighting Vucm, Vort and Vtotal first:
dV = n * (Vucm/(n - d) - Vort/d) / (Vucm + Vort)
    dVz = 0.5*ln((n/d + dV) / (n/(n-d) - dV))
dVz = 0.5*ln((2 + dV) / (2 - dV))
Reference: https://www.frontiersin.org/articles/10.3389/fnagi.2019.00032/full#supplementary-material
:param variances: Unweighted variances of parallel and orthogonal projections to the UCM.
:type variances: pandas.DataFrame
:param n: Number of degrees of freedom. Defaults to 2.
:type: int
:param d: Dimensionality of performance variable. Defaults to 1.
:type d: int
:returns: Synergy index, Fisher's z-transformed synergy index.
:rtype: pandas.DataFrame
"""
try:
dV = n * (variances['parallel']/(n-d) - variances['orthogonal']/d) \
/ variances[['parallel', 'orthogonal']].sum(axis='columns')
except KeyError:
synergy_indices = pd.DataFrame(columns=["dV", "dVz"])
else:
dVz = 0.5 * np.log((n/d + dV)/(n/(n-d) - dV))
synergy_indices = pd.DataFrame({"dV": dV, "dVz": dVz})
return synergy_indices
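def _demo_get_synergy_indices():
    """ Worked example with made-up variances: for Vucm = 3 and Vort = 1 (n=2, d=1)
    dV = 2*(3 - 1)/(3 + 1) = 1.0 and dVz = 0.5*ln((2 + 1)/(2 - 1)) ~ 0.549.
    """
    variances = pd.DataFrame({'parallel': [3.0], 'orthogonal': [1.0]})
    indices = get_synergy_indices(variances)
    assert np.isclose(indices['dV'].iloc[0], 1.0)
    assert np.isclose(indices['dVz'].iloc[0], 0.5 * np.log(3.0))
    return indices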
def get_synergy_idx_bounds(n=2, d=1):
""" Get lower and upper bounds of the synergy index.
dV = n * (Vucm/(n - d) - Vort/d) / (Vucm + Vort)
If all variance lies within the UCM, then Vort=0 and it follows for the upper bound: dV = n/(n-d)
If all variance lies within Vort, then Vucm=0 and it follows for the lower bound: dV = -n/d
:param n: Number of degrees of freedom.
:type: int
:param d: Dimensionality of performance variable.
:type d: int
:returns: Lower and upper bounds of synergy index.
:rtype: tuple
"""
dV_lower = -n/d
dV_upper = n/(n-d)
return dV_lower, dV_upper
def get_mean(dataframe, column, by=None):
""" Return mean values of column x (optionally grouped)
:param dataframe: Data
:type dataframe: pandas.Dataframe
:param column: Column name
:type column: str
:param by: Column names by which to group.
:type by: str|list
:return: mean value, optionally for each group.
:rtype: numpy.float64|pandas.Series
"""
if by is None:
means = dataframe[column].mean()
else:
means = dataframe.groupby(by, observed=True)[column].mean()
return means
def get_descriptive_stats(data, by=None):
""" Return mean and variance statistics for data.
:param data: numerical data.
:type data: pandas.Dataframe
:param by: groupby column name(s)
:type by: str|List
:return: Dataframe with columns mean, var, count and column names of data as rows.
:rtype: pandas.Dataframe
"""
# There's a bug in pandas 1.0.4 where you can't use custom numpy functions in agg anymore (ValueError).
# Note that the variance of projections is usually divided by (n-d) for Vucm and d for Vort. Both are 1 in our case.
# Pandas default var returns unbiased population variance /(n-1). Doesn't make a difference for synergy indices.
f_var = lambda series: series.var(ddof=0)
f_var.__name__ = 'variance' # Column name gets function name.
# When there're no data, return empty DataFrame with columns.
if data.empty:
if by:
data.set_index(by, drop=True, inplace=True)
col_idx = pd.MultiIndex.from_product([data.columns, ['mean', f_var.__name__]])
stats = | pd.DataFrame(None, index=data.index, columns=col_idx) | pandas.DataFrame |
"""
This script contains the code the hyperparameter tuning using SMAC package.
The SMAC package (https://github.com/automl/SMAC3) is a tool for algorithm
configuration to optimize the parameters of arbitrary algorithms across a set of
instances. The main core consists of Bayesian Optimization in combination
with an aggressive racing mechanism to efficiently decide which of two
configurations performs better.
"""
# import packages
import os
import inspect
import itertools
import sys
import numpy as np
import pandas as pd
import tensorflow.contrib.training as training
from train_score import create_round_prediction
from utils import *
# Add TSPerf root directory to sys.path
file_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
tsperf_dir = os.path.join(file_dir, "../../../../")
if tsperf_dir not in sys.path:
sys.path.append(tsperf_dir)
import retail_sales.OrangeJuice_Pt_3Weeks_Weekly.common.benchmark_settings as bs
from common.evaluation_utils import MAPE
from smac.configspace import ConfigurationSpace
from smac.scenario.scenario import Scenario
from ConfigSpace.hyperparameters import (
CategoricalHyperparameter,
UniformFloatHyperparameter,
UniformIntegerHyperparameter,
)
from smac.facade.smac_facade import SMAC
LIST_HYPERPARAMETER = ["decoder_input_dropout", "decoder_state_dropout", "decoder_output_dropout"]
data_relative_dir = "../../data"
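# Sketch of how the tuning could be wired up with the legacy SMAC3 facade imported above.
# This is an assumption for illustration: only a subset of the hyperparameters is shown,
# and the ranges are placeholders rather than the values used in the benchmark.
def _demo_smac_setup(run_limit=10):
    cs = ConfigurationSpace()
    cs.add_hyperparameter(UniformFloatHyperparameter("decoder_input_dropout", 0.0, 0.5, default_value=0.0))
    cs.add_hyperparameter(UniformFloatHyperparameter("decoder_state_dropout", 0.0, 0.5, default_value=0.0))
    scenario = Scenario({"run_obj": "quality",         # minimize the MAPE returned by eval_function
                         "runcount-limit": run_limit,  # number of configurations to evaluate
                         "cs": cs,
                         "deterministic": "true"})
    smac = SMAC(scenario=scenario, rng=np.random.RandomState(1), tae_runner=eval_function)
    return smac.optimize()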
def eval_function(hparams_dict):
"""
    This function takes a hyperparameter configuration, trains the
corresponding model on the training data set, creates the predictions,
and returns the evaluated MAPE on the evaluation data set.
"""
# set the data directory
file_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
data_dir = os.path.join(file_dir, data_relative_dir)
hparams_dict = dict(hparams_dict)
for key in LIST_HYPERPARAMETER:
hparams_dict[key] = [hparams_dict[key]]
# add the value of other hyper parameters which are not tuned
hparams_dict["encoder_rnn_layers"] = 1
hparams_dict["decoder_rnn_layers"] = 1
hparams_dict["decoder_variational_dropout"] = [False]
hparams_dict["asgd_decay"] = None
hparams = training.HParams(**hparams_dict)
# use round 1 training data for hyper parameter tuning to avoid data leakage for later rounds
submission_round = 1
make_features_flag = False
train_model_flag = True
train_back_offset = 3 # equal to predict_window
predict_cut_mode = "eval"
# get prediction
pred_o, train_mape = create_round_prediction(
data_dir,
submission_round,
hparams,
make_features_flag=make_features_flag,
train_model_flag=train_model_flag,
train_back_offset=train_back_offset,
predict_cut_mode=predict_cut_mode,
)
# get rid of prediction at horizon 1
pred_sub = pred_o[:, 1:].reshape((-1))
# evaluate the prediction on last two days in the first round training data
# TODO: get train error and evalution error for different parameters
train_file = os.path.join(data_dir, "train/train_round_{}.csv".format(submission_round))
train = | pd.read_csv(train_file, index_col=False) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
import numpy as np
from pyroofit.models import Gauss, Chebychev
from pyroofit.composites import AddPdf, ProdPdf, Convolution
def get_test_df(size=100):
d = {}
d['mbc1'] = np.random.random_sample(size)
d['mbc'] = np.random.random_sample(size)
return | pd.DataFrame(d) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Class to study the per month probability of a price decrease for properties on the market (rental and sales depending on
user choice), based on Zoopla data. This code creates a file with two columns: the time on the market from initial
listing to removal and the number of price updates for each property in the Zoopla database. This can be then used to
compute an initial probability of price update per month on the market. However, since the code here does only consider
properties with a non-null time on the market and a non-null number of price updates, this probability needs to be
re-scaled as
Real prob. = # properties with a null * 0.0 + # properties with no null * initial prob. / total # of properties,
where # properties with a null is the number of properties with a null time on market and/or number of price updates and
# properties with no null is the number rof properties with a non-null time on market and a non-null number of price
updates.
@author: <NAME>
"""
import numpy as np
from datetime import datetime
import pandas as pd
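# Helper implementing the re-scaling described in the module docstring (illustrative;
# not used in the processing below): properties with null fields contribute probability 0,
# e.g. rescale_update_probability(0.1, 600, 1000) == 0.06.
def rescale_update_probability(initial_prob, n_non_null, n_total):
    return n_non_null * initial_prob / n_total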
market = "RENT" # Must be "RENT" or "SALE"
root = r"" # ADD HERE PATH TO ZOOPLA DATA FOLDER
# Read data, filtering according to the following columns
# - MARKET: Indicates if the listing is SALE or RENT
# - CREATED: Listing creation date
# - DELETED: Listing deletion date
# - PRICE CHANGE: Difference between the first and latest asking prices
# - PRICE UPDATES: Number of times that the asking price was updated
chunk_size = 10000
filtered_data = pd.DataFrame()
for chunk in pd.read_csv(root + r"\New Zoopla\B Raw Listings (collation).csv", chunksize=chunk_size,
usecols=["MARKET", "CREATED", "DELETED", "PRICE CHANGE", "PRICE UPDATES"],
dtype={"MARKET": str, "CREATED": str, "DELETED": str, "PRICE CHANGE": str,
"PRICE UPDATES": str},
engine="c"):
# Keep only sale listings
chunk = chunk[chunk["MARKET"] == market]
# Keep only listings with non-null values in the required columns
chunk = chunk[(np.invert(pd.isnull(chunk["CREATED"])) & np.invert(pd.isnull(chunk["DELETED"]))
& np.invert(pd.isnull(chunk["PRICE CHANGE"])) & np.invert(pd.isnull(chunk["PRICE UPDATES"])))]
# Convert numerical columns to their respective types
chunk = chunk.astype({"PRICE CHANGE": float})
chunk = chunk.astype({"PRICE UPDATES": int})
# Keep only listings with negative price change
chunk = chunk[chunk["PRICE CHANGE"] < 0.0]
# Add filtered chunk to total filtered_data data frame
filtered_data = | pd.concat([filtered_data, chunk]) | pandas.concat |
import os
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from flask import Flask
public_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
corona_data = | pd.read_csv(public_url) | pandas.read_csv |
import torch
import numpy as np
import pandas as pd
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set_theme()
sns.set(font_scale=3, rc={'text.usetex' : False})
sns.set_theme()
sns.set_style('whitegrid')
import glob
import models
import torch.optim
import torch
import argparse
import utils
#from torchvision import models, datasets, transforms
try:
from tqdm import tqdm
except:
def tqdm(x): return x
def process_df(quant, dirname, stats_ref=None, args=None, args_model=None, save=True):
global table_format
idx = pd.IndexSlice
#losses = quant.loc[:, idx[:, '#loss']]
#errors = quant.loc[:, idx[:, 'error']]
col_order = ["stat", "set", "layer"]
if quant.columns.names != col_order:
# the order is
# perform pivot
quant = pd.melt(quant.reset_index(), id_vars="draw").pivot(index="draw", columns=col_order, values="value")
if stats_ref is not None:
if stats_ref.index.names != ["stat", "set"]:
stats_ref = stats_ref.reorder_levels(["stat", "set"]).sort_index(axis=0)
quant.sort_index(axis=1, inplace=True)
if save:
quant.to_csv(os.path.join(dirname, 'quant.csv'))
stats_ref.to_csv(os.path.join(dirname, 'stats_ref.csv'))
quant.groupby(level=["stat", "set"], axis=1).describe().to_csv(os.path.join(dirname, 'describe.csv'))
# if len(stats_ref.keys()==1):
# stats_ref = stats_ref[stats_ref.keys()[0]]
N_L = len(quant.columns.unique(level="layer")) # number of hidden layers
#N_sets = len(quant.columns.unique(level="set"))
N_sets = 2 # only train and test
palette=sns.color_palette(n_colors=N_sets)
df_reset = quant.reset_index()
N_S = len(stats_ref)
stats_ref_val = stats_ref.iloc[np.repeat(np.arange(N_S), N_L)].transpose().values
quant_rel = (quant - stats_ref_val).abs()
quant_rel["error"] *= 100
quant["error"] *= 100
# else:
# table = quant_describe[["mean", "std", "min"]]
# formaters =
try:
# utils.to_latex(dirname, quant, table_format, is_vgg=True)
utils.to_latex(dirname, quant_rel, table_format, is_vgg=True)
except:
pass
df_plot = pd.melt(df_reset, id_vars='draw')
df_reset_rel = quant_rel.reset_index()
df_plot_rel = pd.melt(df_reset_rel, id_vars="draw")
rp = sns.relplot(
data=df_plot_rel.pivot(index="draw", columns=col_order).min(axis=0).to_frame(name="value"),
#col='log_mult',
hue='set',
hue_order=["train", "test"],
#dodge=False,
col='stat',
col_order=["loss", "error"],
#col='set',
#style='layer',
#col='log_mult',
x='layer',
y='value',
kind='line',
ci='sd',
palette=palette,
#ax=axes[0],
#kind='line',
#ylabel='%',
#ci=100,
#col_wrap=2,
facet_kws={
'sharey': False,
'sharex': True
}
)
rp.axes[0,0].set_title("Loss")
rp.axes[0,0].set_ylabel("absolute delta loss")
rp.axes[0,1].set_title("Error")
rp.axes[0,1].set_ylabel("absolute delta error (%)")
rp.legend.set_title("Datasets")
# rp.fig.set_size_inches(11, 4)
#rp.axes[0,0].margins(.05)
#rp.axes[0,1].margins(.05)
xlabels=["0", "conv1", "conv2", "conv3", "conv4", "conv5", "conv6", "conv7", "conv8", "fc1", "fc2"]
rp.set(xticks=range(0, len(xlabels)))
# rp.set_xticklabels(xlabels)
# rp.axes[0,0].locator_params(axis='x', nbins=len(xlabels))
# rp.axes[0,1].locator_params(axis='x', nbins=len(xlabels))
rp.set_xticklabels(xlabels, rotation=30)
# rp.axes[0,0].set_xticklabels(xlabels, rotation=30)
# rp.axes[0,1].set_xticklabels(xlabels, rotation=30)
#rp.set_xticks(len(xlabels))
#rp.set_xlabels(xlabels)
if args_model is not None:
rp.fig.suptitle("(A) VGG {}".format(args_model.dataset.upper()))
plt.savefig(fname=os.path.join(dirname, 'relplot.pdf'), bbox_inches="tight")
plt.figure()
rp = sns.relplot(
data=df_plot.pivot(index="draw", columns=col_order).min(axis=0).to_frame(name="value"),
hue='set',
hue_order=["train", "test"],
col='stat',
col_order=["loss", "error"],
x='layer',
y='value',
kind='line',
facet_kws={
'sharey': False,
'sharex': True
}
)
df_ref = df_plot.query('layer==0')
rp.axes[0,0].set_title("Loss")
rp.axes[0,0].set_ylabel("loss")
rp.axes[0,1].set_title("Error")
rp.axes[0,1].set_ylabel("error (%)")
plt.savefig(fname=os.path.join(dirname, 'rel_plot.pdf'))
fig=plt.figure()
df_reset = quant.notnull().reset_index()
df_plot = pd.melt(df_reset, id_vars='draw')
g = sns.relplot(
data = df_plot,
#col='',
#hue='set',
col='stat',
x='layer',
y='value',
kind='line',
ci=None,
#col_wrap=2,
facet_kws={
'sharey': False,
'sharex': True
}
)
g.fig.subplots_adjust(top=0.9, left=1/g.axes.shape[1] * 0.1)
if args_model is not None and args is not None:
width = args_model.width
if width is None:
if args_model.dataset == "mnist":
width = 245 # WARNING hard coded
removed = "width / {}".format(args.fraction) if hasattr(args, 'fraction') and args.fraction is not None else args.remove
g.fig.suptitle('ds = {}, width = {}, removed = {}, draw = {}'.format(args_model.dataset, width, removed, args.ndraw))
g.set(yscale='linear')
plt.savefig(fname=os.path.join(dirname, 'plot.pdf'))
g.set(yscale='log')
plt.savefig(fname=os.path.join(dirname, 'plot_log.pdf'))
plt.close('all')
return
def process_csv(file_csv):
'''Read and process a previously computed result stored inside a checkpoint'''
global device
idx = pd.IndexSlice
quant = pd.read_csv(file_csv, header=[0,1,2], index_col=0)
file_ref = os.path.join(os.path.dirname(file_csv), "stats_ref.csv")
if os.path.isfile(file_ref):
stats_ref = pd.read_csv(file_ref, index_col=[0,1])
layer_idx = quant.columns.names.index("layer")
    if quant.columns.get_level_values(layer_idx).dtype != int:  # ensure the 'layer' level values are integers
new_layer = [int(c) for c in quant.columns.levels[layer_idx]]
new_layer.sort()
levels = list(quant.columns.levels[:layer_idx] + [new_layer] + quant.columns.levels[layer_idx+1:])
cols = pd.MultiIndex.from_product(levels, names=quant.columns.names)
quant.columns = cols
try:
chkpt_model = torch.load(os.path.join(os.path.dirname(os.path.dirname(file_csv)), 'checkpoint.pth'), map_location=device)
args_model = chkpt_model['args']
except:
        args_model = None
#quant.loc[:, idx[:, :, 'error']] *= 100 # in percent
dirname = os.path.dirname(file_csv)
process_df(quant, dirname, stats_ref, args_model=args_model, save=False)
return
if __name__ == '__main__':
torch.autograd.set_detect_anomaly(True)
parser = argparse.ArgumentParser('Evaluating a copy of a classifier with removed units')
parser_device = parser.add_mutually_exclusive_group()
parser_device.add_argument('--cpu', action='store_true', dest='cpu', help='force the cpu model')
parser_device.add_argument('--cuda', action='store_false', dest='cpu')
parser.add_argument('--table_format', choices=["wide", "long"], default="long")
parser.set_defaults(cpu=False)
parser.add_argument('dirs', nargs='*', help='the directory to process')
args = parser.parse_args()
table_format = args.table_format
device = torch.device('cuda' if torch.cuda.is_available() and not args.cpu else 'cpu')
#device = torch.device('cpu')
dtype = torch.float
num_gpus = torch.cuda.device_count()
def get_parent(path):
return os.path.basename(os.path.dirname(path))
Idx = pd.IndexSlice
for directory in args.dirs:
# directory is the root of the model
# e.g. root/fraction-2/entry_10
if os.path.isfile(directory) and directory.endswith('.csv'):
process_csv(directory)
sys.exit(0)
models = glob.glob(os.path.join(os.path.dirname(directory.rstrip(os.sep)), "checkpoint.pth"))
for f in models:
model = torch.load(f, map_location=device)
quant_model = model["quant"].dropna()
args_model = model["args"]
            idx_min = quant_model.idxmin(axis=0)["train", "loss"]  # the epoch(s) of minimum training loss for each draw
stats_ref = quant_model.loc[idx_min]
d_m = os.path.dirname(f) # the directory
entry_dirs = glob.glob(os.path.join(d_m, "**", "entry_*"), recursive=True) # all the entries
            roots = set(map(os.path.dirname, entry_dirs))  # the parent directories containing the entries
# root is e.g. fraction-2, and will be the root of the figure
for root in roots:
df_merge = pd.DataFrame()
df_min = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import pickle
from tqdm import tqdm_notebook
import matplotlib.pyplot as plt
import os
from ml_utils.plot_utils import plot_scatter, get_subplot_rows_cols
def covariate_shift(train, test, categorical_columns, n_samples, iterations = 200, weights_coef = 1, AUC_threshold = 0.8, importance_threshold = 0.9, max_loops = 20, test_size = 0.1, trys_all_influencer=5, calc_sample_weights=True, task_type="CPU", data_dir='', load_cov=False, save_cov=False, plot=True):
""" Select features without Covariate Shift between training and test set using iteratively CatBoostClassifier to identify relation between train and test """
import seaborn as sns
import catboost as cb
from sklearn.model_selection import train_test_split
if not os.path.exists(data_dir + 'cov_shift_features.pkl') or not load_cov:
train_sample = train.sample(n_samples)
train_sample.loc[:,'origin'] = 0
test_sample = test.sample(n_samples)
test_sample.loc[:,'origin'] = 1
combined_train, combined_test = train_test_split(
pd.concat([train_sample.reset_index(drop=True), test_sample.reset_index(drop=True)]),
test_size = test_size,
shuffle = True)
try:
influence_columns = []
count_all_influencer = 0
i = 0
AUC_score = 1
while i < max_loops and AUC_score > AUC_threshold:
x_columns = combined_train.columns.drop(['origin',] + influence_columns)
# Get the indexes for the categorical columns which CatBoost requires to out-perform other algorithms
cat_features_index = [list(x_columns).index(col) for col in categorical_columns if col in list(x_columns)]
# Do the feature selection once and only try again if no feature is selected
cov_shift_feature_selection = []
while len(cov_shift_feature_selection) == 0 and count_all_influencer < trys_all_influencer:
if count_all_influencer > 0:
print("Try again because model has set any feature as influencer")
cov_shift_model = cb.CatBoostClassifier(iterations = iterations,
eval_metric = "AUC",
cat_features = cat_features_index,
task_type = task_type,
verbose = False
)
cov_shift_feature_selection, df_cov_shift_feature_selection = shadow_feature_selection(
cov_shift_model,
combined_train['origin'], combined_train[x_columns],
need_cat_features_index=True, categorical_columns=categorical_columns,
collinear_threshold = 1,
n_iterations_mean = 1, times_no_change_features = 1
)
count_all_influencer += 1
if count_all_influencer == trys_all_influencer:
cov_shift_feature_selection = list(x_columns)
# Get the indexes for the categorical columns which CatBoost requires to out-perform other algorithms
cat_features_index = [cov_shift_feature_selection.index(col) for col in categorical_columns if col in cov_shift_feature_selection]
params = {'iterations' : 2*iterations, 'learning_rate' : 0.05, 'depth' : 6}
cov_shift_model = cb.CatBoostClassifier(iterations = iterations,
eval_metric = "AUC",
cat_features = cat_features_index,
scale_pos_weight = combined_train['origin'].value_counts()[0] / combined_train['origin'].value_counts()[1],
task_type = task_type,
verbose = False
)
cov_shift_model.set_params(**params)
cov_shift_model.fit(combined_train.drop('origin', axis = 1)[cov_shift_feature_selection],
combined_train['origin'],
eval_set = (combined_test.drop('origin', axis = 1)[cov_shift_feature_selection], combined_test['origin']),
use_best_model = True,
#sample_weight = sample_weight,
#early_stopping_rounds = True,
plot = False,
verbose = False)
AUC_score = cov_shift_model.get_best_score()['validation']['AUC']
print(f"Model score AUC of {AUC_score} on test")
# Remove the features which cumulative importance is relevant to predict origin of data (train or test)
if count_all_influencer != trys_all_influencer:
df_cov_shift_importance = pd.DataFrame(cov_shift_model.feature_importances_, columns = ['importance'], index = cov_shift_feature_selection)
df_cov_shift_importance['cumulative_importance'] = df_cov_shift_importance['importance'].cumsum() / df_cov_shift_importance['importance'].sum()
new_influence_columns = list(df_cov_shift_importance[df_cov_shift_importance['cumulative_importance'] < importance_threshold].index)
influence_columns = influence_columns + new_influence_columns
print(f"New {len(new_influence_columns)} columns will be removed from model: ", new_influence_columns)
print()
count_all_influencer = 0
i = i + 1
finally:
print()
print(f"Due to difference of influence of features to distinguish between data and submission, {len(influence_columns)} columns are removed:")
print(influence_columns)
if calc_sample_weights:
print("Calculating weights for each training sample")
probs = cov_shift_model.predict_proba(train[cov_shift_model.feature_names_])[:, 1] #calculating the probability
#print("Plot Train AUC")
#plot_roc_auc(pd.Serie(1,index = train.index), probs)
sample_weight = -np.log(probs)
sample_weight /= max(sample_weight) # Normalizing the weights
sample_weight = 1 + weights_coef * sample_weight
if plot:
plt.xlabel('Computed sample weight')
plt.ylabel('# Samples')
sns.distplot(sample_weight, kde=False)
if save_cov:
with open(data_dir + 'cov_shift_features.pkl', 'wb') as file:
print("Saving data in ", data_dir + 'cov_shift_features.pkl')
pickle.dump(influence_columns, file)
else:
print("Loading influence columns from ",data_dir)
with open(data_dir + 'cov_shift_features.pkl', 'rb') as file:
influence_columns = pickle.load(file)
cov_shift_model = None
sample_weight = [1,] * len(train)
return influence_columns, cov_shift_model, sample_weight
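# Hypothetical usage sketch for covariate_shift() above (illustrative only, not part of the
# original module): column names and sizes are made up, CatBoost / scikit-learn must be
# installed, and the shadow_feature_selection() helper used internally must be importable.
def _example_covariate_shift_usage():
    rng = np.random.default_rng(0)
    train = pd.DataFrame({"num_feat": rng.normal(0.0, 1.0, 500),
                          "cat_feat": rng.choice(["a", "b"], 500)})
    test = pd.DataFrame({"num_feat": rng.normal(0.5, 1.0, 500),  # deliberately shifted feature
                         "cat_feat": rng.choice(["a", "b"], 500)})
    influence_columns, model, sample_weight = covariate_shift(
        train, test, categorical_columns=["cat_feat"], n_samples=300, plot=False)
    # Columns in influence_columns are those the classifier uses to tell train and test apart.
    return influence_columns, sample_weight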
def stadistic_difference_distributions(data, submission, time_column, test_percentage=0.2, p_value_threshold=None,
verbose=False):
""" Calculate relation between initial and end part of the dataset for each column using Kolmogorov-Smirnov statistic on 2 samples """
from scipy import stats
from sklearn.model_selection import train_test_split
train, test = train_test_split(data.sort_values(time_column), test_size=test_percentage, shuffle=False)
time_analysis_df = pd.DataFrame(False, columns=['train_test', 'train_submission', 'test_submission'],
index=submission.columns.values)
for col in tqdm_notebook(submission.columns.values):
try:
KS_stat_test, p_value_test = stats.ks_2samp(train[col], test[col])
KS_stat_submission, p_value_submission = stats.ks_2samp(train[col], submission[col])
KS_stat_test_submission, p_value_test_submission = stats.ks_2samp(test[col], submission[col])
time_analysis_df.loc[col] = [p_value_test, p_value_submission, p_value_test_submission]
if verbose:
if p_value_test <= p_value_threshold or p_value_submission <= p_value_threshold or p_value_test_submission <= p_value_threshold:
print_s = f'Column {col} has different distribution'
if p_value_test <= p_value_threshold:
print_s = print_s + ' // train <--> test'
if p_value_submission <= p_value_threshold:
print_s = print_s + ' // train <--> submission'
if p_value_test_submission <= p_value_threshold:
print_s = print_s + ' // test <--> submission'
print(print_s)
except TypeError:
time_analysis_df.loc[col] = [np.nan, np.nan, np.nan]
    if p_value_threshold is None:
cond1 = time_analysis_df['train_test'] == 0
cond2 = time_analysis_df['train_submission'] == 0
cond3 = time_analysis_df['test_submission'] == 0
else:
cond1 = time_analysis_df['train_test'] <= p_value_threshold
cond2 = time_analysis_df['train_submission'] <= p_value_threshold
cond3 = time_analysis_df['test_submission'] <= p_value_threshold
cols_to_remove = list(time_analysis_df[cond1 | cond2 | cond3].index)
return time_analysis_df, cols_to_remove
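# Illustration of the two-sample Kolmogorov-Smirnov test used above (a sketch, not part of the
# original module): a small p-value suggests the two samples come from different distributions.
def _example_ks_two_sample():
    from scipy import stats
    rng = np.random.default_rng(0)
    sample_a = rng.normal(0.0, 1.0, 1000)
    sample_b = rng.normal(0.3, 1.0, 1000)  # shifted mean
    ks_stat, p_value = stats.ks_2samp(sample_a, sample_b)
    return ks_stat, p_value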
def outliers_analysis(full_data, features_names=None, x_column=None, subplot_rows=None, subplot_cols=None, starting_index=0,
index_offset=0, z_score_threshold=3.5, use_mean=False, plot=True, num_bins=50):
""" Calculate and visualize outliers analysis from Modified Z-score with MAD """
# Compatibility with numpy arrays
if type(full_data) == np.ndarray:
assert len(full_data.shape) <= 2
if len(full_data.shape) == 1:
columns = ['feature']
else:
columns = ['feature_'+str(i) for i in range(full_data.shape[-1])]
full_data = pd.DataFrame(full_data, columns=columns)
# Features not provided, use all the columns
if features_names is None:
features_names = list(full_data.columns)
if plot:
# Set a good relation rows/cols for the plot if not specified
if subplot_rows is None or subplot_cols is None:
subplot_rows, subplot_cols = get_subplot_rows_cols(len(features_names), [3,4,5])
# Resize for better visualization of subplots
plt.rcParams['figure.figsize'] = [subplot_cols * 5, subplot_rows * 4]
fig, axes = plt.subplots(subplot_rows, subplot_cols, sharex=False, sharey=False)
outliers_pd = full_data.copy()
outliers_summary = {}
i = starting_index
while i < len(features_names):
feature_name = features_names[i]
data = outliers_pd.loc[outliers_pd[feature_name].notnull(), feature_name]
# Modified Z-score with MAD (Median Absolute Deviation)
if use_mean:
outliers_pd.loc[outliers_pd[feature_name].notnull(), feature_name + '_zscore'] = 0.6745 * (data - data.mean()).abs() / (
data - data.mean()).abs().mean()
else:
outliers_pd.loc[outliers_pd[feature_name].notnull(), feature_name + '_zscore'] = 0.6745 * (data - data.median()).abs() / (
data - data.median()).abs().median()
outliers_pd[feature_name + '_zscore_outliers'] = outliers_pd[feature_name + '_zscore'] > z_score_threshold
if plot:
# Take into account the case of only one plot
if subplot_rows * subplot_cols == 1:
ax = axes
elif subplot_rows == 1:
ax = axes[(i + index_offset) % subplot_cols]
else:
ax = axes[(i + index_offset) // subplot_cols, (i + index_offset) % subplot_cols]
# If X_column provided plot scatter, otherwise histogram
if x_column is None:
bins = np.linspace(data.min(), data.max(), num_bins)
ax.hist(data[~outliers_pd[feature_name + '_zscore_outliers']], bins=bins, density=False)
ax.hist(data[outliers_pd[feature_name + '_zscore_outliers']], bins=bins, density=False)
ax.set_title(feature_name)
else:
plot_scatter(outliers_pd[outliers_pd[feature_name].notnull()], x_column=x_column, y_column=feature_name,
axes=ax, highlight_column=feature_name + '_zscore_outliers')
outliers_percentage = 100 * outliers_pd[feature_name + '_zscore_outliers'].sum() / outliers_pd[
feature_name + '_zscore_outliers'].count()
outliers_summary[feature_name] = outliers_percentage
print("Feature: ", feature_name, " - Percentage of outliers using modified Z-score approach is: ",
np.round(outliers_percentage, 2), "%")
i = i + 1
if plot:
fig.tight_layout()
# Resize to original settings
plt.rcParams['figure.figsize'] = [10, 6]
outliers_summary = pd.DataFrame.from_dict(outliers_summary, orient='index', columns=['Percentage'])
return outliers_summary, outliers_pd
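# Minimal sketch (illustrative only) of the modified Z-score used above: a point is flagged as
# an outlier when 0.6745 * |x - median| / MAD exceeds the threshold (3.5 by default).
def _example_modified_zscore(z_score_threshold=3.5):
    data = pd.Series([1.0, 1.1, 0.9, 1.05, 10.0])  # 10.0 is an obvious outlier
    mad = (data - data.median()).abs().median()
    z_scores = 0.6745 * (data - data.median()).abs() / mad
    return z_scores > z_score_threshold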
def feature_selection(classifier_initial, y_train, x_train, n_top_features=50, baseline_features=[],
min_importance=None):
""" Select features which have the top N feature importance and/or above baseline """
classifier_model = classifier_initial.fit(x_train, y_train)
feature_importance = sorted(zip(map(lambda x: round(x, 4), classifier_model.feature_importances_), x_train),
reverse=True)
    dict_feature_importance = dict(zip(x_train, map(lambda x: round(x, 4), classifier_model.feature_importances_)))
    if baseline_features:
        min_importance = max([importance for importance, feature in feature_importance if feature in baseline_features])
    elif min_importance is None:
        # Fall back to keeping only features with positive importance when no baseline is given.
        min_importance = 0.0
    model_columns = []
i = 0
while i < n_top_features and i < len(feature_importance):
if feature_importance[i][0] > min_importance:
model_columns.append(feature_importance[i][1])
else:
break
i = i + 1
return model_columns
def cumulative_feature_selection(df_feature_importance, cum_importance_threshold):
""" Select features which are below of the cumulative feature importance threshold """
df_feature_importance = | pd.DataFrame(df_feature_importance, columns=['importance']) | pandas.DataFrame |
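# Minimal sketch of cumulative-importance selection (an assumption about how the truncated
# helper above is intended to behave, not the original implementation): keep the smallest set
# of features whose sorted importances account for at most the given fraction of the total.
def select_by_cumulative_importance(importances, cum_importance_threshold=0.95):
    """importances: pandas Series of importance values indexed by feature name."""
    ordered = importances.sort_values(ascending=False)
    cumulative = ordered.cumsum() / ordered.sum()
    return list(ordered[cumulative <= cum_importance_threshold].index)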
"""Solution problems / methods / algorithms module"""
import collections
import cvxpy as cp
import gurobipy as gp
import itertools
import numpy as np
import pandas as pd
import scipy.optimize
import scipy.sparse as sp
import typing
import mesmo.config
import mesmo.utils
logger = mesmo.config.get_logger(__name__)
class OptimizationProblem(mesmo.utils.ObjectBase):
r"""Optimization problem object class, which allows the definition and solution of convex optimization problems.
The optimization problem object serves as a container for the parameters, variables, constraints and objective
terms. The object provides methods for defining variables, parameters, constraints and objectives as well as
methods for solving the numerical optimization problem and obtaining results for variables, objective value and
dual values.
- This documentation assumes a fundamental understanding of convex optimization. As a general reference
      on this topic, refer to: *S. Boyd and L. Vandenberghe, Convex Optimization. Cambridge University Press, 2004.*
Available at: https://web.stanford.edu/~boyd/cvxbook/
- The optimization problem object currently supports convex optimization problems in the form of
1) linear program (LP) or 2) quadratic program (QP) with only linear constraints.
- The solve method currently implements interfaces to 1) Gurobi and 2) CVXPY, where the latter is a high-level
convex optimization interface, which in turn allows interfacing further third-party solvers. The intention is
      to implement more direct solver interfaces on an as-needed basis (please raise an issue!), as these interfaces are
assumed to allow higher performance than CVXPY for large-scale problems. However, CVXPY is kept as a fallback to
allow a high degree of compatibility with various solvers.
The optimization problem object internally translates optimizations into LP / QP standard form. Where the following
formulation is assumed for the standard form:
.. math::
\begin{align}
\min_{\boldsymbol{x}} \quad
& \boldsymbol{c}^{\intercal} \boldsymbol{x}
+ \frac{1}{2} \boldsymbol{x}^{\intercal} \boldsymbol{Q} \boldsymbol{x} + d \\
\text{s.t.} \quad
& \boldsymbol{A} \boldsymbol{x} \leq \boldsymbol{b} \quad : \ \boldsymbol{\mu}
\end{align}
The vectors :math:`\boldsymbol{x}` and :math:`\boldsymbol{\mu}` are the variable vector and
associated constraint dual variable vector. The matrix :math:`\boldsymbol{A}` defines the linear
    constraint coefficients, whereas the matrix :math:`\boldsymbol{Q}` defines quadratic objective coefficients.
The vectors :math:`\boldsymbol{b}` and :math:`\boldsymbol{c}` define constant constraint terms
and linear objective coefficients. Lastly, the scalar :math:`d` defines the constant objective term.
    Note that the scalar :math:`d` represents a slight abuse of the standard form to include a constant objective term,
which may prove useful for comparing objective values across different problem definitions.
Example:
Consider the following optimization problem:
.. math::
\begin{align}
\min_{\boldsymbol{a},\boldsymbol{b}} \quad
& \sum_{i=1}^{n=1000} b_i \\
\text{s.t.} \quad
& \boldsymbol{b} = \boldsymbol{a} \cdot \boldsymbol{P} \\
& -10 \leq \boldsymbol{a} \leq +10
\end{align}
        The matrix :math:`\boldsymbol{P} \in \mathbb{R}^{n \times n}` is an arbitrary parameter matrix. The vectors
:math:`\boldsymbol{a}, \boldsymbol{b} \in \mathbb{R}^{n \times 1}` are decision variable vectors. The symbol
:math:`n` defines the problem dimension.
This problem can be defined and solved with the optimization problem interface as follows::
# Instantiate optimization problem.
optimization_problem = mesmo.solutions.OptimizationProblem()
# Define optimization parameters.
optimization_problem.define_parameter('parameter_matrix', parameter_matrix)
# Define optimization variables.
optimization_problem.define_variable('a_vector', a_index=range(dimension))
optimization_problem.define_variable('b_vector', b_index=range(dimension))
# Define optimization constraints.
optimization_problem.define_constraint(
('variable', 1.0, dict(name='b_vector')),
'==',
('variable', 'parameter_matrix', dict(name='a_vector')),
)
optimization_problem.define_constraint(
('constant', -10.0),
'<=',
('variable', 1.0, dict(name='a_vector')),
)
optimization_problem.define_constraint(
('constant', +10.0),
'>=',
('variable', 1.0, dict(name='a_vector')),
)
# Define optimization objective.
optimization_problem.define_objective(('variable', 1.0, dict(name='b_vector')))
# Solve optimization problem.
optimization_problem.solve()
# Obtain results.
results = optimization_problem.get_results()
a_vector = results['a_vector']
b_vector = results['b_vector']
This example is also available as standalone script at: ``examples/run_general_optimization_problem.py``
"""
variables: pd.DataFrame
constraints: pd.DataFrame
constraints_len: int
parameters: dict
flags: dict
a_dict: dict
b_dict: dict
c_dict: dict
q_dict: dict
d_dict: dict
x_vector: np.ndarray
mu_vector: np.ndarray
results: dict
duals: dict
objective: float
def __init__(self):
# Instantiate index sets.
# - Variables are instantiated with 'name' and 'timestep' keys, but more may be added in ``define_variable()``.
# - Constraints are instantiated with 'name', 'timestep' and 'constraint_type' keys,
# but more may be added in ``define_constraint()``.
self.variables = pd.DataFrame(columns=["name", "timestep", "variable_type"])
self.constraints = pd.DataFrame(columns=["name", "timestep", "constraint_type"])
self.constraints_len = 0
# Instantiate parameters / flags dictionary.
self.parameters = dict()
self.flags = dict()
# Instantiate A matrix / b vector / c vector / Q matrix / d constant dictionaries.
# - Final matrix / vector are only created in ``get_a_matrix()``, ``get_b_vector()``, ``get_c_vector()``,
# ``get_q_matrix()`` and ``get_d_constant()``.
# - Uses `defaultdict(list)` to enable more convenient collecting of elements into lists. This avoids
# accidental overwriting of dictionary entries.
self.a_dict = collections.defaultdict(list)
self.b_dict = collections.defaultdict(list)
self.c_dict = collections.defaultdict(list)
self.q_dict = collections.defaultdict(list)
self.d_dict = collections.defaultdict(list)
def define_variable(
self,
name: str,
variable_type: str = "continuous",
**keys,
):
"""Define decision variable with given name and key set.
- Variables are defined by passing a name string and index key sets. The variable dimension is determined by
the dimension of the index key sets. Accepted key set values are 1) lists, 2) tuples, 3) numpy arrays,
4) pandas index objects and 5) range objects.
- If multiple index key sets are passed, the variable dimension is determined as the cartesian product of
the key sets. However, note that variables always take the shape of column vectors in constraint and
objective definitions. That means, multiple key sets are not interpreted as array dimensions.
- The variable type can be defined with the keyword argument `variable_type` as either 'continuous', 'integer'
or 'binary'. The variable type defaults to 'continuous'.
"""
# Validate variable type.
variable_types = ["continuous", "integer", "binary"]
if variable_type not in ["continuous", "integer", "binary"]:
raise ValueError(
f"For variable definitions, the key `variable_type` is reserved and must be a valid variable type."
f"Valid variable types are {variable_types}."
)
# Obtain new variables based on ``keys``.
# - Variable dimensions are constructed based by taking the product of the given key sets.
new_variables = pd.DataFrame(
itertools.product(
[name],
[variable_type],
*[
list(value)
if type(value) in [pd.MultiIndex, pd.Index, pd.DatetimeIndex, np.ndarray, list, tuple, range]
else [value]
for value in keys.values()
],
),
columns=["name", "variable_type", *keys.keys()],
)
# Add new variables to index.
# - Duplicate definitions are automatically removed.
self.variables = pd.concat([self.variables, new_variables], ignore_index=True).drop_duplicates(
ignore_index=True
)
def define_parameter(self, name: str, value: typing.Union[float, np.ndarray, sp.spmatrix]):
"""Define constant parameters with given name and numerical value.
        - Numerical values can be real-valued 1) float, 2) numpy array and
3) scipy sparse matrix.
        - Defining parameters is optional. Numerical values can also be directly passed in the constraints /
objective definitions. However, using parameters allows updating the numerical values of the problem
without re-defining the complete problem.
"""
# Validate dimensions, if parameter already defined.
if name in self.parameters.keys():
if np.shape(value) != np.shape(self.parameters[name]):
ValueError(f"Mismatch of redefined parameter: {name}")
# Set parameter value.
self.parameters[name] = value
def define_constraint(
self,
*elements: typing.Union[
str,
typing.Tuple[str, typing.Union[str, float, np.ndarray, sp.spmatrix]],
typing.Tuple[str, typing.Union[str, float, np.ndarray, sp.spmatrix], dict],
],
**kwargs,
):
"""Define linear constraint for given list of constraint elements.
- Constraints are defined as list of tuples and strings, where tuples are either 1) variable terms or
2) constant terms and strings represent operators (==, <= or >=). If multiple variable and constant
terms are on either side of the operator, these are interpreted as summation of the variables / constants.
- Constant terms are tuples in the form (‘constant’, numerical value), where the numerical value can be
real-valued 1) float, 2) numpy array, 3) scipy sparse matrix or 4) a parameter name string.
The numerical value is expected to represent a column vector with appropriate size matching the
constraint dimension. If a float value is given as numerical value, the value is multiplied with
a column vector of ones of appropriate size.
- Variable terms are tuples in the form (‘variable’, numerical factor, dict(name=variable name, keys…)),
where the numerical factor can be real-valued 1) float, 2) numpy array, 3) scipy sparse matrix or
4) a parameter name string. The numerical factor is multiplied with the variable vector and is expected
to represent a matrix of appropriate size for the multiplication. If a float value is given as
numerical factor, the value is multiplied with a identity matrix of appropriate size. Keys can
be optionally given to select / slice a portion of the variable vector.
Note that variables always take the shape of column vectors.
"""
# Instantiate constraint element aggregation variables.
variables = list()
constants = list()
operator = None
# Instantiate left-hand / right-hand side indicator. Starting from left-hand side.
side = "left"
# Aggregate constraint elements.
for element in elements:
# Tuples are variables / constants.
if isinstance(element, tuple):
# Obtain element attributes.
element_type = element[0]
element_value = element[1]
element_keys = element[2] if len(element) > 2 else None
# Identify variables.
if element_type in ("variable", "var", "v"):
# Move right-hand variables to left-hand side.
if side == "right":
factor = -1.0
else:
factor = 1.0
# Raise error if no keys defined.
if element_keys is None:
raise ValueError(f"Missing keys for variable: \n{element}")
# Append element to variables.
variables.append((factor, element_value, element_keys))
# Identify constants.
elif element_type in ("constant", "con", "c"):
# Move left-hand constants to right-hand side.
if side == "left":
factor = -1.0
else:
factor = 1.0
# Append element to constants.
constants.append((factor, element_value, element_keys))
# Raise error if element type cannot be identified.
else:
raise ValueError(f"Invalid constraint element type: {element_type}")
# Strings are operators.
elif element in ["==", "<=", ">="]:
# Raise error if operator is first element.
if element == elements[0]:
ValueError(f"Operator is first element of a constraint.")
# Raise error if operator is last element.
if element == elements[-1]:
ValueError(f"Operator is last element of a constraint.")
# Raise error if operator is already defined.
if operator is not None:
ValueError(f"Multiple operators defined in one constraint.")
# Set operator.
operator = element
# Update left-hand / right-hand side indicator. Moving to right-hand side.
side = "right"
# Raise error if element type cannot be identified.
else:
raise ValueError(f"Invalid constraint element: \n{element}")
# Raise error if operator missing.
if operator is None:
raise ValueError("Cannot define constraint without operator (==, <= or >=).")
self.define_constraint_low_level(variables, operator, constants, **kwargs)
def define_constraint_low_level(
self,
variables: typing.List[typing.Tuple[float, typing.Union[str, float, np.ndarray, sp.spmatrix], dict]],
operator: str,
constants: typing.List[typing.Tuple[float, typing.Union[str, float, np.ndarray, sp.spmatrix], dict]],
keys: dict = None,
broadcast: typing.Union[str, list, tuple] = None,
):
# Raise error if no variables in constraint.
if len(variables) == 0:
raise ValueError(f"Cannot define constraint without variables.")
# Run checks for constraint index keys.
if keys is not None:
# Raise error if ``keys`` is not a dictionary.
if type(keys) is not dict:
raise TypeError(f"Constraint `keys` parameter must be a dictionary, but instead is: {type(keys)}")
# Raise error if no 'name' key was defined.
if "name" not in keys.keys():
raise ValueError(f"'name' key is required in constraint `keys` dictionary. Only found: {keys.keys()}")
# TODO: Raise error if using reserved 'constraint_type' key.
# Run type checks for broadcast argument.
if broadcast is not None:
if type(broadcast) is str:
broadcast = [broadcast]
elif type(broadcast) not in [list, tuple]:
raise ValueError(f"Invalid type of broadcast argument: {type(broadcast)}")
# For equality constraint, define separate upper / lower inequality.
if operator in ["=="]:
# Define upper inequality.
self.define_constraint_low_level(
variables,
">=",
constants,
keys=dict(keys, constraint_type="==>=") if keys is not None else None,
broadcast=broadcast,
)
# Define lower inequality.
self.define_constraint_low_level(
variables,
"<=",
constants,
keys=dict(keys, constraint_type="==<=") if keys is not None else None,
broadcast=broadcast,
)
# For inequality constraint, add into A matrix / b vector dictionaries.
elif operator in ["<=", ">="]:
# If greater-than-equal, invert signs.
if operator == ">=":
operator_factor = -1.0
else:
operator_factor = 1.0
# Instantiate constraint index.
constraint_index = None
# Process variables.
for variable_factor, variable_value, variable_keys in variables:
                # If any variable key values are empty, ignore variable & do not add any A matrix entry.
                if any(
                    isinstance(key_value, (list, tuple, pd.MultiIndex, pd.Index, np.ndarray)) and len(key_value) == 0
                    for key_value in variable_keys.values()
                ):
                    continue  # Skip this variable & go to the next one.
# Obtain variable integer index & raise error if variable or key does not exist.
variable_index = tuple(self.get_variable_index(**variable_keys, raise_empty_index_error=True))
# Obtain broadcast dimension length for variable.
if broadcast is not None:
broadcast_len = 1
for broadcast_key in broadcast:
if broadcast_key not in variable_keys.keys():
raise ValueError(f"Invalid broadcast dimension: {broadcast_key}")
else:
broadcast_len *= len(variable_keys[broadcast_key])
else:
broadcast_len = 1
# String values are interpreted as parameter name.
if type(variable_value) is str:
parameter_name = variable_value
variable_value = self.parameters[parameter_name]
else:
parameter_name = None
# Flat arrays are interpreted as row vectors (1, n).
if len(np.shape(variable_value)) == 1:
variable_value = np.array([variable_value])
# Scalar values are multiplied with identity matrix of appropriate size.
if len(np.shape(variable_value)) == 0:
variable_value = variable_value * sp.eye(len(variable_index))
# If broadcasting, value is repeated in block-diagonal matrix.
elif broadcast_len > 1:
if type(variable_value) is np.matrix:
variable_value = np.array(variable_value)
variable_value = sp.block_diag([variable_value] * broadcast_len)
# If not yet defined, obtain constraint index based on dimension of first variable.
if constraint_index is None:
constraint_index = tuple(
range(self.constraints_len, self.constraints_len + np.shape(variable_value)[0])
)
# Raise error if variable dimensions are inconsistent.
if np.shape(variable_value) != (len(constraint_index), len(variable_index)):
raise ValueError(f"Dimension mismatch at variable: \n{variable_keys}")
# Append A matrix entry.
# - If parameter, pass tuple of factor, parameter name and broadcasting dimension length.
if parameter_name is None:
self.a_dict[constraint_index, variable_index].append(
operator_factor * variable_factor * variable_value
)
else:
self.a_dict[constraint_index, variable_index].append(
(operator_factor * variable_factor, parameter_name, broadcast_len)
)
# Process constants.
for constant_factor, constant_value, constant_keys in constants:
# If constant value is string, it is interpreted as parameter.
if type(constant_value) is str:
parameter_name = constant_value
constant_value = self.parameters[parameter_name]
else:
parameter_name = None
# Obtain broadcast dimension length for constant.
if (broadcast is not None) and (constant_keys is not None):
broadcast_len = 1
for broadcast_key in broadcast:
# TODO: Raise error if not in keys.
if broadcast_key in constant_keys.keys():
broadcast_len *= len(constant_keys[broadcast_key])
else:
broadcast_len = 1
# If constant is sparse, convert to dense array.
if isinstance(constant_value, sp.spmatrix):
constant_value = constant_value.toarray()
# If constant is scalar, cast into vector of appropriate size.
if len(np.shape(constant_value)) == 0:
constant_value = constant_value * np.ones(len(constraint_index))
# If broadcasting, values are repeated along broadcast dimension.
elif broadcast_len > 1:
constant_value = np.concatenate([constant_value] * broadcast_len, axis=0)
# Raise error if constant is not a scalar, column vector (n, 1) or flat array (n, ).
if len(np.shape(constant_value)) > 1:
if np.shape(constant_value)[1] > 1:
raise ValueError(f"Constant must be column vector (n, 1), not row vector (1, n).")
# If not yet defined, obtain constraint index based on dimension of first constant.
if constraint_index is None:
constraint_index = tuple(range(self.constraints_len, self.constraints_len + len(constant_value)))
# Raise error if constant dimensions are inconsistent.
if len(constant_value) != len(constraint_index):
raise ValueError(f"Dimension mismatch at constant: \n{constant_keys}")
# Append b vector entry.
if parameter_name is None:
self.b_dict[constraint_index].append(operator_factor * constant_factor * constant_value)
else:
self.b_dict[constraint_index].append(
(operator_factor * constant_factor, parameter_name, broadcast_len)
)
# Append constraints index entries.
if keys is not None:
# Set constraint type:
if "constraint_type" in keys.keys():
if keys["constraint_type"] not in ("==>=", "==<="):
keys["constraint_type"] = operator
else:
keys["constraint_type"] = operator
# Obtain new constraints based on ``keys``.
# - Constraint dimensions are constructed based by taking the product of the given key sets.
new_constraints = pd.DataFrame(
itertools.product(
*[
list(value)
if type(value) in [pd.MultiIndex, pd.Index, pd.DatetimeIndex, np.ndarray, list, tuple]
else [value]
for value in keys.values()
]
),
columns=keys.keys(),
)
# Raise error if key set dimension does not align with constant dimension.
if len(new_constraints) != len(constraint_index):
raise ValueError(
f"Constraint key set dimension ({len(new_constraints)})"
f" does not align with constraint value dimension ({len(constraint_index)})."
)
# Add new constraints to index.
new_constraints.index = constraint_index
                self.constraints = pd.concat([self.constraints, new_constraints])
self.constraints_len += len(constraint_index)
else:
# Only change constraints size, if no ``keys`` defined.
# - This is for speedup, as updating the constraints index set with above operation is slow.
self.constraints_len += len(constraint_index)
# Raise error for invalid operator.
else:
ValueError(f"Invalid constraint operator: {operator}")
def define_objective(
self,
*elements: typing.Union[
str,
typing.Tuple[str, typing.Union[str, float, np.ndarray, sp.spmatrix]],
typing.Tuple[str, typing.Union[str, float, np.ndarray, sp.spmatrix], dict],
typing.Tuple[str, typing.Union[str, float, np.ndarray, sp.spmatrix], dict, dict],
],
**kwargs,
):
"""Define objective terms for the given list of objective elements.
- Objective terms are defined as list of tuples, where tuples are either 1) variable terms or
2) constant terms. Each term is expected to evaluate to a scalar value. If multiple variable and
constant terms are defined, these are interpreted as summation of the variables / constants.
- Constant terms are tuples in the form (‘constant’, numerical value), where the numerical value can be
1) float value or 2) a parameter name string.
- Variable terms are tuples in the form (‘variable’, numerical factor, dict(name=variable name, keys…)),
where the numerical factor can be 1) float value, 2) numpy array, 3) scipy sparse matrix or
4) a parameter name string. The numerical factor is multiplied with the variable vector and is expected
to represent a matrix of appropriate size for the multiplication, such that the multiplication evaluates
to a scalar. If a float value is given as numerical factor, the value is multiplied with a row vector of
ones of appropriate size. Keys can be optionally given to select / slice a portion of the variable vector.
Note that variables always take the shape of column vectors.
"""
# Instantiate objective element aggregation variables.
variables = list()
variables_quadratic = list()
constants = list()
# Aggregate objective elements.
for element in elements:
# Tuples are variables / constants.
if isinstance(element, tuple):
# Obtain element attributes.
element_type = element[0]
element_value = element[1]
element_keys_1 = element[2] if len(element) > 2 else None
element_keys_2 = element[3] if len(element) > 3 else None
# Identify variables.
if element_type in ("variable", "var", "v"):
# Append element to variables / quadratic variables.
if element_keys_2 is None:
variables.append((element_value, element_keys_1))
else:
variables_quadratic.append((element_value, element_keys_1, element_keys_2))
# Identify constants.
elif element_type in ("constant", "con", "c"):
# Add element to constant.
constants.append((element_value, element_keys_1))
# Raise error if element type cannot be identified.
else:
raise ValueError(f"Invalid objective element type: {element[0]}")
# Raise error if element type cannot be identified.
else:
raise ValueError(f"Invalid objective element: \n{element}")
self.define_objective_low_level(variables, variables_quadratic, constants, **kwargs)
def define_objective_low_level(
self,
variables: typing.List[typing.Tuple[typing.Union[str, float, np.ndarray, sp.spmatrix], dict]],
variables_quadratic: typing.List[typing.Tuple[typing.Union[str, float, np.ndarray, sp.spmatrix], dict, dict]],
constants: typing.List[typing.Tuple[typing.Union[str, float, np.ndarray, sp.spmatrix], dict]],
broadcast: typing.Union[str, list, tuple] = None,
):
# Run type checks for broadcast argument.
if broadcast is not None:
if type(broadcast) is str:
broadcast = [broadcast]
elif type(broadcast) not in [list, tuple]:
raise ValueError(f"Invalid type of broadcast argument: {type(broadcast)}")
# Process variables.
for variable_value, variable_keys in variables:
            # If any variable key values are empty, ignore variable & do not add any c vector entry.
            if any(
                isinstance(key_value, (list, tuple, pd.MultiIndex, pd.Index, np.ndarray)) and len(key_value) == 0
                for key_value in variable_keys.values()
            ):
                continue  # Skip this variable & go to the next one.
# Obtain variable index & raise error if variable or key does not exist.
variable_index = tuple(self.get_variable_index(**variable_keys, raise_empty_index_error=True))
# Obtain broadcast dimension length for variable.
if broadcast is not None:
broadcast_len = 1
for broadcast_key in broadcast:
if broadcast_key not in variable_keys.keys():
raise ValueError(f"Invalid broadcast dimension: {broadcast_key}")
else:
broadcast_len *= len(variable_keys[broadcast_key])
else:
broadcast_len = 1
# String values are interpreted as parameter name.
if type(variable_value) is str:
parameter_name = variable_value
variable_value = self.parameters[parameter_name]
else:
parameter_name = None
# Scalar values are multiplied with row vector of ones of appropriate size.
if len(np.shape(variable_value)) == 0:
variable_value = variable_value * np.ones((1, len(variable_index)))
# If broadcasting, values are repeated along broadcast dimension.
else:
if broadcast_len > 1:
if len(np.shape(variable_value)) > 1:
variable_value = np.concatenate([variable_value] * broadcast_len, axis=1)
else:
variable_value = np.concatenate([[variable_value]] * broadcast_len, axis=1)
# Raise error if vector is not a row vector (1, n) or flat array (n, ).
if len(np.shape(variable_value)) > 1:
if np.shape(variable_value)[0] > 1:
raise ValueError(
f"Objective factor must be row vector (1, n) or flat array (n, ),"
f" not column vector (n, 1) nor matrix (m, n)."
)
# Raise error if variable dimensions are inconsistent.
if (
np.shape(variable_value)[1] != len(variable_index)
if len(np.shape(variable_value)) > 1
else np.shape(variable_value)[0] != len(variable_index)
):
raise ValueError(f"Objective factor dimension mismatch at variable: \n{variable_keys}")
# Add c vector entry.
# - If parameter, pass tuple of parameter name and broadcasting dimension length.
if parameter_name is None:
self.c_dict[variable_index].append(variable_value)
else:
self.c_dict[variable_index].append((parameter_name, broadcast_len))
# Process quadratic variables.
for variable_value, variable_keys_1, variable_keys_2 in variables_quadratic:
            # If any variable key values are empty, ignore variable & do not add any Q matrix entry.
            if any(
                isinstance(key_value, (list, tuple, pd.MultiIndex, pd.Index, np.ndarray)) and len(key_value) == 0
                for key_value in list(variable_keys_1.values()) + list(variable_keys_2.values())
            ):
                continue  # Skip this variable pair & go to the next one.
# Obtain variable index & raise error if variable or key does not exist.
variable_1_index = tuple(self.get_variable_index(**variable_keys_1, raise_empty_index_error=True))
variable_2_index = tuple(self.get_variable_index(**variable_keys_2, raise_empty_index_error=True))
# Obtain broadcast dimension length for variable.
if broadcast is not None:
broadcast_len = 1
for broadcast_key in broadcast:
if broadcast_key not in variable_keys_1.keys():
raise ValueError(f"Invalid broadcast dimension: {broadcast_key}")
else:
broadcast_len *= len(variable_keys_1[broadcast_key])
else:
broadcast_len = 1
# String values are interpreted as parameter name.
if type(variable_value) is str:
parameter_name = variable_value
variable_value = self.parameters[parameter_name]
else:
parameter_name = None
# Flat arrays are interpreted as diagonal matrix.
if len(np.shape(variable_value)) == 1:
# TODO: Raise error for flat arrays instead?
variable_value = sp.diags(variable_value)
# Scalar values are multiplied with diagonal matrix of ones of appropriate size.
if len(np.shape(variable_value)) == 0:
variable_value = variable_value * sp.eye(len(variable_1_index))
# If broadcasting, values are repeated along broadcast dimension.
else:
if type(variable_value) is np.matrix:
variable_value = np.array(variable_value)
variable_value = sp.block_diag([variable_value] * broadcast_len)
# Raise error if variable dimensions are inconsistent.
if np.shape(variable_value)[0] != len(variable_1_index):
raise ValueError(
f"Quadratic objective factor dimension mismatch at variable 1: \n{variable_keys_1}"
f"\nThe shape of quadratic objective factor matrix must be "
f"{(len(variable_1_index), len(variable_2_index))}, based on the variable dimensions."
)
if np.shape(variable_value)[1] != len(variable_2_index):
raise ValueError(
f"Quadratic objective factor dimension mismatch at variable 2: \n{variable_keys_2}"
f"\nThe shape of quadratic objective factor matrix must be "
f"{(len(variable_1_index), len(variable_2_index))}, based on the variable dimensions."
)
# Add Q matrix entry.
# - If parameter, pass tuple of parameter name and broadcasting dimension length.
if parameter_name is None:
self.q_dict[variable_1_index, variable_2_index].append(variable_value)
else:
self.q_dict[variable_1_index, variable_2_index].append((parameter_name, broadcast_len))
# Process constants.
for constant_value, constant_keys in constants:
# If constant value is string, it is interpreted as parameter.
if type(constant_value) is str:
parameter_name = constant_value
constant_value = self.parameters[parameter_name]
else:
parameter_name = None
# Obtain broadcast dimension length for constant.
if (broadcast is not None) and (constant_keys is not None):
broadcast_len = 1
for broadcast_key in broadcast:
if broadcast_key in constant_keys.keys():
broadcast_len *= len(constant_keys[broadcast_key])
else:
broadcast_len = 1
# Raise error if constant is not a scalar (1, ) or (1, 1) or float.
if type(constant_value) is not float:
if np.shape(constant_value) not in [(1,), (1, 1)]:
raise ValueError(f"Objective constant must be scalar or (1, ) or (1, 1).")
# If broadcasting, value is repeated along broadcast dimension.
if broadcast_len > 1:
constant_value = constant_value * broadcast_len
# Append d constant entry.
if parameter_name is None:
self.d_dict[0].append(constant_value)
else:
self.d_dict[0].append((parameter_name, broadcast_len))
def get_variable_index(self, name: str, raise_empty_index_error: bool = False, **keys):
"""Utility method for obtaining a variable integer index vector for given variable name / keys."""
return mesmo.utils.get_index(self.variables, name=name, **keys, raise_empty_index_error=raise_empty_index_error)
def get_variable_keys(self, name: str, **keys):
"""Utility method for obtaining a variable key dataframe for given variable name / keys.
- This intended for debugging / inspection of the key value order, e.g. such that numerical factors
can be constructed accordingly.
"""
return self.variables.loc[self.get_variable_index(name, **keys)].dropna(axis="columns", how="all")
def get_a_matrix(self) -> sp.csr_matrix:
r"""Obtain :math:`\boldsymbol{A}` matrix for the standard-form problem (see :class:`OptimizationProblem`)."""
# Log time.
mesmo.utils.log_time("get optimization problem A matrix", logger_object=logger)
# Instantiate collections.
values_list = list()
rows_list = list()
columns_list = list()
# Collect matrix entries.
for constraint_index, variable_index in self.a_dict:
for values in self.a_dict[constraint_index, variable_index]:
# If value is tuple, treat as parameter.
if type(values) is tuple:
factor, parameter_name, broadcast_len = values
values = self.parameters[parameter_name]
if len(np.shape(values)) == 1:
values = np.array([values])
if len(np.shape(values)) == 0:
values = values * sp.eye(len(variable_index))
elif broadcast_len > 1:
if type(values) is np.matrix:
values = np.array(values)
values = sp.block_diag([values] * broadcast_len)
values = values * factor
# Obtain row index, column index and values for entry in A matrix.
rows, columns, values = sp.find(values)
rows = np.array(constraint_index)[rows]
columns = np.array(variable_index)[columns]
# Insert entry in collections.
values_list.append(values)
rows_list.append(rows)
columns_list.append(columns)
# Instantiate A matrix.
a_matrix = sp.coo_matrix(
(np.concatenate(values_list), (np.concatenate(rows_list), np.concatenate(columns_list))),
shape=(self.constraints_len, len(self.variables)),
).tocsr()
# Log time.
mesmo.utils.log_time("get optimization problem A matrix", logger_object=logger)
return a_matrix
def get_b_vector(self) -> np.ndarray:
r"""Obtain :math:`\boldsymbol{b}` vector for the standard-form problem (see :class:`OptimizationProblem`)."""
# Log time.
mesmo.utils.log_time("get optimization problem b vector", logger_object=logger)
# Instantiate array.
b_vector = np.zeros((self.constraints_len, 1))
# Fill vector entries.
for constraint_index in self.b_dict:
for values in self.b_dict[constraint_index]:
# If value is tuple, treat as parameter.
if type(values) is tuple:
factor, parameter_name, broadcast_len = values
values = self.parameters[parameter_name]
if len(np.shape(values)) == 0:
values = values * np.ones(len(constraint_index))
elif broadcast_len > 1:
values = np.concatenate([values] * broadcast_len, axis=0)
values = values * factor
# Insert entry in b vector.
b_vector[constraint_index, 0] += values.ravel()
# Log time.
mesmo.utils.log_time("get optimization problem b vector", logger_object=logger)
return b_vector
def get_c_vector(self) -> np.ndarray:
r"""Obtain :math:`\boldsymbol{c}` vector for the standard-form problem (see :class:`OptimizationProblem`)."""
# Log time.
mesmo.utils.log_time("get optimization problem c vector", logger_object=logger)
# Instantiate array.
c_vector = np.zeros((1, len(self.variables)))
# Fill vector entries.
for variable_index in self.c_dict:
for values in self.c_dict[variable_index]:
# If value is tuple, treat as parameter.
if type(values) is tuple:
parameter_name, broadcast_len = values
values = self.parameters[parameter_name]
if len(np.shape(values)) == 0:
values = values * np.ones(len(variable_index))
elif broadcast_len > 1:
if len(np.shape(values)) > 1:
values = np.concatenate([values] * broadcast_len, axis=1)
else:
values = np.concatenate([[values]] * broadcast_len, axis=1)
# Insert entry in c vector.
c_vector[0, variable_index] += values.ravel()
# Log time.
mesmo.utils.log_time("get optimization problem c vector", logger_object=logger)
return c_vector
def get_q_matrix(self) -> sp.spmatrix:
r"""Obtain :math:`\boldsymbol{Q}` matrix for the standard-form problem (see :class:`OptimizationProblem`)."""
# Log time.
mesmo.utils.log_time("get optimization problem Q matrix", logger_object=logger)
# Instantiate collections.
values_list = list()
rows_list = list()
columns_list = list()
# Collect matrix entries.
for variable_1_index, variable_2_index in self.q_dict:
for values in self.q_dict[variable_1_index, variable_2_index]:
# If value is tuple, treat as parameter.
if type(values) is tuple:
parameter_name, broadcast_len = values
values = self.parameters[parameter_name]
if len(np.shape(values)) == 1:
values = sp.diags(values)
if len(np.shape(values)) == 0:
values = values * sp.eye(len(variable_1_index))
elif broadcast_len > 1:
if type(values) is np.matrix:
values = np.array(values)
values = sp.block_diag([values] * broadcast_len)
# Obtain row index, column index and values for entry in Q matrix.
rows, columns, values = sp.find(values)
rows = np.array(variable_1_index)[rows]
columns = np.array(variable_2_index)[columns]
# Insert entry in collections.
values_list.append(values)
rows_list.append(rows)
columns_list.append(columns)
# Instantiate Q matrix.
q_matrix = (
sp.coo_matrix(
(np.concatenate(values_list), (np.concatenate(rows_list), np.concatenate(columns_list))),
shape=(len(self.variables), len(self.variables)),
).tocsr()
if len(self.q_dict) > 0
else sp.csr_matrix((len(self.variables), len(self.variables)))
)
# Log time.
mesmo.utils.log_time("get optimization problem Q matrix", logger_object=logger)
return q_matrix
def get_d_constant(self) -> float:
r"""Obtain :math:`d` value for the standard-form problem (see :class:`OptimizationProblem`)."""
# Log time.
mesmo.utils.log_time("get optimization problem d constant", logger_object=logger)
# Instantiate array.
d_constant = 0.0
# Fill vector entries.
for values in self.d_dict[0]:
# If value is tuple, treat as parameter.
if type(values) is tuple:
parameter_name, broadcast_len = values
values = self.parameters[parameter_name]
if broadcast_len > 1:
values = values * broadcast_len
# Insert entry to d constant.
d_constant += float(values)
# Log time.
mesmo.utils.log_time("get optimization problem d constant", logger_object=logger)
return d_constant
def solve(self):
r"""Solve the optimization problem.
- The solve method compiles the standard form of the optimization problem
(see :class:`OptimizationProblem`) and passes the standard-form problem to the optimization
solver interface.
- The solve method currently implements interfaces to 1) Gurobi and 2) CVXPY, where the latter is a high-level
convex optimization interface, which in turn allows interfacing further third-party solvers. The intention is
          to implement more direct solver interfaces on an as-needed basis (please raise an issue!), as these interfaces
are assumed to allow higher performance than CVXPY for large-scale problems. However, CVXPY is kept as
a fallback to allow a high degree of compatibility with various solvers.
- The choice of solver and solver interface can be controlled through the config parameters
``optimization > solver_name`` and ``optimization > solver_interface`` (see ``mesmo/config_default.yml``).
The default workflow of the solve method is as follows:
1. Obtain problem definition through selected solver interface via :meth:`get_cvxpy_problem()` or
:meth:`get_gurobi_problem()`.
2. Solve optimization problem and obtain standard-form results via :meth:`solve_cvxpy()` or
:meth:`solve_gurobi()`. The standard-form results include the 1) :math:`\boldsymbol{x}` variable vector
value, 2) :math:`\boldsymbol{\mu}` dual vector value and 3) objective value, which are stored into the
object attributes :attr:`x_vector`, :attr:`mu_vector` and :attr:`objective`.
3. Obtain results with respect to the original problem formulation via :meth:`get_results()` and
:meth:`get_duals()`. These results are 1) decision variable values and
2) constraint dual values, which are stored into the object attributes :attr:`results` and :attr:`duals`.
Low-level customizations of the problem definition are possible, e.g. definition of quadratic constraints or
second-order conic (SOC) constraints via the solver interfaces, with the following workflow.
1. Obtain problem definition through selected solver interface via :meth:`get_cvxpy_problem()` or
:meth:`get_gurobi_problem()`.
2. Customize problem definitions, e.g. add custom constraints directly with the Gurobi or CVXPY interfaces.
3. Solve optimization problem and obtain standard-form results via :meth:`solve_cvxpy()` or
:meth:`solve_gurobi()`.
4. Obtain results with respect to the original problem formulation via :meth:`get_results()` and
:meth:`get_duals()`.
"""
# TODO: Add example for low-level customization solve workflow.
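        # Sketch of the low-level customization workflow described above (illustrative only,
        # using a simple linear bound as the custom constraint):
        #   problem, x_vector, constraints, objective = optimization_problem.get_gurobi_problem()
        #   problem.addConstr(x_vector[0] <= 5.0)  # custom constraint added directly via Gurobi
        #   optimization_problem.solve_gurobi(problem, x_vector, constraints, objective)
        #   results = optimization_problem.get_results()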
# Log time.
mesmo.utils.log_time(f"solve optimization problem problem", logger_object=logger)
logger.debug(
f"Solver name: {mesmo.config.config['optimization']['solver_name']};"
f" Solver interface: {mesmo.config.config['optimization']['solver_interface']};"
f" Problem statistics: {len(self.variables)} variables, {self.constraints_len} constraints"
)
# Use CVXPY solver interface, if selected.
if mesmo.config.config["optimization"]["solver_interface"] == "cvxpy":
self.solve_cvxpy(*self.get_cvxpy_problem())
# Use direct solver interfaces, if selected.
elif mesmo.config.config["optimization"]["solver_interface"] == "direct":
if mesmo.config.config["optimization"]["solver_name"] == "gurobi":
self.solve_gurobi(*self.get_gurobi_problem())
elif mesmo.config.config["optimization"]["solver_name"] == "highs":
self.solve_highs()
# If no direct solver interface found, fall back to CVXPY interface.
else:
logger.debug(
f"No direct solver interface implemented for"
f" '{mesmo.config.config['optimization']['solver_name']}'. Falling back to CVXPY."
)
self.solve_cvxpy(*self.get_cvxpy_problem())
# Raise error, if invalid solver interface selected.
else:
raise ValueError(f"Invalid solver interface: '{mesmo.config.config['optimization']['solver_interface']}'")
# Get results / duals.
self.results = self.get_results()
# Do not retrieve dual variables if mu vector cannot be retrieved. See `solve_gurobi()`.
if not all(np.isnan(self.mu_vector)):
self.duals = self.get_duals()
# Log time.
mesmo.utils.log_time("solve optimization problem", logger_object=logger)
def get_gurobi_problem(self) -> (gp.Model, gp.MVar, gp.MConstr, gp.MQuadExpr):
"""Obtain standard-form problem via Gurobi direct interface."""
# Instantiate Gurobi model.
# - A Gurobi model holds a single optimization problem. It consists of a set of variables, a set of constraints,
# and the associated attributes.
gurobipy_problem = gp.Model()
# Set solver parameters.
gurobipy_problem.setParam("OutputFlag", int(mesmo.config.config["optimization"]["show_solver_output"]))
for key, value in mesmo.config.solver_parameters.items():
gurobipy_problem.setParam(key, value)
# Define variables.
# - Need to express vectors as 1-D arrays to enable matrix multiplication in constraints (gurobipy limitation).
# - Lower bound defaults to 0 and needs to be explicitly overwritten.
x_vector = gurobipy_problem.addMVar(
shape=(len(self.variables),), lb=-np.inf, ub=np.inf, vtype=gp.GRB.CONTINUOUS, name="x_vector"
)
if (self.variables.loc[:, "variable_type"] == "integer").any():
x_vector[self.variables.loc[:, "variable_type"] == "integer"].setAttr("vtype", gp.GRB.INTEGER)
if (self.variables.loc[:, "variable_type"] == "binary").any():
x_vector[self.variables.loc[:, "variable_type"] == "binary"].setAttr("vtype", gp.GRB.BINARY)
# Define constraints.
# - 1-D arrays are interpreted as column vectors (n, 1) (based on gurobipy convention).
constraints = self.get_a_matrix() @ x_vector <= self.get_b_vector().ravel()
constraints = gurobipy_problem.addConstr(constraints, name="constraints")
# Define objective.
# - 1-D arrays are interpreted as column vectors (n, 1) (based on gurobipy convention).
objective = (
self.get_c_vector().ravel() @ x_vector
+ x_vector @ (0.5 * self.get_q_matrix()) @ x_vector
+ self.get_d_constant()
)
gurobipy_problem.setObjective(objective, gp.GRB.MINIMIZE)
return (gurobipy_problem, x_vector, constraints, objective)
def solve_gurobi(
self, gurobipy_problem: gp.Model, x_vector: gp.MVar, constraints: gp.MConstr, objective: gp.MQuadExpr
) -> gp.Model:
"""Solve optimization problem via Gurobi direct interface."""
# Solve optimization problem.
gurobipy_problem.optimize()
# Raise error if no optimal solution.
status_labels = {
gp.GRB.INFEASIBLE: "Infeasible",
gp.GRB.INF_OR_UNBD: "Infeasible or Unbounded",
gp.GRB.UNBOUNDED: "Unbounded",
gp.GRB.SUBOPTIMAL: "Suboptimal",
}
status = gurobipy_problem.getAttr("Status")
if status not in [gp.GRB.OPTIMAL, gp.GRB.SUBOPTIMAL]:
status = status_labels[status] if status in status_labels.keys() else f"{status} (See Gurobi documentation)"
raise RuntimeError(f"Gurobi exited with non-optimal solution status: {status}")
elif status == gp.GRB.SUBOPTIMAL:
status = status_labels[status] if status in status_labels.keys() else f"{status} (See Gurobi documentation)"
logger.warning(f"Gurobi exited with non-optimal solution status: {status}")
# Store results.
self.x_vector = np.transpose([x_vector.getAttr("x")])
if (
(gurobipy_problem.getAttr("NumQCNZs") == 0)
and not ((self.variables.loc[:, "variable_type"] == "integer").any())
and not ((self.variables.loc[:, "variable_type"] == "binary").any())
):
self.mu_vector = np.transpose([constraints.getAttr("Pi")])
else:
# Duals are not retrieved if quadratic or SOC constraints have been added to the model.
logger.warning(
f"Duals of the optimization problem's constraints are not retrieved,"
f" because either variables have been defined as non-continuous"
f" or quadratic / SOC constraints have been added to the problem."
f"\nPlease retrieve the duals manually."
)
self.mu_vector = np.nan * np.zeros(constraints.shape)
self.objective = float(objective.getValue())
return gurobipy_problem
def get_cvxpy_problem(
self,
) -> (cp.Variable, typing.List[typing.Union[cp.NonPos, cp.Zero, cp.SOC, cp.PSD]], cp.Expression):
"""Obtain standard-form problem via CVXPY interface."""
# Define variables.
x_vector = cp.Variable(
shape=(len(self.variables), 1),
name="x_vector",
integer=(
(index, 0)
for index, is_integer in enumerate(self.variables.loc[:, "variable_type"] == "integer")
if is_integer
)
if (self.variables.loc[:, "variable_type"] == "integer").any()
else False,
boolean=(
(index, 0)
for index, is_binary in enumerate(self.variables.loc[:, "variable_type"] == "binary")
if is_binary
)
if (self.variables.loc[:, "variable_type"] == "binary").any()
else False,
)
# Define constraints.
constraints = [self.get_a_matrix() @ x_vector <= self.get_b_vector()]
# Define objective.
objective = (
self.get_c_vector() @ x_vector + cp.quad_form(x_vector, 0.5 * self.get_q_matrix()) + self.get_d_constant()
)
return (x_vector, constraints, objective)
def solve_cvxpy(
self,
x_vector: cp.Variable,
constraints: typing.List[typing.Union[cp.NonPos, cp.Zero, cp.SOC, cp.PSD]],
objective: cp.Expression,
) -> cp.Problem:
"""Solve optimization problem via CVXPY interface."""
# Instantiate CVXPY problem.
cvxpy_problem = cp.Problem(cp.Minimize(objective), constraints)
# Solve optimization problem.
cvxpy_problem.solve(
solver=(
mesmo.config.config["optimization"]["solver_name"].upper()
if mesmo.config.config["optimization"]["solver_name"] is not None
else None
),
verbose=mesmo.config.config["optimization"]["show_solver_output"],
**mesmo.config.solver_parameters,
)
# Assert that solver exited with an optimal solution. If not, raise an error.
if not (cvxpy_problem.status == cp.OPTIMAL):
raise RuntimeError(f"CVXPY exited with non-optimal solution status: {cvxpy_problem.status}")
# Store results.
self.x_vector = x_vector.value
self.mu_vector = constraints[0].dual_value
self.objective = float(cvxpy_problem.objective.value)
return cvxpy_problem
def solve_highs(self) -> scipy.optimize.OptimizeResult:
"""Solve optimization problem via SciPy HiGHS interface."""
# Raise warning if Q matrix is not zero, because HiGHS interface below doesn't consider QP expressions yet.
if any((self.get_q_matrix() != 0).data):
logger.warning(f"Found QP expression: The HiGHS solver interface does not yet support QP solution.")
# Replace infinite values in b vector with maximum floating point value.
# - Reason: SciPy optimization interface doesn't accept infinite values.
b_vector = self.get_b_vector().ravel()
b_vector[b_vector == np.inf] = np.finfo(float).max
# Solve optimization problem.
scipy_result = scipy.optimize.linprog(
self.get_c_vector().ravel(),
A_ub=self.get_a_matrix(),
b_ub=b_vector,
bounds=(None, None),
method="highs",
options=dict(
disp=mesmo.config.config["optimization"]["show_solver_output"],
time_limit=mesmo.config.config["optimization"]["time_limit"],
),
)
# Assert that solver exited with an optimal solution. If not, raise an error.
if not (scipy_result.status == 0):
raise RuntimeError(f"HiGHS exited with non-optimal solution status: {scipy_result.message}")
# Store results.
self.x_vector = np.transpose([scipy_result.x])
self.mu_vector = np.transpose([scipy_result.ineqlin.marginals])
self.objective = scipy_result.fun
return scipy_result
def get_results(self, x_vector: typing.Union[cp.Variable, np.ndarray] = None) -> dict:
    """Obtain results for decision variables.
    - Results are returned as a dictionary with keys corresponding to the variable names that have been defined.
"""
# Log time.
mesmo.utils.log_time("get optimization problem results", logger_object=logger)
# Obtain x vector.
if x_vector is None:
x_vector = self.x_vector
elif type(x_vector) is cp.Variable:
x_vector = x_vector.value
# Instantiate results object.
results = dict.fromkeys(self.variables.loc[:, "name"].unique())
# Obtain results for each variable.
for name in results:
# Get variable dimensions.
variable_dimensions = (
self.variables.iloc[self.get_variable_index(name), :]
.drop(["name", "variable_type"], axis=1)
.drop_duplicates()
.dropna(axis=1)
)
if len(variable_dimensions.columns) > 0:
# Get results from x vector as pandas series.
results[name] = pd.Series(
x_vector[self.get_variable_index(name), 0], index= | pd.MultiIndex.from_frame(variable_dimensions) | pandas.MultiIndex.from_frame |
#!/usr/bin/env python3
import numpy as np
import pandas as pd
def series():
#we can create numpy time arrays with np.array('2019-10-12', dtype=np.datetime64)
#for this we rely on the pandas package in python
index=pd.DatetimeIndex(['2000-10-12','2000-10-24','2001-12-12'])
ser=pd.Series([12,54,67],index=index)
print(ser)
#we can access each element just like a normal pandas Series
print(ser['2000-10-01':'2000-10-24'])
#we can filter the elements by year
print(ser['2000'])
#pandas time series data structures
#for timestamps pandas provides Timestamp(), with the associated index structure DatetimeIndex()
#for time periods we have Period(), with the index structure PeriodIndex()
#for time deltas or durations we have Timedelta(), with the index structure TimedeltaIndex()
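#a minimal sketch of these three structures (the example values below are arbitrary)
ts = pd.Timestamp('2020-01-01')       #single timestamp; collections of these use DatetimeIndex
per = pd.Period('2020-01', freq='M')  #time period; collections of these use PeriodIndex
delta = pd.Timedelta(days=3)          #duration; collections of these use TimedeltaIndex
print(ts, per, delta)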
def series1():
#we can create date ranges in one go
#we can replace the earlier approaches and compare them with respect to index creation
index= | pd.DatetimeIndex(['2000-12-12','2003-12-23']) | pandas.DatetimeIndex |
import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
import anndata
###########################################################################
################# Related to input/error handling #########################
###########################################################################
###########################################################################
############################## Colors ###################################
###########################################################################
class TestColors(object):
# tests set_metadata_colors
# test set_metadata_colors - vanilla
def test_set_metadata_colors_1(self):
sg = get_die_test_sg()
cmap = {'GM12878': 'red', 'K562': 'blue'}
test = sg.set_metadata_colors('sample', cmap)
assert sg.adata.uns.sample_colors == ['red', 'blue']
# test set_metadata_colors - obs_col does not exist
def test_set_metadata_colors_2(self):
sg = get_die_test_sg()
cmap = {1: 'red', 2: 'blue'}
with pytest.raises(Exception) as e:
test = sg.set_metadata_colors('stage', cmap)
assert 'Metadata column' in str(e.value)
###########################################################################
################# Related to plotting Swan Plots ##########################
###########################################################################
class TestPlotting(object):
# done: test_new_gene, calc_pos_sizes, calc_edge_curves, plot_graph,
# plot_transcript_path
# init_plot_settings tests do not check for indicate_novel / indicate settings
# init_plot_settings tests do not check for new dataset addition
# test init_plot_settings - https://github.com/mortazavilab/swan_vis/issues/8
# gene summary -> transcript path (same gene) -> gene summary (same gene)
def test_init_9(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test2_gid', display=False)
sg.plot_transcript_path('test5', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'TES', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting gene summary to
# gene summary (same gene), also tests working from gene name
def test_init_8(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test2_gid', display=False)
sg.plot_graph('test2_gname', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'TES', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting gene summary to
# gene summary (different gene)
def test_init_7(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test4_gid', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'TES', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to transcript path (same gene)
def test_init_6(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test3', display=False)
sg.plot_transcript_path('test2', display=False)
# edge_df
sg.pg.edge_df.drop(['curve'], axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon_gray', None],
[14, '-', 'intron', 3, 5, 'intron_gray', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon_gray', None],
[11, '-', 'intron', 1, 4, 'intron_gray', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = | pd.DataFrame(data=data, columns=cols) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import timedelta, time
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.lib import Timestamp
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([ | Timestamp('2011-01-01 10:00') | pandas.lib.Timestamp |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import requests
import pandas as pd
from bs4 import BeautifulSoup
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[ ]:
#The historical data was downloaded beforehand; for the following dates, an append is made
#of the data that appears in the table on the page
url = 'https://dinem.agroindustria.gob.ar/dinem_fas.cfasn.aspx'
# In[ ]:
#Download the page and parse it
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
# In[ ]:
#Find the table that contains the values
input_data = soup.find('input', {'name':'GridContainerDataV'})
# In[ ]:
#Convert to a dataframe and rename the columns
df = pd.DataFrame(eval(input_data.get('value')),
columns=['Date', 'Trigo Pan', 'Maíz', 'Girasol', 'Soja', 'Aceite crudo de Girasol', 'Aceite crudo de Soja'])
# In[ ]:
#Set the index and add the country
df['Date'] = | pd.to_datetime(df['Date'], format='%d/%m/%Y') | pandas.to_datetime |
import numpy as np
import random
import math
class evolution_benchmark():
def __init__(self, F, bounds, num_pop, num_gen):
self.F = F #Fitness function. NB: best fit = higher fitness value, so to find a minimum put a - in front!
self.bounds = np.array(bounds) #Function bounds
self.num_pop = num_pop #Number of individuals in population
self.fit = np.zeros(num_pop) #evaluation/fit for each indivial
self.num_gen = num_gen #Number of generations
self.mutation_rate = 0.5 #Probability that a mutation occurs; Xover and mutation share the same probability
self.n_best = math.floor(num_pop*0.25) #Number of best individuals allowed to reproduce (where required)
self.mutation_coef = 0.01 #Magnitude of the mutation
def _create_individual(self, mode='uni_sampled'):
if mode == 'std_sampled':
genotype=np.random.standard_normal(2)
elif mode == 'uni_sampled':
genotype=np.random.uniform(size=(2))
return np.multiply(genotype, self.bounds[:,1]-self.bounds[:,0])+self.bounds[:,0]
def initialization(self, mode='uni_sampled'):
self.population = []
for _ in range(self.num_pop):
self.population.append(self._create_individual(mode))
print('Population inizialized')
def evaluation(self):
for i in range(self.num_pop):
self.fit[i]=self.F(self.population[i])
#Ordered population
tmp = sorted(zip(self.population, self.fit), reverse=True, key = lambda a : a[1])
self.population = [x[0] for x in tmp]
self.fit = [x[1] for x in tmp]
def selection_reproduction(self, mode='trbs', n_best=3):
new_population = []
if mode == 'trbs': #Truncated Rank-Based Selection
n_children = int(self.num_pop/n_best)
while len(new_population)<self.num_pop:
for i in range(n_best):
new_population.append(self.population[i].copy())
if len(new_population)>=self.num_pop: break
return new_population
elif mode == 'elitism':
for i in range(n_best):
new_population.append(self.population[i].copy())
k = 0
for i in range(n_best,len(self.population)):
if (k >= n_best):
k = 0
if (random.random() > 0.75):
new_population.append(self.population[k].copy())
k += 1
else:
new_population.append(self.population[i].copy())
return new_population
def Xover(self, p1, p2, mode=0):
if mode == 0: #arithmetic
if random.random()<0.5:
x = p1 + p2
else:
x = p1 - p2
x[0] = min(self.bounds[0,1], max(self.bounds[0,0], x[0]))
x[1] = min(self.bounds[1,1], max(self.bounds[1,0], x[1]))
return x
elif mode == 1: #uniform
return np.array([p1[0],p2[1]])
elif mode == 2: #average
return (p1 + p2) /2
def mutation(self, p):
noise = self.mutation_coef * random.random()- self.mutation_coef/2
i = random.randint(0, 1)  #pick one of the two genes at random
x = p[i]+noise
p[i] = min(self.bounds[i,1], max(self.bounds[i,0], x))
return p
def evolution(self, mantain_best=True):
##########################
positions = [[],[]]
##########################
self.initialization()
for g in range(self.num_gen):
self.evaluation()
#########################
positions[0].append([])
positions[1].append([])
for individual in self.population:
positions[0][g].append(individual[0])
positions[1][g].append(individual[1])
#########################
print('Generation ',g,' Best: ',self.population[0],' with value: ', self.fit[0])
self.population = self.selection_reproduction(mode='elitism', n_best=self.n_best)
start = 0 if not mantain_best else self.n_best
for p in range(start, self.num_pop):
if random.random()>self.mutation_rate:
if random.random()<0.5:
self.population[p] = self.Xover(self.population[p], self.population[random.randint(0, self.num_pop-1)], mode=random.randint(0, 2))
else:
self.population[p] = self.mutation(self.population[p])
#######################
return positions
def benchmark_1(p):
#Rosenbrock
x = p[0]
y = p[1]
a = 0
b = 1
f = pow((a-pow(x, 2)), 2) + b * pow((y-pow(x, 2)), 2)
return -f
def benchmark_2(p):
#Rastrigin
x = p[0]
y = p[1]
n = 2
f = pow(x, 2) - 10 * math.cos(2 * math.pi * x) + pow(y, 2) - 10 * math.cos(2 * math.pi * y) + 10*n
return -f
##################################################################
eb = evolution_benchmark(benchmark_2, [[-10,10],[-10,10]], 50, 30)
positions = eb.evolution()
##################################################################
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import pandas as pd
from random import random
import seaborn as sns
AXIS_X_LFT = -10
AXIS_X_RGT = 10
AXIS_Y_BOT = -10
AXIS_Y_TOP = 10
Writer = animation.writers['ffmpeg']
writer = Writer(fps=8, metadata=dict(artist='Me'), bitrate=1800)
#Init of the function graph
fig = plt.figure(figsize=(10,6))
plt.xlim(AXIS_X_LFT, AXIS_X_RGT)
plt.ylim(AXIS_Y_BOT, AXIS_Y_TOP)
plt.xlabel('X')
plt.ylabel('Y')
plt.title('plot')
#Benchmark functions
a = 0; b = 1
x = np.arange(AXIS_X_LFT, AXIS_X_RGT, 0.1)
y = np.arange(AXIS_Y_BOT, AXIS_Y_TOP, 0.1)
xx, yy = np.meshgrid(x, y)
#z = ((a-(xx**2))**2) + b * ((yy - (xx**2))**2) #Rosenbrock
z = 10 * 2 + ((xx**2) - 10 * np.cos(2 * math.pi * xx)) + ((yy**2) - 10 * np.cos(2 * math.pi * yy)) #Rastrigin
df1 = | pd.DataFrame(data=z, index=y, columns=x) | pandas.DataFrame |
import argparse
from itertools import product
import warnings
from joblib import Parallel, delayed
import librosa
import numpy as np
import pandas as pd
from scipy import signal, stats
from sklearn.linear_model import LinearRegression
from tqdm import tqdm
from tsfresh.feature_extraction import feature_calculators
from earthquake import config
warnings.filterwarnings("ignore")
class FeatureGenerator(object):
"""Feature engineering.
"""
def __init__(
self,
path_to_store,
is_train=True,
n_rows=1e6,
n_jobs=1,
segment_size=150000
):
"""Decomposition of initial signal into the set of features.
Args:
path_to_store:
Path to .hdf store with original signal data.
is_train:
True, if creating the training set.
n_rows:
Amount of rows in training store.
n_jobs:
Amount of parallel jobs.
segment_size:
Amount of observations in each segment
"""
self.path_to_store = path_to_store
self.n_rows = n_rows
self.n_jobs = n_jobs
self.segment_size = segment_size
self.is_train = is_train
if self.is_train:
self.total = int(self.n_rows / self.segment_size)
self.store = None
self.keys = None
else:
self.store = pd.HDFStore(self.path_to_store, mode='r')
self.keys = self.store.keys()
self.total = len(self.keys)
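# A hypothetical usage sketch (the store path and parameter values are placeholders):
# fg = FeatureGenerator('data/train.hdf', is_train=True, n_rows=int(1e6), n_jobs=4)
# for segment in fg.segments():
#     pass  # compute features for each segment here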
def __del__(self):
if self.store is not None:
self.store.close()
def segments(self):
"""Returns generator object to iterate over segments.
"""
if self.is_train:
for i in range(self.total):
start = i * self.segment_size
stop = (i + 1) * self.segment_size
# read one segment of data from .hdf store
data = | pd.read_hdf(self.path_to_store, start=start, stop=stop) | pandas.read_hdf |
import time
from datetime import datetime, timedelta
import cufflinks.datagen as cfdg
import pandas
import ujson
from sklearn.datasets import make_classification
from tornado_sqlalchemy_login.utils import construct_path, safe_post
from ..enums import CompetitionMetric, CompetitionType, DatasetFormat
from ..types.competition import CompetitionSpec
def classify1(host, cookies=None, proxies=None):
dataset = make_classification()
competition = CompetitionSpec(
title="Classify this dataset",
type=CompetitionType.CLASSIFY,
expiration=datetime.now() + timedelta(minutes=1),
prize=1.0,
dataset= | pandas.DataFrame(dataset[0]) | pandas.DataFrame |
from gnn_benchmark.common.run_db import RunState
import collections
from gnn_benchmark.common.utils import run_entries_to_df, confidence_interval_95
import seaborn as sns
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import lines
import functools
import copy
class Analysis:
task_col = "run_definition.task_name"
time_col = "results.duration"
mem_usage_col = "results.gpu_mem_usage"
def __init__(self, runs_db, metric_col, metric_comp="max"):
self.runs_db = runs_db
self.metric_col = metric_col
assert metric_comp in ["max", "min"]
self.metric_comp = metric_comp
@functools.lru_cache()
def _read_runs(self):
n_runs = self.runs_db.n_runs()
n_finished = self.runs_db.n_runs(RunState.finished)
if n_runs > n_finished:
print(f"\n\nNot all runs finished! "
f"Currently, {n_finished}/{n_runs} are finished ({(100 * n_finished) // n_runs}%)\n\n")
runs_df = run_entries_to_df(self.runs_db.find_finished(), replace_none="None")
return runs_df
def _best_run_indices(self, runs_df, compare_col):
"""Computes the indices of the best runs for the interesting parameter"""
best_indices = []
model_names = runs_df[compare_col].unique()
op = "idxmax" if self.metric_comp == "max" else "idxmin"
for m in model_names:
best_indices.append(
getattr(runs_df[runs_df[compare_col] == m][self.metric_col], op)()
)
return best_indices
def _get_param_of_best_run(self, compare_col, param):
cmp = self.best_runs_df(compare_col)
cmp = cmp.reset_index(level=[1])
tasks = cmp.index.unique()
evaluation_results = {}
for d in tasks:
best_run_rows = cmp[cmp.index == d]
best_run_rows = best_run_rows.set_index(
compare_col, drop=True
)
evaluation_results[d] = best_run_rows[param]
best_summarized = pd.concat(evaluation_results, axis=1)
return best_summarized
def best_results_df(self, compare_col):
"""Gives a high-level overview dataframe containing the performances of the compare_col x the tasks"""
return self._get_param_of_best_run(compare_col, self.metric_col)
def runtimes_df(self, compare_col):
"""Gives a high-level overview dataframe containing the runtimes of the best compare_col x the tasks"""
return self._get_param_of_best_run(compare_col, self.time_col)
def mem_usage_df(self, compare_col):
"""Gives a high-level overview dataframe containing the memory usage of the best compare_col x the tasks"""
return self._get_param_of_best_run(compare_col, self.mem_usage_col) // 1024 // 1024
def best_runs_df(self, compare_col):
"""Returns, for every task/compare_col combination, the best run and its results"""
runs_df = self._read_runs()
tasks = runs_df[self.task_col].unique()
best_hparams = {}
for d in tasks:
best_run_idxes = self._best_run_indices(runs_df[runs_df[self.task_col] == d], compare_col)
best_run_rows = runs_df.loc[best_run_idxes]
best_run_rows = best_run_rows.set_index(
compare_col, drop=True
)
best_hparams[d] = best_run_rows
best_hparams = pd.concat(best_hparams, axis=0)
return best_hparams
def human_readable(self, df):
def edit_string(s):
if s is None:
return s
s = s.replace("run_definition.", "")
s = s.replace("results.", "")
s = s.replace("_metrics.", ".")
return s
df = copy.deepcopy(df)
columns = df.columns
if isinstance(columns, pd.MultiIndex):
for i, level_names in enumerate(columns.levels):
new_names = [edit_string(n) for n in level_names]
columns = columns.set_levels(new_names, level=i)
else:
columns = columns.to_list()
for i, c in enumerate(columns):
c = edit_string(c)
columns[i] = c
df.columns = columns
df.index.name = edit_string(df.index.name)
return df
def ranking_df(self, compare_col):
best_summarized = self.best_results_df(compare_col)
finished_cols = best_summarized.columns[(~pd.isna(best_summarized).any(axis=0)).values.nonzero()]
ranking = best_summarized[finished_cols].rank(ascending=self.metric_comp == "min")
mean_ranking = ranking.mean(axis=1)
ranking["total"] = mean_ranking
return ranking
def relative_performance(self, compare_col):
best_summarized = self.best_results_df(compare_col)
if self.metric_comp == "max":
max_performances = best_summarized.max(axis=0)
else:
max_performances = best_summarized.min(axis=0)
relative_performances = best_summarized / max_performances
mean_relative_performance = relative_performances.mean(axis=1)
relative_performances["mean"] = mean_relative_performance
return relative_performances
def _plot_overfitting_task(self, df, compare_col, metric_x, metric_y, ax=None, jitter_x=0., jitter_y=0.,
same_scale=False):
if ax is None:
fig, ax = plt.subplots(1, 1)
x = np.array(df[metric_x])
x = x + np.random.normal(0, jitter_x, x.shape)
y = np.array(df[metric_y])
y = y + np.random.normal(0, jitter_y, y.shape)
hue = df[compare_col]
ax = sns.scatterplot(x=x, y=y, hue=hue,
alpha=0.5, ax=ax)
ax.set_xlabel(metric_x)
ax.set_ylabel(metric_y)
if same_scale:
lims = list(zip(ax.get_xlim(), ax.get_ylim()))
newlims = min(lims[0]), max(lims[1])
diagonal = lines.Line2D(newlims, newlims, c=(0, 0, 0, 0.1))
ax.add_line(diagonal)
ax.set_xlim(newlims)
ax.set_ylim(newlims)
# Setting equal size
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
asp = abs((xmax - xmin) / (ymax - ymin))
ax.set_aspect(asp)
return ax
def overfitting_fig(self, compare_col, metric_x, metric_y, jitter_x=0., jitter_y=0., same_scale=False):
df = self._read_runs()
tasks = df[self.task_col].unique()
ntasks = len(tasks)
if ntasks <= 3:
ncols = ntasks
nrows = 1
elif ntasks <= 6:
ncols = 3
nrows = 2
else:
nrows = int(np.ceil((len(tasks) / 1.5)**0.5))
ncols = int(np.ceil(nrows * 1.5)) - 1
fig, axes = plt.subplots(nrows, ncols, squeeze=False)
for ax, t in zip(axes.flatten(), tasks):
self._plot_overfitting_task(
df[df[self.task_col] == t], compare_col, metric_x, metric_y, ax=ax, jitter_x=jitter_x,
jitter_y=jitter_y, same_scale=same_scale
)
ax.set_title(t)
handles, labels = axes[0, 0].get_legend_handles_labels()
fig.legend(handles, labels, loc='lower right')
[ax.get_legend().remove() for ax in axes.flatten() if ax.get_legend() is not None]
return fig
def print_default_analysis(self, interesting_col, metric_col):
best_results_df = self.human_readable(self.best_results_df(interesting_col))
best_runs_df = self.human_readable(self.best_runs_df(interesting_col))
ranking = self.human_readable(self.ranking_df(interesting_col))
overfitting_fig = self.overfitting_fig(
compare_col=interesting_col,
metric_x=metric_col.replace("test_metrics", "train_metrics"),
metric_y=metric_col,
same_scale=True
)
relative = self.human_readable(self.relative_performance(interesting_col))
runtimes = self.runtimes_df(interesting_col)
mem_usage = self.human_readable(self.mem_usage_df(interesting_col))
with pd.option_context("display.width", 0):
print("run summary")
print(best_results_df)
print("\n\nconfigs of the best runs")
print(best_runs_df)
print("\n\nranking")
print(ranking)
print("\n\nrelative performance")
print(relative)
print("\n\nruntimes (s)")
print(runtimes)
print("\n\nGPU mem_usage (MB)")
print(mem_usage)
plt.show()
class FoldedAnalysis(Analysis):
# TODO: Print out confidences in the overview evaluation
fold_idx_col = "run_definition.fold_idx"
def _unique_hparams(self, df):
run_def_cols = [
c for c in df.columns if c.startswith("run_definition.")
and c != self.fold_idx_col
]
filtered_hparam_columns = []
for h in run_def_cols:
if isinstance(df[h].iloc[0], collections.abc.Hashable):
if len(df[h].unique()) > 1:
filtered_hparam_columns.append(h)
else:
if len(df[h].transform(tuple).unique()) > 1:
filtered_hparam_columns.append(h)
return filtered_hparam_columns
def _create_hparam_hash(self, df, to_keep=None):
# creates a fake "hyperparameter hash" that uniquely defines hparams. This allows us to find all related folds
# we look for all columns in which there are runs that differ, to later build a string representation (for each run)
# of which hyperparameter they differ in.
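# Example (hypothetical values): if the runs only differ in learning rate and model type, a run with
# lr=0.01 and model='gcn' is hashed as "0.01|gcn"; all folds of that run share this string, so they
# can be grouped together for aggregation.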
to_keep = to_keep or set()
filtered_hparam_columns = self._unique_hparams(df)
filtered_hparam_columns = list(set(filtered_hparam_columns).union(set(to_keep)))
return ["|".join(v) for v in df[filtered_hparam_columns].astype(str).values]
def _statistics_by_fold(self, runs_df, to_keep=None):
to_keep = to_keep or []
metrics = [c for c in runs_df.columns if c.startswith("results.")]
run_parameters = [c for c in runs_df.columns if c.startswith("run_definition.")]
def create_new_run(cur_run, agg_vals, extracted_runs):
concats = pd.concat(agg_vals, axis=1).T
mean_dict = concats.mean().to_dict()
std_dict = concats.agg(confidence_interval_95).to_dict()
conf_dict = {k + ".conf": v for k, v in std_dict.items() if np.isfinite(v)}
extracted_runs.append({**cur_run, **mean_dict, **conf_dict})
extracted_runs = []
runs_df["hparam_config"] = self._create_hparam_hash(
runs_df, to_keep=to_keep
)
runs_df = runs_df.sort_values(by="hparam_config")
cur_run = None
agg_vals = []
cur_hparam_config = None
for (_, row), (_, metrics_row) in zip(runs_df.iterrows(), runs_df[metrics].iterrows()):
if cur_hparam_config is None or cur_hparam_config != row["hparam_config"]:
if cur_hparam_config is not None:
create_new_run(cur_run, agg_vals, extracted_runs)
cur_run = row[run_parameters].to_dict()
cur_hparam_config = row["hparam_config"]
agg_vals = []
agg_vals.append(metrics_row)
create_new_run(cur_run, agg_vals, extracted_runs)
return pd.DataFrame(extracted_runs)
@functools.lru_cache()
def best_runs_df(self, compare_col):
"""Returns, for every task/compare_col combination, the best run and its results"""
runs_df = self._read_runs()
runs_df = self._statistics_by_fold(runs_df, to_keep=[compare_col])
tasks = runs_df[self.task_col].unique()
best_hparams = {}
for d in tasks:
best_run_idxes = self._best_run_indices(runs_df[runs_df[self.task_col] == d], compare_col)
best_run_rows = runs_df.loc[best_run_idxes]
best_run_rows = best_run_rows.set_index(
compare_col, drop=True
)
best_hparams[d] = best_run_rows
best_hparams = pd.concat(best_hparams, axis=0)
return best_hparams
def best_results_df(self, compare_col, return_conf=False):
"""Gives a high-level overview dataframe containing the performances of the compare_col x the tasks"""
if return_conf:
return self._get_param_of_best_run(compare_col, [self.metric_col, self.metric_col + ".conf"])
else:
return self._get_param_of_best_run(compare_col, self.metric_col)
def print_default_analysis(self, interesting_col, metric_col):
best_results_df = self.human_readable(self.best_results_df(interesting_col, return_conf=True))
best_runs_df = self.human_readable(self.best_runs_df(interesting_col))
ranking = self.human_readable(self.ranking_df(interesting_col))
overfitting_fig = self.overfitting_fig(
compare_col=interesting_col,
metric_x=metric_col.replace("test_metrics", "train_metrics"),
metric_y=metric_col,
same_scale=True
)
relative = self.human_readable(self.relative_performance(interesting_col))
runtimes = self.runtimes_df(interesting_col)
mem_usage = self.human_readable(self.mem_usage_df(interesting_col))
with | pd.option_context("display.width", 0) | pandas.option_context |
"""
Tests for CBMonthEnd CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask="Mon Wed Fri")
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthEnd>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, d, expected = case
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
| CBMonthEnd() | pandas._libs.tslibs.offsets.CBMonthEnd |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 19 18:28:28 2021
"""
class Crystal_input:
# This creates a crystal_input object
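# A hypothetical usage sketch (the .d12 file name and the optimisation option are placeholders):
# inp = Crystal_input().from_file('mgo.d12')
# inp.sp_to_opt(opt_options=['FULLOPTG\n'])
# inp.add_guessp()
# inp.print_input()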
def __init__(self):
# Initialise the object
pass
def from_blocks(self, geom_block, bs_block, func_block, scf_block, title = 'Input generated by crystal_functions'):
# Build the input from blocks
# All the blocks are list of strings or lists
self.geom_block = geom_block
self.bs_block = bs_block
self.func_block = func_block
self.scf_block = scf_block
self.title = [title+'\n']
if 'BASISSET\n' in self.bs_block:
self.is_basisset = True
return self
def from_file(self, input_name):
# input_name: name of the input file
import sys
self.name = input_name
try:
if input_name[-3:] != 'd12':
input_name = input_name+'.d12'
file = open(input_name, 'r')
data = file.readlines()
file.close()
except:
print('EXITING: a .d12 file needs to be specified')
sys.exit(1)
# The first line is the title
self.title = [data[0]]
# Check is the basis set is a built in one
if 'BASISSET\n' in data:
end_index = []
if 'OPTGEOM\n' in data:
end_index = [i for i, s in enumerate(data) if 'END' in s]
end_index.insert(1, data.index('BASISSET\n')+1)
else:
end_index.append(data.index('BASISSET\n')-1)
end_index.append(data.index('BASISSET\n')+1)
end_index.extend([i for i, s in enumerate(
data[end_index[1]:]) if 'END' in s])
self.is_basisset = True
else:
end_index = [i for i, s in enumerate(data) if 'END' in s]
self.is_basisset = False
self.geom_block = []
self.bs_block = []
self.func_block = []
self.scf_block = []
if len(end_index) == 4:
self.geom_block = data[1:end_index[0]+1]
self.bs_block = data[end_index[0]+1:end_index[1]+1]
self.func_block = data[end_index[1]+1:end_index[2]+1]
for i in range(end_index[2]+1, end_index[-1]):
if data[i+1][0].isnumeric():
self.scf_block.append([data[i], data[i+1]])
else:
if data[i][0].isalpha():
self.scf_block.append(data[i])
else:
pass
# The loop cannot go over the last element
self.scf_block.append('ENDSCF\n')
elif len(end_index) == 5:
self.geom_block = data[:end_index[1]+1]
self.bs_block = data[end_index[1]+1:end_index[2]+1]
self.func_block = data[end_index[2]+1:end_index[3]+1]
for i in range(end_index[3]+1, end_index[-1]):
if data[i+1][0].isnumeric():
self.scf_block.append([data[i], data[i+1]])
else:
if data[i][0].isalpha():
self.scf_block.append(data[i])
else:
pass
# The loop cannot go over the last element
self.scf_block.append('ENDSCF\n')
return self
def add_ghost(self, ghost_atoms):
# Add ghost functions to the input object
if self.is_basisset == True:
self.bs_block.append('GHOSTS\n')
self.bs_block.append('%s\n' % len(ghost_atoms))
self.bs_block.append(' '.join([str(x) for x in ghost_atoms])+'\n')
else:
self.bs_block.insert(-1, 'GHOSTS\n')
self.bs_block.insert(-1, '%s\n' % len(ghost_atoms))
self.bs_block.insert(-1, ' '.join([str(x)
for x in ghost_atoms])+'\n')
def add_guessp(self):
# Add the GUESSP keyword functions to the input object
self.scf_block.insert(-1,'GUESSP\n')
def remove_ghost(self):
# Remove ghost functions from the input object
if 'GHOSTS\n' in self.bs_block:
    ghost_index = self.bs_block.index('GHOSTS\n')
    del self.bs_block[ghost_index:ghost_index+3]
def opt_to_sp(self):
# Make an optgeom calculation into single_point
if 'OPTGEOM\n' in self.geom_block:
init = self.geom_block.index('OPTGEOM\n')
final = self.geom_block.index('END\n')
del self.geom_block[init:final+1]
def sp_to_opt(self,opt_options = []):
# Make a single point calculation into an optgeom
if 'OPTGEOM\n' not in self.geom_block:
if self.is_basisset == True:
self.geom_block.append('OPTGEOM\n')
self.geom_block.extend(opt_options)
self.geom_block.append('END\n')
else:
self.geom_block.insert(-1, 'OPTGEOM\n')
for option in opt_options:
self.geom_block.insert(-1,option)
self.geom_block.insert(-1, 'END\n')
def print_input(self):
# Print the whole input file
if len(self.__dict__) > 0:
blocks = [self.geom_block, self.bs_block, self.func_block, self.scf_block]
if self.title is not None:
print(self.title[0][:-1])
else:
print('crystal_functions generated input')
for block in blocks:
for line in block:
if type(line) == list:
print(line[0][:-1])
print(line[1][:-1])
else:
print(line[:-1])
else:
print('The input is initialised, but the blocks were not defined')
class Crystal_output:
# This class reads a CRYSTAL output and generates an object
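# A hypothetical usage sketch (the output file name is a placeholder):
# out = Crystal_output().read_cry_output('mgo_optgeom.out')
# print(out.converged, out.get_final_energy(), out.get_band_gap())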
def __init__(self):
# Initialise the Crystal_output
pass
def read_cry_output(self,output_name):
# output_name: name of the output file
import sys
import re
self.name = output_name
# Check if the file exists
try:
if output_name[-3:] != 'out' and output_name[-4:] != 'outp':
output_name = output_name+'.out'
file = open(output_name, 'r')
self.data = file.readlines()
file.close()
except:
print('EXITING: a .out file needs to be specified')
sys.exit(1)
# Check the calculation converged
self.converged = False
for i, line in enumerate(self.data[::-1]):
if re.match(r'^ EEEEEEEEEE TERMINATION', line):
self.converged = True
# This is the end of output
self.eoo = len(self.data)-1-i
break
if self.converged == False:
self.eoo = len(self.data)
return self
def get_dimensionality(self):
# Get the dimensionality of the system
import re
for line in self.data:
if re.match(r'\sGEOMETRY FOR WAVE FUNCTION - DIMENSIONALITY OF THE SYSTEM', line) != None:
self.dimensionality = int(line.split()[9])
return self.dimensionality
def get_final_energy(self):
# Get the final energy of the system
import re
self.final_energy = None
for line in self.data[self.eoo::-1]:
if re.match(r'\s\W OPT END - CONVERGED', line) != None:
self.final_energy = float(line.split()[7])*27.2114
elif re.match(r'^ == SCF ENDED', line) != None:
self.final_energy = float(line.split()[8])*27.2114
if self.final_energy == None:
print('WARNING: no final energy found in the output file. energy = None')
return self.final_energy
def get_scf_convergence(self, all_cycles=False):
# Returns the scf convergence energy and energy difference
# all_cycles == True returns all the steps for a geometry opt
import re
import numpy as np
self.scf_energy = []
self.scf_deltae = []
scf_energy = []
scf_deltae = []
for line in self.data:
if re.match(r'^ CYC ', line):
scf_energy.append(float(line.split()[3]))
scf_deltae.append(float(line.split()[5]))
if re.match(r'^ == SCF ENDED - CONVERGENCE ON ENERGY', line):
if all_cycles == False:
self.scf_energy = np.array(scf_energy)*27.2114
self.scf_deltae = np.array(scf_deltae)*27.2114
return self.scf_energy, self.scf_deltae
elif all_cycles == True:
self.scf_energy.append(scf_energy)
self.scf_deltae.append(scf_deltae)
scf_energy = []
scf_deltae = []
self.scf_convergence = [self.scf_energy, self.scf_deltae]
return self.scf_convergence
def get_opt_convergence_energy(self):
# Returns the energy for each opt step
self.opt_energy = []
for line in self.data:
if re.match(r'^ == SCF ENDED - CONVERGENCE ON ENERGY', line):
self.opt_energy.append(float(line.split()[8])*27.2114)
return self.opt_energy
def get_num_cycles(self):
# Returns the number of scf cycles
import re
for line in self.data[::-1]:
if re.match(r'^ CYC ', line):
self.num_cycles = int(line.split()[1])
return self.num_cycles
return None
def get_fermi_energy(self):
# Returns the system Fermi energy
import re
self.fermi_energy = None
for i, line in enumerate(self.data[len(self.data)::-1]):
# This is in case the .out is from a BAND calculation
if re.match(r'^ TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT BAND', self.data[len(self.data)-(i+4)]) != None:
for j, line1 in enumerate(self.data[len(self.data)-i::-1]):
if re.match(r'^ ENERGY RANGE ', line1):
self.fermi_energy = float(line1.split()[7])*27.2114
# Define from what type of calculation the Fermi energy was extracted
self.efermi_from = 'band'
break
# This is in case the .out is from a DOSS calculation
if re.match(r'^ TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT DOSS', self.data[len(self.data)-(i+4)]) != None:
for j, line1 in enumerate(self.data[len(self.data)-i::-1]):
if re.match(r'^ N. OF SCF CYCLES ', line1):
self.fermi_energy = float(line1.split()[7])*27.2114
# Define from what type of calculation the Fermi energy was extracted
self.efermi_from = 'doss'
break
# This is in case the .out is from a sp/optgeom calculation
# For non metals think about top valence band
else:
for j, line1 in enumerate(self.data[:i:-1]):
if re.match(r'^ FERMI ENERGY:', line1) != None:
self.fermi_energy = float(line1.split()[2])*27.2114
self.efermi_from = 'scf'
break
if re.match(r'^ POSSIBLY CONDUCTING STATE - EFERMI', line1) != None:
self.fermi_energy = float(line1.split()[5]) * 27.2114
self.efermi_from = 'scf'
break
if self.fermi_energy == None:
for j, line1 in enumerate(self.data[:i:-1]):
if re.match(r'^ TOP OF VALENCE BANDS', line1) != None:
self.fermi_energy = float(
line1.split()[10])*27.2114
self.efermi_from = 'scf_top_valence'
break
if self.fermi_energy == None:
print('WARNING: no Fermi energy found in the output file. efermi = None')
return self.fermi_energy
def get_primitive_lattice(self, initial=True):
# Returns the primitive lattice of the system
# Initial == False: read the last lattice vectors. Useful in case of optgeom
import re
import numpy as np
lattice = []
self.primitive_lattice = None
if initial == True:
for i, line in enumerate(self.data):
if re.match(r'^ DIRECT LATTICE VECTORS CARTESIAN', line):
for j in range(i+2, i+5):
lattice_line = [float(n) for n in self.data[j].split()]
lattice.append(lattice_line)
self.primitive_lattice = np.array(lattice)
break
elif initial == False:
for i, line in enumerate(self.data[::-1]):
if re.match(r'^ DIRECT LATTICE VECTORS CARTESIAN', line):
for j in range(len(self.data)-i+1, len(self.data)-i+4):
lattice_line = [float(n) for n in self.data[j].split()]
lattice.append(lattice_line)
self.primitive_lattice = np.array(lattice)
break
if lattice == []:
print('WARNING: no lattice vectors found in the output file. lattice = []')
return self.primitive_lattice
def get_reciprocal_lattice(self, initial=True):
# Returns the reciprocal primitive lattice of the system
# Initial == False: read the last reciprocal lattice vectors. Useful in case of optgeom
import re
import numpy as np
lattice = []
if initial == True:
for i, line in enumerate(self.data):
if re.match(r'^ DIRECT LATTICE VECTORS COMPON. \(A.U.\)', line):
for j in range(i+2, i+5):
lattice_line = [
float(n)/0.52917721067121 for n in self.data[j].split()[3:]]
lattice.append(lattice_line)
self.reciprocal_lattice = np.array(lattice)
return self.reciprocal_lattice
elif initial == False:
for i, line in enumerate(self.data[::-1]):
if re.match(r'^ DIRECT LATTICE VECTORS COMPON. \(A.U.\)', line):
for j in range(len(self.data)-i+1, len(self.data)-i+4):
lattice_line = [
float(n)/0.52917721067121 for n in self.data[j].split()[3:]]
lattice.append(lattice_line)
self.reciprocal_lattice = np.array(lattice)
return self.reciprocal_lattice
return None
def get_band_gap(self):
# Returns the system band gap
import re
import numpy as np
# Check if the system is spin polarised
self.spin_pol = False
for line in self.data:
if re.match(r'^ SPIN POLARIZED', line):
self.spin_pol = True
break
for i, line in enumerate(self.data[len(self.data)::-1]):
if self.spin_pol == False:
if re.match(r'^\s\w+\s\w+ BAND GAP', line):
self.band_gap = float(line.split()[4])
return self.band_gap
elif re.match(r'^\s\w+ ENERGY BAND GAP', line):
self.band_gap = float(line.split()[4])
return self.band_gap
elif re.match(r'^ POSSIBLY CONDUCTING STATE', line):
self.band_gap = False
return self.band_gap
else:
# This might need some more work
band_gap_spin = []
if re.match(r'\s+ BETA \s+ ELECTRONS', line):
band_gap_spin.append(
float(self.data[len(self.data)-i-3].split()[4]))
band_gap_spin.append(
float(self.data[len(self.data)-i+3].split()[4]))
self.band_gap = np.array(band_gap_spin)
return self.band_gap
if band_gap_spin == []:
print(
'DEV WARNING: check this output and the band gap function in file_readwrite')
def get_last_geom(self, write_gui_file=True, symm_info='pymatgen'):
# Return the last optimised geometry
# write_gui_file == True writes the last geometry to the gui file
# symm_info == 'pymatgen' uses the symmetry info from a pymatgen object
# otherwise it is taken from the existing gui file
import re
from mendeleev import element
import numpy as np
import sys
from pymatgen.core.structure import Structure, Molecule
from crystal_functions.convert import cry_pmg2gui
dimensionality = self.get_dimensionality()
# Check if the geometry optimisation converged
self.opt_converged = False
for line in self.data:
if re.match(r'^ FINAL OPTIMIZED GEOMETRY', line):
self.opt_converged = True
break
# Find the last geometry
for i, line in enumerate(self.data):
if re.match(r' TRANSFORMATION MATRIX PRIMITIVE-CRYSTALLOGRAPHIC CELL', line):
trans_matrix_flat = [float(x) for x in self.data[i+1].split()]
self.trans_matrix = []
for i in range(0, len(trans_matrix_flat), 3):
self.trans_matrix.append(trans_matrix_flat[i:i+3])
self.trans_matrix = np.array(self.trans_matrix)
for i, line in enumerate(self.data[len(self.data)::-1]):
if re.match(r'^ T = ATOM BELONGING TO THE ASYMMETRIC UNIT', line):
self.n_atoms = int(self.data[len(self.data)-i-3].split()[0])
self.atom_positions = []
self.atom_symbols = []
self.atom_numbers = []
for j in range(self.n_atoms):
atom_line = self.data[len(
self.data)-i-2-int(self.n_atoms)+j].split()[3:]
self.atom_symbols.append(str(atom_line[0]))
self.atom_positions.append(
[float(x) for x in atom_line[1:]]) # These are fractional
for atom in self.atom_symbols:
self.atom_numbers.append(
element(atom.capitalize()).atomic_number)
self.atom_positions_cart = np.array(self.atom_positions)
if dimensionality > 0:
lattice = self.get_primitive_lattice(initial=False)
else:
min_max = max( [
(max(self.atom_positions_cart[:,0]) - min(self.atom_positions_cart[:,0])),
(max(self.atom_positions_cart[:,1]) - min(self.atom_positions_cart[:,1])),
(max(self.atom_positions_cart[:,2]) - min(self.atom_positions_cart[:,2]))
] )
lattice = np.identity(3)*(min_max+10)
if dimensionality > 0:
self.atom_positions_cart[:dimensionality] = np.matmul(
np.array(self.atom_positions)[:,:dimensionality], lattice[:dimensionality,:dimensionality])
self.cart_coords = []
for i in range(len(self.atom_numbers)):
self.cart_coords.append([self.atom_numbers[i], self.atom_positions_cart[i]
[0], self.atom_positions_cart[i][1], self.atom_positions_cart[i][2]])
self.cart_coords = np.array(self.cart_coords)
if dimensionality > 0:
lattice = self.get_primitive_lattice(initial=False)
else:
min_max = max( [
(max(self.cart_coords[:,0]) - min(self.cart_coords[:,0])),
(max(self.cart_coords[:,1]) - min(self.cart_coords[:,1])),
(max(self.cart_coords[:,2]) - min(self.cart_coords[:,2]))
] )
lattice = np.identity(3)*(min_max+10)
# Write the gui file
if write_gui_file == True:
# Write the gui file
# This is a duplication from write_gui, but the input is different
# It requires both the output and gui files with the same name and in the same directory
if symm_info == 'pymatgen':
if self.name[-3:] == 'out':
gui_file = self.name[:-4]+'.gui'
elif self.name[-4:] == 'outp':
gui_file = self.name[:-5]+'.gui'
else:
gui_file = self.name+'.gui'
structure = Structure(lattice, self.atom_numbers,
self.atom_positions_cart, coords_are_cartesian=True)
gui_object = cry_pmg2gui(structure)
write_crystal_gui(gui_file, gui_object)
else:
gui_file = symm_info
try:
file = open(gui_file, 'r')
gui_data = file.readlines()
file.close()
except:
print(
                        'EXITING: a .gui file with the same name as the input needs to be present in the directory.')
sys.exit(1)
# Replace the lattice vectors with the optimised ones
for i, vector in enumerate(lattice.tolist()):
gui_data[i+1] = ' '.join([str(x)
for x in vector])+'\n'
n_symmops = int(gui_data[4])
for i in range(len(self.atom_numbers)):
gui_data[i+n_symmops*4+6] = '{} {}\n'.format(
self.atom_numbers[i], ' '.join(str(x) for x in self.atom_positions_cart[i][:]))
with open(gui_file[:-4]+'_last.gui', 'w') as file:
for line in gui_data:
file.writelines(line)
self.last_geom = [lattice.tolist(
), self.atom_numbers, self.atom_positions_cart.tolist()]
return self.last_geom
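    # Usage sketch (assumed example path): get_last_geom returns
    # [lattice, atomic numbers, cartesian coordinates]; with write_gui_file=True it
    # also writes a '<name>_last.gui' file as coded above.
    # >>> out = Crystal_output('opt.out')
    # >>> lattice, numbers, coords = out.get_last_geom(write_gui_file=False)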
def get_symm_ops(self):
# Return the symmetry operators
import re
import numpy as np
symmops = []
for i, line in enumerate(self.data):
if re.match(r'^ \*\*\*\* \d+ SYMMOPS - TRANSLATORS IN FRACTIONAL UNITS', line):
self.n_symm_ops = int(line.split()[1])
for j in range(0, self.n_symm_ops):
symmops.append([float(x)
for x in self.data[i+3+j].split()[2:]])
self.symm_ops = np.array(symmops)
return self.symm_ops
def get_forces(self, initial=False, grad=False):
# Return the forces from an optgeom calculation
# initial == False returns the last calculated forces
# grad == False does not return the gradient on atoms
if ' OPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPTOPT\n' not in self.data:
print('WARNING: this is not a geometry optimisation.')
return None
else:
import re
import numpy as np
self.forces_atoms = []
self.forces_cell = []
# Number of atoms
for i, line in enumerate(self.data[len(self.data)::-1]):
if re.match(r'^ T = ATOM BELONGING TO THE ASYMMETRIC UNIT', line):
self.n_atoms = int(self.data[len(self.data)-i-3].split()[0])
break
if grad == True:
self.grad = []
self.rms_grad = []
self.disp = []
self.rms_disp = []
for i, line in enumerate(self.data):
if re.match(r'^ MAX GRADIENT', line):
self.grad.append(line.split()[2])
if re.match(r'^ RMS GRADIENT', line):
self.rms_grad.append(line.split()[2])
if re.match(r'^ MAX DISPLAC.', line):
self.disp.append(line.split()[2])
if re.match(r'^ RMS DISPLAC.', line):
self.rms_disp.append(line.split()[2])
if initial == True:
for i, line in enumerate(self.data):
if re.match(r'^ CARTESIAN FORCES IN HARTREE/BOHR \(ANALYTICAL\)', line):
for j in range(i+2, i+2+self.n_atoms):
self.forces_atoms.append(
[float(x) for x in self.data[j].split()[2:]])
self.forces_atoms = np.array(self.forces_atoms)
if re.match(r'^ GRADIENT WITH RESPECT TO THE CELL PARAMETER IN HARTREE/BOHR', line):
for j in range(i+4, i+7):
self.forces_cell.append(
[float(x) for x in self.data[j].split()])
self.forces_cell = np.array(self.forces_cell)
self.forces = [self.forces_cell, self.forces_atoms]
return self.forces
elif initial == False:
for i, line in enumerate(self.data[::-1]):
if re.match(r'^ GRADIENT WITH RESPECT TO THE CELL PARAMETER IN HARTREE/BOHR', line):
for j in range(len(self.data)-i+3, len(self.data)-i+6):
self.forces_cell.append(
[float(x) for x in self.data[j].split()])
self.forces_cell = np.array(self.forces_cell)
if re.match(r'^ CARTESIAN FORCES IN HARTREE/BOHR \(ANALYTICAL\)', line):
for j in range(len(self.data)-i+1, len(self.data)-i+1+self.n_atoms):
self.forces_atoms.append(
[float(x) for x in self.data[j].split()[2:]])
self.forces_atoms = np.array(self.forces_atoms)
self.forces = [self.forces_cell, self.forces_atoms]
return self.forces
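    # Usage sketch (assumed example path): for an optimisation output the method
    # returns [cell gradients, atomic forces] in Hartree/Bohr as parsed above.
    # >>> out = Crystal_output('opt.out')
    # >>> cell_forces, atom_forces = out.get_forces(initial=False, grad=True)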
def get_mulliken_charges(self):
# Return the Mulliken charges (PPAN keyword in input)
import re
self.mulliken_charges = []
for i, line in enumerate(self.data):
if re.match(r'^ MULLIKEN POPULATION ANALYSIS', line):
for j in range(len(self.data[i:])):
line1 = self.data[i+4+j].split()
if line1 == []:
return self.mulliken_charges
elif line1[0].isdigit() == True:
self.mulliken_charges.append(float(line1[3]))
return self.mulliken_charges
def get_config_analysis(self):
        # Return the configuration analysis for solid solutions (CONFCNT keyword in input)
import re
import numpy as np
# Check this is a configuration analysis calculation
try:
begin = self.data.index(
' CONFIGURATION ANALYSIS\n')
except:
return "WARNING: this is not a CONFCNT analysis."
for i, line in enumerate(self.data[begin:]):
if re.match(r'^ COMPOSITION', line):
self.n_classes = line.split()[9]
original_atom = str(line.split()[2])
begin = begin+i
config_list = []
# Read all the configurations
for line in self.data[begin:]:
if not re.match(r'^ WARNING', line):
config_list.extend(line.split())
config_list = np.array(config_list)
warning = np.where(config_list == 'WARNING')
config_list = np.delete(config_list, warning)
atom1_begin = np.where(config_list == original_atom)[0]
atom1_end = np.where(
config_list == '------------------------------------------------------------------------------')[0]
atom2_begin = np.where(config_list == 'XX')[0]
atom2_end = np.where(
config_list == '<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>')[0]
end = np.where(
config_list == '===============================================================================')[0][-1]
atom2_end = np.append(atom2_end, end)
atom_type1 = []
atom_type2 = []
config_list = config_list.tolist()
for i in range(len(atom1_end)):
atom_type1.append(
[int(x) for x in config_list[atom1_begin[i+1]+1:atom1_end[i]]])
atom_type2.append(
[int(x) for x in config_list[atom2_begin[i]+1:atom2_end[i]]])
self.atom_type1 = atom_type1
self.atom_type2 = atom_type2
return [self.atom_type1, self.atom_type2]
class Properties_input:
# This creates a properties_input object
def __init__(self, input_name=None):
#Initialise the object
self.is_newk = False
def from_file(self, input_name):
# input_name is the path to an existing properties input
import sys
self.name = input_name
if input_name is not None:
try:
                if input_name[-2:] != 'd3':
                    input_name = input_name+'.d3'
file = open(input_name, 'r')
self.data = file.readlines()
file.close()
except:
print('EXITING: a .d3 file needs to be specified')
sys.exit(1)
# Check if NEWK is in the input
if 'NEWK\n' in self.data:
self.is_newk = True
self.newk_block = self.data[0:2]
self.property_block = self.data[2:]
else:
self.is_newk = False
self.property_block = self.data
return self
def make_newk_block(self, shrink1, shrink2, Fermi=1, print_option=0):
# Returns the newk block
        # shrink1 and shrink2 are the newk shrinking factors
        # Fermi: 1 if the Fermi energy is recalculated, 0 if not
        # print_option: properties printing options
self.is_newk = True
self.newk_block = ['NEWK\n', '%s %s\n' % (shrink1, shrink2),
'%s %s\n' % (Fermi, print_option)]
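    # Worked example of the resulting block (the shrinking factors are illustrative):
    # >>> prop = Properties_input()
    # >>> prop.make_newk_block(12, 12, Fermi=1, print_option=0)
    # >>> prop.newk_block
    # ['NEWK\n', '12 12\n', '1 0\n']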
def make_bands_block(self, k_path, n_kpoints, first_band, last_band, print_eig=0, print_option=1,
title='BAND STRUCTURE CALCULATION'):
# Return the bands block to be used in a bands calculation
# k_path can be:
        # list of lists
# pymatgen HighSymmKpath object
# first_band: first band for bands calculation
# last_band: last band for bands calculation
# print_eig: printing options for eigenvalues
# print_option: properties printing options
import numpy as np
import sys
bands_block = []
# path from a pymatgen k_path object
if 'HighSymmKpath' in str(type(k_path)):
k_path_flat = [item for sublist in k_path.kpath['path']
for item in sublist]
k_path_pmg = []
for i in k_path_flat:
# This is a pmg HighSymmKpath object
k_path_pmg.append(k_path.kpath['kpoints'][i].tolist())
k_path = np.array(k_path_pmg)
elif type(k_path[0]) == list:
# This is a list of lists
k_path = np.array(k_path)
else:
            print('EXITING: k_path type must be a list of lists (k coordinates) or\
a pymatgen HighSymmKpath object. %s selected' % type(k_path))
sys.exit(1)
k_unique = np.unique(k_path)
# Find the shrinking factor
k_unique = np.array(np.around(k_unique, 4)*10000, dtype=int)
if len(k_unique) > 2:
gcd = np.gcd.reduce(k_unique)
else:
gcd = np.gcd(k_unique[0], k_unique[1])
k_path = np.array((k_path/gcd)*10000, dtype=int)
shrink = int(10000/gcd)
bands_block.append('BAND\n')
bands_block.append(title+'\n')
bands_block.append(str(len(k_path)-1)+' '+str(shrink)+' '+str(n_kpoints) +
' '+str(first_band)+' '+str(last_band)+' ' +
str(print_option)+' '+str(print_eig)+'\n')
# Add the symmetry lines
for i in range(len(k_path[:-1])):
bands_block.append(' '.join([str(x) for x in k_path[i]])+' ' +
' '.join([str(x) for x in k_path[i+1]])+'\n')
bands_block.append('END\n')
self.property_block = bands_block
return self
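    # Usage sketch with an explicit k-path given as a list of lists (fractional
    # coordinates); a pymatgen HighSymmKpath object is handled the same way above.
    # >>> prop = Properties_input()
    # >>> prop.make_bands_block([[0, 0, 0], [0.5, 0, 0], [0.5, 0.5, 0]],
    # ...                       n_kpoints=200, first_band=1, last_band=26)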
def make_doss_block(self, n_points=200, band_range=None, e_range=None, plotting_option=2,
poly=12, print_option=1):
# Return the doss block to be used in a doss calculation
# n_points : number of points in the energy range
# band range: which bands to include in the doss calculation
# e_range: in eV
        # plotting_option: properties printing options
# poly: maximum exponent for the polynomial fit
# print_option: properties printing options
import sys
# either band_range or e_range needs to be specified
doss_block = []
if band_range == None and e_range == None:
print('EXITING: please specify either band_range or e_range. None selected')
sys.exit(1)
elif band_range != None and e_range != None:
print('EXITING: please specify either band_range or e_range. Both selected')
sys.exit(1)
elif type(band_range) == list and len(band_range) == 2:
doss_range = band_range
elif type(e_range) == list and len(e_range) == 2:
doss_range = [-1, -1]
else:
print('EXITING: either the band_range argument or the e_range argument\
                does not match the required format (2 item list)')
sys.exit(1)
doss_block.append('DOSS\n')
doss_block.append(str(0)+' '+str(n_points)+' '+str(doss_range[0])+' ' +
str(doss_range[1])+' '+str(plotting_option)+' '+str(poly)+' ' +
str(print_option)+'\n')
if doss_range == [-1, -1]:
doss_block.append(
str(e_range[0]/27.2114)+' '+str(e_range[1]/27.2114)+'\n')
doss_block.append('END\n')
self.property_block = doss_block
return self
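    # Usage sketch: a total DOS over an explicit energy window; the eV values are
    # converted to Hartree above through the 27.2114 factor.
    # >>> prop = Properties_input()
    # >>> prop.make_doss_block(n_points=300, e_range=[-10, 10])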
def make_pdoss_block(self, projections, proj_type='atom', output_file=None, n_points=200, band_range=None,
e_range=None, plotting_option=2, poly=12, print_option=1):
# Return the pdoss block to be used in a pdoss calculation
# projections is a list of lists of atoms or atomic orbitals
# n_points : number of points in the energy range
# proj_type == 'atom' is an atom projected DOSS, proj_type == 'ao' is an atomic orbital projected DOSS
# e_range: in eV
        # plotting_option: properties printing options
# poly: maximum exponent for the polynomial fit
# print_option: properties printing options
import sys
pdoss_block = []
if band_range == None and e_range == None:
print('EXITING: please specify either band_range or e_range. None selected')
sys.exit(1)
elif band_range != None and e_range != None:
print('EXITING: please specify either band_range or e_range. Both selected')
sys.exit(1)
elif type(band_range) == list and len(band_range) == 2:
pdoss_range = band_range
range_is_bands = True
elif type(e_range) == list and len(e_range) == 2:
pdoss_range = [-1,-1]
range_is_bands = False
else:
print('EXITING: either the band_range argument or the e_range argument\
                does not match the required format (2 item list)')
sys.exit(1)
pdoss_block.append('PDOS\n')
pdoss_block.append(str(len(projections))+' '+str(n_points)+' '+str(pdoss_range[0])+' ' +
str(pdoss_range[1])+' '+str(plotting_option)+' '+str(poly)+' ' +
str(print_option)+'\n')
        if range_is_bands == False:
            pdoss_block.append(
                str(round(e_range[0]/27.2114, 6))+' '+str(round(e_range[1]/27.2114, 6))+'\n')
flat_proj = [x for sublist in projections for x in sublist]
if all(isinstance(x, int) for x in flat_proj):
if proj_type == 'atom':
for proj in projections:
pdoss_block.append(str(-len(proj))+' ' +
' '.join([str(x) for x in proj])+'\n')
if proj_type == 'ao':
for proj in projections:
pdoss_block.append(str(len(proj))+' ' +
' '.join([str(x) for x in proj])+'\n')
elif proj_type != 'atom' and proj_type != 'ao':
print(
'EXITING: please specify either atom or ao projection. %s selected' % proj_type)
sys.exit(1)
elif all(isinstance(x, str) for x in flat_proj):
if output_file == None:
print(
                    'EXITING: please specify an output file to use the atoms projection.')
sys.exit(1)
else:
output = Crystal_output(output_file)
output.get_last_geom()
atoms_symbols = output.atom_symbols
atoms_symbols.insert(0, 0)
for proj in projections:
atom_positions_list = []
for element in proj:
index = [i for i, ele in enumerate(
atoms_symbols) if ele == element.upper()]
atom_positions_list.append([str(x) for x in index])
pdoss_block.append(
str(-len(index))+' '+' '.join([str(x) for x in index])+'\n')
pdoss_block.append('END\n')
self.property_block = pdoss_block
return self
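# Usage sketch for an atom-projected DOS (file name is an assumed example): the
# string-based projection needs the matching CRYSTAL output so that the atom
# indices can be recovered through Crystal_output.get_last_geom(), as coded above.
# >>> prop = Properties_input()
# >>> prop.make_newk_block(12, 12)
# >>> prop.make_pdoss_block([['O'], ['Ti']], proj_type='atom',
# ...                       output_file='tio2.out', e_range=[-20, 10])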
class Properties_output:
# This creates a properties_output object
def __init__(self):
# properties_output is the properties output file
pass
def read_file(self, properties_output):
# Function to parse the properties output file.
        # It is not meant to be called directly, but to be used by the
# functions below to read the properties file.
import sys
import os
self.file_name = properties_output
try:
file = open(self.file_name, 'r')
self.data = file.readlines()
file.close()
#directory
dir_name = os.path.split(properties_output)[0]
self.abspath = os.path.join(dir_name)
#title (named "title" only to distinguish from "file_name" which means another thing)
self.title = os.path.split(properties_output)[1]
except:
print('EXITING: a CRYSTAL properties file needs to be specified')
sys.exit(1)
def read_cry_bands(self, properties_output):
        # This method builds the bands object by reading a CRYSTAL band file.
        # Band energies are stored in self.bands and expressed in eV.
import re
import numpy as np
self.read_file(properties_output)
data = self.data
# Read the information about the file
# number of k points in the calculation
self.n_kpoints = int(data[0].split()[2])
# number of bands in the calculation
self.n_bands = int(data[0].split()[4])
self.spin = int(data[0].split()[6]) # number of spin
# number of tick in the band plot
self.n_tick = int(data[1].split()[2])+1
self.k_point_inp_coordinates = []
self.n_points = []
# finds all the coordinates of the ticks and the k points
for i in range(self.n_tick):
self.n_points.append(int(data[2+i].split()[1]))
coord = []
for j in range(3):
l = re.findall('\d+', data[2+i].split()[2])
coord.append(float(l[j])/float(l[3]))
self.k_point_inp_coordinates.append(coord)
self.k_point_inp_coordinates = np.array(self.k_point_inp_coordinates)
self.k_point_coordinates = [self.k_point_inp_coordinates[0]]
for i in range(1, self.n_tick):
step = (self.k_point_inp_coordinates[i]-self.k_point_inp_coordinates[i-1])/float(
self.n_points[i]-self.n_points[i-1])
for j in range(self.n_points[i]-self.n_points[i-1]):
# coordinates of the k_points in the calculation
self.k_point_coordinates.append(
(self.k_point_inp_coordinates[i-1]+step*float(j+1)).tolist())
self.tick_position = [] # positions of the ticks
self.tick_label = [] # tick labels
for i in range(self.n_tick):
self.tick_position.append(
float(data[16+self.n_tick+i*2].split()[4]))
self.tick_label.append(
str(data[17+self.n_tick+i*2].split()[3][2:]))
self.efermi = float(data[-1].split()[3])*27.2114
# Allocate the bands as np arrays
self.bands = np.zeros(
(self.n_bands, self.n_kpoints, self.spin), dtype=float)
# Allocate the k_points a one dimensional array
self.k_point_plot = np.zeros(self.n_kpoints)
# line where the first band is. Written this way to help identify
        # where the error might be if there are different file lengths
first_k = 2 + self.n_tick + 14 + 2*self.n_tick + 2
# Read the bands and store them into a numpy array
for i, line in enumerate(data[first_k:first_k+self.n_kpoints]):
self.bands[:self.n_bands+1, i,
0] = np.array([float(n) for n in line.split()[1:]])
self.k_point_plot[i] = float(line.split()[0])
if self.spin == 2:
# line where the first beta band is. Written this way to help identify
first_k_beta = first_k + self.n_kpoints + 15 + 2*self.n_tick + 2
for i, line in enumerate(data[first_k_beta:-1]):
self.bands[:self.n_bands+1, i,
1] = np.array([float(n) for n in line.split()[1:]])
# Convert all the energy to eV
self.bands[:, :, :] = self.bands[:, :, :]*27.2114
return self
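    # Usage sketch (the band file name is an assumed example of what the CRYSTAL
    # properties run writes):
    # >>> bands = Properties_output().read_cry_bands('mgo_BAND.DAT')
    # >>> bands.bands.shape    # (n_bands, n_kpoints, spin), energies already in eV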
def read_cry_doss(self, properties_output):
        # This method builds the doss object by reading a CRYSTAL DOSS file.
        # Energies are stored in the first column of self.doss and expressed in eV.
import re
import numpy as np
self.read_file(properties_output)
data = self.data
# Read the information about the file
self.n_energy = int(data[0].split()[2])
self.n_proj = int(data[0].split()[4])
self.spin = int(data[0].split()[6])
self.efermi = float(data[-1].split()[3])*27.2114
first_energy = 4
# Allocate the doss as np arrays
self.doss = np.zeros(
(self.n_energy, self.n_proj+1, self.spin), dtype=float)
# Read the doss and store them into a numpy array
for i, line in enumerate(data[first_energy:first_energy+self.n_energy]):
self.doss[i, :self.n_proj+1,
0] = np.array([float(n) for n in line.split()])
if self.spin == 2:
# line where the first beta energy is. Written this way to help identify
first_energy_beta = first_energy + self.n_energy + 3
for i, line in enumerate(data[first_energy_beta:-1]):
self.doss[i, :self.n_proj+1,
1] = np.array([float(n) for n in line.split()])
# Convert all the energy to eV
self.doss[:, 0, :] = self.doss[:, 0, :]*27.2114
return self
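    # Usage sketch (assumed example file name): column 0 of self.doss holds the
    # energies in eV, the remaining columns the projected densities of states.
    # >>> doss = Properties_output().read_cry_doss('mgo_DOSS.DAT')
    # >>> energies, first_proj = doss.doss[:, 0, 0], doss.doss[:, 1, 0]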
def read_cry_contour(self, properties_output):
import sys
import re
import pandas as pd
import numpy as np
self.read_file(properties_output)
filename = self.abspath
        if filename.endswith(('SURFRHOO.DAT', 'SURFLAPP.DAT', 'SURFLAPM.DAT',
                              'SURFGRHO.DAT', 'SURFELFB.DAT', 'SURFVIRI.DAT',
                              'SURFGKIN.DAT', 'SURFKKIN.DAT', 'SURFRHOO_ref.DAT',
                              'SURFLAPP_ref.DAT', 'SURFLAPM_ref.DAT', 'SURFELFB_ref.DAT')):
pass
else:
sys.exit('please, choose a valid file or rename it properly')
tipo = ''
if (filename.endswith('SURFRHOO.DAT')) or (filename.endswith('SURFRHOO_ref.DAT')):
self.tipo = 'SURFRHOO'
self.path = filename
elif (filename.endswith('SURFLAPP.DAT')) or (filename.endswith('SURFLAPP_ref.DAT')):
self.tipo = 'SURFLAPP'
self.path = filename
elif (filename.endswith('SURFLAPM.DAT')) or (filename.endswith('SURFLAPM_ref.DAT')):
self.tipo = 'SURFLAPM'
self.path = filename
elif (filename.endswith('SURFGRHO.DAT')):
self.tipo = 'SURFGRHO'
self.path = filename
elif (filename.endswith('SURFELFB.DAT')) or (filename.endswith('SURFELFB_ref.DAT')):
self.tipo = 'SURFELFB'
self.path = filename
elif (filename.endswith('SURFVIRI.DAT')):
self.tipo = 'SURFVIRI'
self.path = filename
elif (filename.endswith('SURFGKIN.DAT')):
self.tipo = 'SURFGKIN'
self.path = filename
elif (filename.endswith('SURFKKIN.DAT')):
self.tipo = 'SURFKKIN'
self.path = filename
factor = 0.529177249
l_dens = self.data
n_punti_x = int(l_dens[1].strip().split()[0])
n_punti_y = int(l_dens[1].strip().split()[1])
self.npx = n_punti_x
x_min = float(l_dens[2].strip().split()[0]) * factor
x_max = float(l_dens[2].strip().split()[1]) * factor
x_step = float(l_dens[2].strip().split()[2]) * factor
y_min = float(l_dens[3].strip().split()[0]) * factor
y_max = float(l_dens[3].strip().split()[1]) * factor
y_step = float(l_dens[3].strip().split()[2]) * factor
l_dens = l_dens[5:]
m_dens=[]
for i in l_dens:
m_dens.append(re.sub("\s\s+" , " ", i))
n_dens=[]
for i in m_dens:
n_dens.append(i.replace('\n','').split())
self.df = pd.DataFrame(n_dens)
self.x_points = np.linspace(x_min,x_max,n_punti_x)
self.y_points = np.linspace(y_min,y_max,n_punti_y)
a = x_max - x_min
b = y_max - y_min
r = a/b
self.x_graph_param = 10
self.y_graph_param = 10 / r
ctr1 = np.array([0.002,0.004,0.008,0.02,0.04,0.08,0.2,0.4,0.8,2,4,8,20])
colors1 = ['r','r','r','r','r','r','r','r','r','r','r','r','r']
ls1 = ['-','-','-','-','-','-','-','-','-','-','-','-','-']
ctr2 = np.array([-8,-4,-2,-0.8,-0.4,-0.2,-0.08,-0.04,-0.02,-0.008,-0.004,-0.002,0.002,0.004,0.008,0.02,0.04,0.08,
0.2,0.4,0.8,2,4,8])
colors2 = ['b','b','b','b','b','b','b','b','b','b','b','b','r','r','r','r','r','r','r','r','r','r','r','r']
ls2 = ['--','--','--','--','--','--','--','--','--','--','--','--','-','-','-','-','-','-','-','-','-','-','-','-']
ctr3 = np.array([0,0.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50,0.55,0.60,0.65,0.70,0.75,0.80,0.85,0.90,
0.95,1])
colors3 = ['k','b','b','b','b','b','b','b','b','b','b','r','r','r','r','r','r','r','r','r','r']
ls3 = ['dotted','--','--','--','--','--','--','--','--','--','--','-','-','-','-','-','-','-','-','-','-']
if (self.tipo == 'SURFRHOO') or (self.tipo == 'SURFRHOO_ref') or (self.tipo == 'SURFGRHO') or (self.tipo == 'SURFGKIN'):
self.levels = ctr1
self.colors = colors1
self.linestyles = ls1
self.fmt = '%1.3f'
elif (self.tipo == 'SURFLAPP') or (self.tipo == 'SURFLAPP_ref') or (self.tipo == 'SURFLAPM') or (self.tipo == 'SURFLAPM_ref') or (self.tipo == 'SURFVIRI') or (self.tipo == 'SURFKKIN'):
self.levels = ctr2
self.colors = colors2
self.linestyles = ls2
self.fmt = '%1.3f'
elif (self.tipo == 'SURFELFB') or (self.tipo == 'SURFELFB_ref'):
self.levels = ctr3
self.colors = colors3
self.linestyles = ls3
self.fmt = '%1.2f'
return self
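    # Usage sketch (assumed example file name): the grid and the contour settings
    # chosen above map directly onto matplotlib's contour() keywords.
    # >>> cnt = Properties_output().read_cry_contour('SURFELFB.DAT')
    # >>> cnt.levels, cnt.colors, cnt.linestyles   # ready for plt.contour(...)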
def read_cry_xrd_spec(self, properties_output):
import sys
import re
import pandas as pd
self.read_file(properties_output)
data = self.data
filename = self.abspath
title = self.title
if filename.endswith('.outp'):
pass
else:
sys.exit('please, choose a valid file or rename it properly')
spectrum = re.compile('2THETA INTENS INTENS-LP INTENS-LP-DW', re.DOTALL)
match = []
a=0
for line in data:
if spectrum.search(line):
match.append('WRITE LINE:' + line)
a=1
else:
match.append('WRONG LINE:' + line)
if (a == 0):
sys.exit('please, choose a valid file or rename it properly')
df = pd.DataFrame(match)
num_riga = (df[df[0].str.contains(u'WRITE')].index.values)
num_riga = num_riga[0]
match = match[num_riga:]
pattern=re.compile('\s+ \d+\.\d+ \s+ \d+\.\d+ \s+ \d+\.\d+ \s+ \d+\.\d+\n', re.DOTALL)
match_2 = []
for line in match:
if pattern.search(line):
                line = line.replace('WRONG LINE:', '')  # strip the 'WRONG LINE:' tag added above
match_2.append(line)
df = pd.DataFrame([i.strip().split() for i in match_2])
for i in range(0,4):
df[i] = df[i].astype(float)
df = df.rename(columns={0: '2THETA', 1: 'INTENS', 2: 'INTENS-LP', 3: 'INTENS-LP-DW'})
self.x = df['2THETA']
self.y = df['INTENS-LP']
self.title = title[:-1]
return self
def read_cry_rholine(self, properties_output):
import sys
import re
import pandas as pd
self.read_file(properties_output)
l_dens = self.data
filename = self.abspath
title = self.title
if filename.endswith('.RHOLINE'):
pass
else:
sys.exit('please, choose a valid file or rename it properly')
m_dens=[]
for i in l_dens:
m_dens.append(re.sub("\s\s+" , " ", i))
n_dens=[]
for i in m_dens:
n_dens.append(i.replace('\n','').split())
df_dens=pd.DataFrame(n_dens)
df_dens=df_dens.dropna()
for i in range(0,len(df_dens.columns)):
df_dens[i] = pd.to_numeric(df_dens[i])
self.x = (df_dens[0]-5.55)*0.529177249
self.y = df_dens[1]/0.148184743
self.title = title[:-4]
return self
def read_cry_seebeck(self, properties_output):
import sys
import re
import pandas as pd
self.read_file(properties_output)
data = self.data
filename = self.abspath
title = self.title
if filename.endswith('.DAT'):
pass
else:
sys.exit('please, choose a valid file or rename it properly')
spectrum = re.compile('Npoints', re.DOTALL)
match = []
for line in data:
if spectrum.search(line):
match.append('RIGHT LINE:' + line)
else:
match.append('WRONG LINE:' + line)
df = pd.DataFrame(match)
indx = list(df[df[0].str.contains("RIGHT")].index)
lin = []
for i in indx:
lin.append(i+1)
diffs = [abs(x - y) for x, y in zip(lin, lin[1:])]
        length = diffs[0] - 1  # length of the block between two 'RIGHT' lines
lif = []
for i in lin:
lif.append(i+length)
c = []
for i in range(len(lin)):
c.append(lin[i])
c.append(lif[i])
d = [c[i:i + 2] for i in range(0, len(c), 2)]
l = []
for i in range(0,len(d)):
pd.DataFrame(l.append(df[d[i][0]:d[i][1]]))
right = df[df[0].str.contains("RIGHT")]
right = right.reset_index().drop('index',axis=1)
self.temp = []
for i in range(0,len(right)):
            self.temp.append(float(str(right[0][i])[22:25]))  # safe because the file structure is always the same
ll = []
for k in range(0,len(l)):
ll.append(l[k].reset_index().drop('index',axis=1))
self.all_data = []
for k in range(0,len(ll)):
for i in ll[k]:
self.all_data.append(ll[k][i].apply(lambda x: x.replace('WRONG LINE:','')))
self.title = title
return self
def read_cry_lapl_profile(self, properties_output):
import pandas as pd
import re
import numpy as np
        self.read_file(properties_output)
        data = self.data
        filename = self.abspath
        title = self.title
spectrum = re.compile('PROFILE ALONG THE POINTS', re.DOTALL)
match = []
for line in data:
if spectrum.search(line):
match.append('RIGHT LINE: ' + line)
else:
match.append('WRONG LINE: ' + line)
df = pd.DataFrame(match)
num_riga = (df[df[0].str.contains(u'RIGHT')].index.values)
num_in = num_riga + 8
spectrum_fin = re.compile('EEEEEEEEEE TERMINATION DATE', re.DOTALL)
match_fin = []
for line in data:
if spectrum_fin.search(line):
match_fin.append('RIGHT LINE: ' + line)
else:
match_fin.append('WRONG LINE: ' + line)
df_fin = | pd.DataFrame(match_fin) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
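# Illustration of how this helper is driven further down in this module, e.g. in
# test_stat_op_calc:
#     assert_stat_op_calc('sum', np.sum, float_frame_with_na,
#                         skipna_alternative=np.nansum)
# where float_frame_with_na is the float frame fixture containing NaNs.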
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
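# The boolean helpers are exercised analogously to the stat helpers above, e.g.
# (illustrative calls mirroring the reduction tests in this module):
#     assert_bool_op_calc('any', np.any, bool_frame_with_na)
#     assert_bool_op_api('any', bool_frame_with_na, float_string_frame,
#                        has_bool_only=True)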
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
            from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludeds_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
        result = getattr(df, method)()
        expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
        tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_pct_change(self):
# GH#11150
pnl = DataFrame([np.arange(0, 40, 10),
np.arange(0, 40, 10),
np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [pd.Timestamp('1960-02-15'),
pd.Timestamp('1960-02-16'),
pd.NaT,
pd.NaT]
df = DataFrame({
"A": float_data,
"B": datetime_data
})
result = df.any(1)
expected = Series([True, True, True, False])
tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
# GH 25101
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None]})
result = df.all(bool_only=True)
        expected = Series(dtype=bool)
tm.assert_series_equal(result, expected)
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True]})
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func, data, expected', [
(np.any, {}, False),
(np.all, {}, True),
(np.any, {'A': []}, False),
(np.all, {'A': []}, True),
(np.any, {'A': [False, False]}, False),
(np.all, {'A': [False, False]}, False),
(np.any, {'A': [True, False]}, True),
(np.all, {'A': [True, False]}, False),
(np.any, {'A': [True, True]}, True),
(np.all, {'A': [True, True]}, True),
(np.any, {'A': [False], 'B': [False]}, False),
(np.all, {'A': [False], 'B': [False]}, False),
(np.any, {'A': [False, False], 'B': [False, True]}, True),
(np.all, {'A': [False, False], 'B': [False, True]}, False),
# other types
(np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
(np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
(np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
(np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
(np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
(np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
(np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
(np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
# # Mix
# GH 21484
# (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
# 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
# GH 19976
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
names=['out', 'in'])
)
xpr = "Must specify 'axis' when aggregating by level."
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level='out')
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH 4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# GH 16991
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH 4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH 16394
        df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
from __future__ import division
import os
import pandas as pd
import math
import numpy as np
from scipy.spatial import ConvexHull
import scipy
from configparser import ConfigParser
def extract_features_wotarget_4(inifile):
configFile = str(inifile)
config = ConfigParser()
config.read(configFile)
csv_dir = config.get('General settings', 'csv_path')
csv_dir_in = os.path.join(csv_dir, 'outlier_corrected_movement_location')
csv_dir_out = os.path.join(csv_dir, 'features_extracted')
vidInfPath = config.get('General settings', 'project_path')
vidInfPath = os.path.join(vidInfPath, 'logs')
vidInfPath = os.path.join(vidInfPath, 'video_info.csv')
vidinfDf = pd.read_csv(vidInfPath)
    # change video names to str
vidinfDf.Video = vidinfDf.Video.astype('str')
if not os.path.exists(csv_dir_out):
os.makedirs(csv_dir_out)
def count_values_in_range(series, values_in_range_min, values_in_range_max):
return series.between(left=values_in_range_min, right=values_in_range_max).sum()
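    # Illustrative example (not in the original script): Series.between is
    # inclusive on both ends, so count_values_in_range(pd.Series([1, 5, 10]), 0, 6)
    # returns 2.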
def angle3pt(ax, ay, bx, by, cx, cy):
ang = math.degrees(
math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx))
return ang + 360 if ang < 0 else ang
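    # Illustrative example (added for clarity, not in the original): angle3pt
    # returns the angle at vertex (bx, by) in degrees, normalised to [0, 360);
    # e.g. angle3pt(1, 0, 0, 0, 0, 1) == 90.0, a right angle at the origin.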
filesFound = []
roll_windows = []
roll_windows_values = [2, 5, 6, 7.5, 15]
loopy = 0
#REMOVE WINDOWS THAT ARE TOO SMALL
minimum_fps = vidinfDf['fps'].min()
for win in range(len(roll_windows_values)):
if minimum_fps < roll_windows_values[win]:
roll_windows_values[win] = minimum_fps
else:
pass
roll_windows_values = list(set(roll_windows_values))
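    # Worked example (illustrative): with minimum_fps = 5 the values
    # [2, 5, 6, 7.5, 15] are capped to [2, 5, 5, 5, 5] and deduplicated to
    # {2, 5}, so no rolling window later ends up shorter than one frame.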
########### FIND CSV FILES ###########
for i in os.listdir(csv_dir_in):
if i.__contains__(".csv"):
fname = os.path.join(csv_dir_in, i)
filesFound.append(fname)
print('Extracting features from ' + str(len(filesFound)) + ' files...')
########### CREATE PD FOR RAW DATA AND PD FOR MOVEMENT BETWEEN FRAMES ###########
for i in filesFound:
M1_hull_large_euclidean_list = []
M1_hull_small_euclidean_list = []
M1_hull_mean_euclidean_list = []
M1_hull_sum_euclidean_list = []
currentFile = i
currVidName = os.path.basename(currentFile)
currVidName = currVidName.replace('.csv', '')
# get current pixels/mm
currVideoSettings = vidinfDf.loc[vidinfDf['Video'] == currVidName]
try:
currPixPerMM = float(currVideoSettings['pixels/mm'])
except TypeError:
print('Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file')
fps = float(currVideoSettings['fps'])
        print('Processing ' + '"' + str(currVidName) + '".' + ' Fps: ' + str(fps) + ". pixels/mm: " + str(currPixPerMM))
for i in range(len(roll_windows_values)):
roll_windows.append(int(fps / roll_windows_values[i]))
loopy += 1
columnHeaders = ["Ear_left_x", "Ear_left_y", "Ear_left_p", "Ear_right_x", "Ear_right_y",
"Ear_right_p", "Nose_x", "Nose_y", "Nose_p", "Tail_base_x",
"Tail_base_y", "Tail_base_p"]
        csv_df = pd.read_csv(currentFile, names=columnHeaders, low_memory=False)
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import importlib.machinery
import sys
import os
import shutil
import yaml
loader = importlib.machinery.SourceFileLoader('config', sys.argv[1])
config = yaml.safe_load(open(sys.argv[1]).read())
output = sys.argv[2]
combined = pd.read_csv(output + '/combined.csv')
import numpy as np
import pytest
from pandas import Categorical, Series
import pandas._testing as tm
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, False, False, True, True, False])),
("last", Series([False, True, True, False, False, False, False])),
(False, Series([False, True, True, False, True, True, False])),
],
)
def test_drop_duplicates(any_numpy_dtype, keep, expected):
tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))
if tc.dtype == "bool":
pytest.skip("tested separately in test_drop_duplicates_bool")
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep=keep, inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, True])),
("last", Series([True, True, False, False])),
(False, Series([True, True, True, True])),
],
)
def test_drop_duplicates_bool(keep, expected):
tc = Series([True, False, True, False])
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
assert return_value is None
@pytest.mark.parametrize("values", [[], list(range(5))])
def test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):
tc = Series(values, dtype=np.dtype(any_numpy_dtype))
expected = Series([False] * len(tc), dtype="bool")
if tc.dtype == "bool":
# 0 -> False and 1-> True
# any other value would be duplicated
tc = tc[:2]
expected = expected[:2]
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
result_dropped = tc.drop_duplicates(keep=keep)
tm.assert_series_equal(result_dropped, tc)
# validate shallow copy
assert result_dropped is not tc
class TestSeriesDropDuplicates:
@pytest.fixture(
params=["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"]
)
def dtype(self, request):
return request.param
@pytest.fixture
def cat_series1(self, dtype, ordered):
# Test case 1
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
cat = Categorical(input1, categories=cat_array, ordered=ordered)
tc1 = Series(cat)
return tc1
def test_drop_duplicates_categorical_non_bool(self, cat_series1):
tc1 = cat_series1
expected = Series([False, False, False, True])
result = tc1.duplicated()
tm.assert_series_equal(result, expected)
result = tc1.drop_duplicates()
tm.assert_series_equal(result, tc1[~expected])
sc = tc1.copy()
return_value = sc.drop_duplicates(inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
def test_drop_duplicates_categorical_non_bool_keeplast(self, cat_series1):
tc1 = cat_series1
expected = Series([False, False, True, False])
result = tc1.duplicated(keep="last")
tm.assert_series_equal(result, expected)
result = tc1.drop_duplicates(keep="last")
tm.assert_series_equal(result, tc1[~expected])
sc = tc1.copy()
return_value = sc.drop_duplicates(keep="last", inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
def test_drop_duplicates_categorical_non_bool_keepfalse(self, cat_series1):
tc1 = cat_series1
expected = Series([False, False, True, True])
result = tc1.duplicated(keep=False)
tm.assert_series_equal(result, expected)
result = tc1.drop_duplicates(keep=False)
tm.assert_series_equal(result, tc1[~expected])
sc = tc1.copy()
return_value = sc.drop_duplicates(keep=False, inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
@pytest.fixture
def cat_series2(self, dtype, ordered):
# Test case 2; TODO: better name
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
        cat = Categorical(input2, categories=cat_array, ordered=ordered)
"""
author: <NAME>
time: 01/03/2017
link:
"""
import seaborn as sns
import numpy as np
import pandas as pd
import scipy.io as sio
from scipy import stats
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import anomaly
def main():
# Loading mat
mat_data = sio.loadmat('./data/ex8data1.mat')
# print('ex8data1 key', mat_data.keys())
X = mat_data.get('X')
X_val, X_test, y_val, y_test = train_test_split(mat_data.get('Xval'),
mat_data.get('yval').ravel(),
test_size=0.5)
data = pd.DataFrame(X, columns=['Latency', 'Throughput'])
# sns.regplot('Latency', 'Throughput', data=data, fit_reg=False,
# scatter_kws={'s': 30, 'alpha': 0.5})
# plt.show()
mu = X.mean(axis=0)
cov = np.cov(X.T)
# create multi-var Gaussian model
multi_normal = stats.multivariate_normal(mu, cov)
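    # Sketch of the anomaly criterion (illustrative; the actual logic lives in
    # the local `anomaly` module): a point is flagged when its density falls
    # below a threshold epsilon, e.g.
    #   p = multi_normal.pdf(X)      # density of each training point
    #   outliers = X[p < 1e-3]       # epsilon is chosen on validation data below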
# create a grid
x, y = np.mgrid[:30:0.1, :30:0.1]
pos = np.dstack((x, y))
fig, ax = plt.subplots()
ax.contourf(x, y, multi_normal.pdf(pos), cmap='Reds')
sns.regplot('Latency', 'Throughput',
data=data,
fit_reg=False,
ax=ax,
scatter_kws={"s": 10,
"alpha": 0.4})
plt.show()
e, fs = anomaly.select_threshold(X, X_val, y_val)
print('Best epsilon: {}\nBest F-score on validation data: {}'.format(e, fs))
multi_normal, y_pred = anomaly.predict(X, X_val, e, X_test, y_test)
# construct test DataFrame
    data = pd.DataFrame(X_test, columns=['Latency', 'Throughput'])
import biomart
import sys
import pandas as pd
import numpy as np
from biomart import BiomartServer
#from cStringIO import StringIO # python2
from io import BytesIO as cStringIO
from io import StringIO
biomart_host="http://www.ensembl.org/biomart"
def datasetsBM(host=biomart_host):
"""
Lists BioMart datasets.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing
"""
stdout_ = sys.stdout #Keep track of the previous value.
stream = StringIO()
sys.stdout = stream
server = BiomartServer(biomart_host)
server.show_datasets()
sys.stdout = stdout_ # restore the previous stdout.
variable = stream.getvalue()
v=variable.replace("{"," ")
v=v.replace("}"," ")
v=v.replace(": ","\t")
print(v)
def filtersBM(dataset,host=biomart_host):
"""
Lists BioMart filters for a specific dataset.
:param dataset: dataset to list filters of.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing
"""
stdout_ = sys.stdout #Keep track of the previous value.
stream = StringIO()
sys.stdout = stream
server = BiomartServer(host)
d=server.datasets[dataset]
d.show_filters()
sys.stdout = stdout_ # restore the previous stdout.
variable = stream.getvalue()
v=variable.replace("{"," ")
v=v.replace("}"," ")
v=v.replace(": ","\t")
print(v)
def attributesBM(dataset,host=biomart_host):
"""
Lists BioMart attributes for a specific dataset.
:param dataset: dataset to list attributes of.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing
"""
stdout_ = sys.stdout #Keep track of the previous value.
stream = StringIO()
sys.stdout = stream
server = BiomartServer(host)
d=server.datasets[dataset]
d.show_attributes()
sys.stdout = stdout_ # restore the previous stdout.
variable = stream.getvalue()
v=variable.replace("{"," ")
v=v.replace("}"," ")
v=v.replace(": ","\t")
print(v)
def queryBM(query_attributes,query_dataset,query_filter=None,query_items=None,query_dic=None,host=biomart_host):
"""
Queries BioMart.
:param query_attributes: list of attributes to recover from BioMart
:param query_dataset: dataset to query
:param query_filter: one BioMart filter associated with the items being queried
    :param query_items: list of items to be queried (must associate with the given filter)
    :param query_dic: for complex queries use this option instead of 'query_filter' and 'query_items' and provide a dictionary of filters here, e.g. query_dic={"filter1":["item1","item2"],"filter2":["item3","item4"]}. If using query_dic, don't query more than 350 items at once.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: a Pandas dataframe of the queried attributes
"""
server = BiomartServer(host)
d=server.datasets[query_dataset]
res=[]
if not query_dic:
if query_items:
            chunks=[query_items[x:x+350] for x in range(0, len(query_items), 350)]
for c in chunks:
response=d.search({'filters':{query_filter:c},'attributes':query_attributes})
for line in response.iter_lines():
line = line.decode('utf-8')
res.append(line.split("\t"))
else:
response=d.search({'attributes':query_attributes})
for line in response.iter_lines():
line = line.decode('utf-8')
res.append(line.split("\t"))
elif query_dic:
response=d.search({'filters':query_dic,'attributes':query_attributes})
for line in response.iter_lines():
line = line.decode('utf-8')
res.append(line.split("\t"))
res=pd.DataFrame(res)
res.columns=query_attributes
return(res)
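# Example usage (illustrative only; the attribute and filter names are assumed
# from the ENSEMBL BioMart schema used elsewhere in this module):
#   df = queryBM(query_attributes=["ensembl_gene_id", "external_gene_name"],
#                query_dataset="hsapiens_gene_ensembl",
#                query_filter="ensembl_gene_id",
#                query_items=["ENSG00000139618", "ENSG00000141510"])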
def FilterGOstring(names_filter=["age-", "aging", "aged", 'aging', 'aging.', 'aging,'],\
exclude_names=["packaging","voltage","cleavage-",\
"stage-1","cage-like","message-specific",\
"damage-associated","stage-specific","foraging",\
"DNA-damaging","engaging","damaged","packaged"],\
defs_filter=[" age-", " aging", " aged", ' aging', ' aging.', ' aging,'],\
exclude_defs=["packaging","voltage","cleavage-",\
"stage-1","cage-like","message-specific",\
"damage-associated","stage-specific","foraging",\
"DNA-damaging","engaging","damaged","packaged"],\
host=biomart_host,\
HSA=None,MUS=None,CEL=None,DMEL=None):
"""
Filters GO terms based on given strings using ENSEMBL's biomart homology mapping.
:param names_filter: list of substrings to filter GO names on
:param exclude_names: list of substrings to be used for exclusion of GO names
    :param defs_filter: list of substrings to filter GO definitions on
    :param exclude_defs: list of substrings to be used for exclusion of GO definitions
:param host: biomart host server, default="http://www.ensembl.org/biomart"
:param HSA: retrieved hsa dataframe
:param MUS: retrieved mus dataframe
:param CEL: retrieved cel dataframe
:param DMEL: retrieved dmel dataframe
    :returns: homology_df, HSA, MUS, CEL, DMEL
"""
if type(HSA) == type(None):
queries={'hsapiens_gene_ensembl':["ensembl_gene_id","external_gene_name", \
"go_id","name_1006","definition_1006"],\
"mmusculus_gene_ensembl":["ensembl_gene_id","external_gene_name", \
"go_id","name_1006","definition_1006"],\
"celegans_gene_ensembl":["ensembl_gene_id","external_gene_name", \
"go_id","name_1006","definition_1006"],\
"dmelanogaster_gene_ensembl":["ensembl_gene_id","external_gene_name", \
"go_id","name_1006","definition_1006"]}
def QueryBioMart(dataset,attributes,host=host):
#print dataset
#sys.stdout.flush()
server = BiomartServer( host )
organism=server.datasets[dataset]
response=organism.search({'attributes':attributes})
response=response.content
response=response.decode()
response=response.split("\n")
response=[s.split("\t") for s in response ]
response=pd.DataFrame(response,columns=attributes)
return response
homology=[ "ensembl_gene_id","celegans_homolog_ensembl_gene","dmelanogaster_homolog_ensembl_gene","mmusculus_homolog_ensembl_gene"]
hsa_homology=QueryBioMart('hsapiens_gene_ensembl',homology)
HSA=QueryBioMart('hsapiens_gene_ensembl',queries['hsapiens_gene_ensembl'])
MUS=QueryBioMart('mmusculus_gene_ensembl',queries['mmusculus_gene_ensembl'])
CEL=QueryBioMart('celegans_gene_ensembl',queries['celegans_gene_ensembl'])
DMEL=QueryBioMart('dmelanogaster_gene_ensembl',queries['dmelanogaster_gene_ensembl'])
HSA=pd.merge(HSA,hsa_homology,on=["ensembl_gene_id"],how="outer")
HSA.columns=['HSA_ensembl_gene_id', 'HSA_external_gene_name','HSA_go_id', 'HSA_name_1006', 'HSA_definition_1006',\
'CEL_ensembl_gene_id', 'DMEL_ensembl_gene_id', 'MUS_ensembl_gene_id']
MUS.columns=['MUS_ensembl_gene_id','MUS_external_gene_name',"MUS_go_id",'MUS_name_1006','MUS_definition_1006']
CEL.columns=['CEL_ensembl_gene_id','CEL_external_gene_name',"CEL_go_id",'CEL_name_1006','CEL_definition_1006']
DMEL.columns=['DMEL_ensembl_gene_id','DMEL_external_gene_name',"DMEL_go_id",'DMEL_name_1006','DMEL_definition_1006']
HSA_gos=HSA[['HSA_go_id','HSA_name_1006','HSA_definition_1006']]
MUS_gos=MUS[['MUS_go_id','MUS_name_1006','MUS_definition_1006']]
CEL_gos=CEL[['CEL_go_id','CEL_name_1006','CEL_definition_1006']]
DMEL_gos=DMEL[['DMEL_go_id','DMEL_name_1006','DMEL_definition_1006']]
GOS=pd.DataFrame()
for tmp in [ HSA_gos, MUS_gos, CEL_gos, DMEL_gos]:
tmp.columns=['go_id','name_1006','definition_1006']
GOS=pd.concat([GOS,tmp])
GOS=GOS.drop_duplicates()
GOS=GOS.dropna()
GOS.reset_index(inplace=True, drop=True)
names=GOS["name_1006"].tolist()
defs=GOS["definition_1006"].tolist()
filtered_names=[]
for age in names_filter:
tmp=list(set( [ s for s in names if age in s ] ))
exclude=exclude_names
for e in exclude:
tmp=[ s for s in tmp if e not in s ]
filtered_names.append(tmp)
filtered_defs=[]
for age in defs_filter:
tmp=list(set( [ s for s in defs if age in s ] ))
exclude=exclude_defs
for e in exclude:
tmp=[ s for s in tmp if e not in s ]
filtered_defs.append(tmp)
# def checkStrings(filtered_names,names_filter):
# print_names=" ".join(filtered_names)
# print_names=print_names.split(" ")
# print_names_=[]
# for age in names_filter:
# tmp=[s for s in print_names if age in s ]
# print_names_.append(tmp)
# print_names_=[item for sublist in print_names_ for item in sublist]
# print_names_=list(set(print_names_))
# return print_names_
filtered_names = [item for sublist in filtered_names for item in sublist]
filtered_defs = [item for sublist in filtered_defs for item in sublist]
# names_=checkStrings(filtered_names,names_filter)
# defs_=checkStrings(filtered_defs,defs_filter)
print("\nStrings being used for filtering in names section:")
for f in filtered_names:
print(f)
print("\nStrings being used for filtering in defenitions section:")
for f in filtered_defs:
print("\n"+f)
def CHECK_AGE(x,l):
if x in l:
res=x
else:
res=np.nan
return res
GOS["names_filter"]=GOS["name_1006"].apply(lambda x: CHECK_AGE(x,filtered_names) )
GOS["defs_filter"]=GOS["definition_1006"].apply(lambda x: CHECK_AGE(x,filtered_defs) )
AGEGOS=GOS[['go_id',"names_filter","defs_filter"]].dropna(thresh=2)
AGEGOS=list(set(AGEGOS["go_id"].tolist()))
def CombineAnnHSA(df):
return pd.Series(dict(HSA_ensembl_gene_id = ', '.join([ str(s) for s in list(set(df['HSA_ensembl_gene_id'])) if str(s) != "nan" ] ) ,\
HSA_go_id = ', '.join([ str(s) for s in list(set(df['HSA_go_id'])) if str(s) != "nan" ]), \
HSA_external_gene_name = ', '.join([ str(s) for s in list(set(df['HSA_external_gene_name'])) if str(s) != "nan" ] ) ,\
HSA_name_1006 = ', '.join([ str(s) for s in list(set(df['HSA_name_1006'])) if str(s) != "nan" ] ) ,\
HSA_definition_1006 = ', '.join([ str(s) for s in list(set(df['HSA_definition_1006'])) if str(s) != "nan" ] ) ,\
CEL_ensembl_gene_id = ', '.join([ str(s) for s in list(set(df['CEL_ensembl_gene_id'])) if str(s) != "nan" ] ) ,\
DMEL_ensembl_gene_id = ', '.join([ str(s) for s in list(set(df['DMEL_ensembl_gene_id'])) if str(s) != "nan"] ) ,\
MUS_ensembl_gene_id = ', '.join([ str(s) for s in list(set(df['MUS_ensembl_gene_id'])) if str(s) != "nan" ] ) ,\
) )
def CombineAnnMUS(df):
return pd.Series(dict(MUS_ensembl_gene_id = ', '.join([ str(s) for s in list(set(df['MUS_ensembl_gene_id'])) if str(s) != "nan" ] ) ,\
MUS_external_gene_name = ', '.join([ str(s) for s in list(set(df['MUS_external_gene_name'])) if str(s) != "nan" ]), \
MUS_go_id = ', '.join([ str(s) for s in list(set(df['MUS_go_id'])) if str(s) != "nan" ] ) ,\
MUS_name_1006 = ', '.join([ str(s) for s in list(set(df['MUS_name_1006'])) if str(s) != "nan" ] ) ,\
MUS_definition_1006 = ', '.join([ str(s) for s in list(set(df['MUS_definition_1006'])) if str(s) != "nan" ] ) ,\
) )
def CombineAnnCEL(df):
return pd.Series(dict(CEL_ensembl_gene_id = ', '.join([ str(s) for s in list(set(df['CEL_ensembl_gene_id'])) if str(s) != "nan" ] ) ,\
CEL_external_gene_name = ', '.join([ str(s) for s in list(set(df['CEL_external_gene_name'])) if str(s) != "nan" ]), \
CEL_go_id = ', '.join([ str(s) for s in list(set(df['CEL_go_id'])) if str(s) != "nan" ] ) ,\
CEL_name_1006 = ', '.join([ str(s) for s in list(set(df['CEL_name_1006'])) if str(s) != "nan" ] ) ,\
CEL_definition_1006 = ', '.join([ str(s) for s in list(set(df['CEL_definition_1006'])) if str(s) != "nan" ] ) ,\
) )
def CombineAnnDMEL(df):
return pd.Series(dict(DMEL_ensembl_gene_id = ', '.join([ str(s) for s in list(set(df['DMEL_ensembl_gene_id'])) if str(s) != "nan" ] ) ,\
DMEL_external_gene_name = ', '.join([ str(s) for s in list(set(df['DMEL_external_gene_name'])) if str(s) != "nan" ]), \
DMEL_go_id = ', '.join([ str(s) for s in list(set(df['DMEL_go_id'])) if str(s) != "nan" ] ) ,\
DMEL_name_1006 = ', '.join([ str(s) for s in list(set(df['DMEL_name_1006'])) if str(s) != "nan" ] ) ,\
DMEL_definition_1006 = ', '.join([ str(s) for s in list(set(df['DMEL_definition_1006'])) if str(s) != "nan" ] ) ,\
) )
HSA=HSA.groupby(by=["HSA_ensembl_gene_id","MUS_ensembl_gene_id",\
"DMEL_ensembl_gene_id","CEL_ensembl_gene_id"], as_index=False).apply(CombineAnnHSA)
MUS=MUS.groupby(by="MUS_ensembl_gene_id", as_index=False).apply(CombineAnnMUS)
DMEL=DMEL.groupby(by="DMEL_ensembl_gene_id", as_index=False).apply(CombineAnnDMEL)
CEL=CEL.groupby(by="CEL_ensembl_gene_id", as_index=False).apply(CombineAnnCEL)
MUS.reset_index(inplace=True,drop=True)
HSA.reset_index(inplace=True,drop=True)
DMEL.reset_index(inplace=True,drop=True)
CEL.reset_index(inplace=True,drop=True)
HSA=HSA[['HSA_ensembl_gene_id', 'HSA_external_gene_name', 'HSA_go_id', 'HSA_name_1006','HSA_definition_1006',\
'MUS_ensembl_gene_id', 'CEL_ensembl_gene_id', 'DMEL_ensembl_gene_id']]
MUS=MUS[['MUS_ensembl_gene_id', 'MUS_external_gene_name', 'MUS_go_id', 'MUS_name_1006','MUS_definition_1006']]
CEL=CEL[['CEL_ensembl_gene_id', 'CEL_external_gene_name', 'CEL_go_id', 'CEL_name_1006','CEL_definition_1006']]
DMEL=DMEL[['DMEL_ensembl_gene_id', 'DMEL_external_gene_name', 'DMEL_go_id', 'DMEL_name_1006','DMEL_definition_1006']]
    homDF = pd.merge(HSA, MUS, on=["MUS_ensembl_gene_id"], how="outer")
'''Populate the graph database'''
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
from pathlib import Path
import datetime as dt
from typing import List, Optional
from newspaper import Article, ArticleException
from tqdm.auto import tqdm
import json
import re
from neo4j.exceptions import DatabaseError
from concurrent.futures import ProcessPoolExecutor
from timeout_decorator import timeout, TimeoutError
from ..utils import root_dir, strip_url
from .graph import Graph
def populate(start_from_scratch: bool = False,
queries: Optional[List[str]] = None):
'''Populate the graph database with nodes and relations'''
# Initialise the graph database
graph = Graph()
# Set up twitter directory and a list of all the twitter queries
twitter_dir = Path('/media') / 'secure' / 'dan' / 'twitter'
if queries is None:
queries = [p for p in twitter_dir.iterdir()]
# Delete all nodes and constraints in database
if start_from_scratch:
graph.query('CALL apoc.periodic.iterate('
'"MATCH (n) RETURN n",'
'"DETACH DELETE n",'
'{batchsize:10000, parallel:false})')
constraints = graph.query('CALL db.constraints')
for constraint in constraints.name:
graph.query(f'DROP CONSTRAINT {constraint}')
# Set up cypher directory
cypher_dir = root_dir / 'src' / 'neo4j' / 'cypher'
constraint_paths = list(cypher_dir.glob('constraint_*.cql'))
node_paths = list(cypher_dir.glob('node_*.cql'))
rel_paths = list(cypher_dir.glob('rel_*.cql'))
# Create constraints
for path in tqdm(constraint_paths, desc='Creating constraints'):
cypher = path.read_text()
graph.query(cypher)
def load_records(query: str, fname: str) -> pd.DataFrame:
'''Helper function to load records from a CSV file'''
try:
df = pd.read_csv(twitter_dir / query / f'{fname}.csv',
engine='python',
error_bad_lines=False,
warn_bad_lines=False)
df = df.replace({np.nan: None})
return df
except pd.errors.EmptyDataError:
            return pd.DataFrame()
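    # Illustrative use of load_records (the file names here are hypothetical,
    # not taken from the original project):
    #   tweet_df = load_records(query, 'tweets')
    # with the resulting frame fed into the node_*.cql / rel_*.cql queries.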
import os
from os import listdir
from os.path import isfile, join
import re
from path import Path
import numpy as np
import pandas as pd
from poor_trader import utils
from poor_trader.utils import quotes_range
from poor_trader.config import INDICATORS_OUTPUT_PATH
def _true_range(df_quotes, indices):
cur = df_quotes.iloc[indices[1]]
prev = df_quotes.iloc[indices[0]]
high, low, prev_close = cur.High, cur.Low, prev.Close
a = utils.roundn(high - low, 4)
b = utils.roundn(abs(high - prev_close), 4)
c = utils.roundn(abs(low - prev_close), 4)
return max(a, b, c)
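# Worked example (added for clarity): with high=10, low=8 and a previous close
# of 11 the candidates are 10-8=2, |10-11|=1 and |8-11|=3, so the true range is
# 3 -- the largest of the three spreads.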
def true_range(df_quotes):
    df = pd.DataFrame(index=df_quotes.index)
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib.pyplot as plt
import seaborn as sns; sns.set_context('notebook')
from collections import OrderedDict
import pickle
from pystan import StanModel
"""Multilevel Modeling with Poststratification (MRP)"""
# Use multilevel regression to model individual survey responses as a function of demographic and geographic
# predictors, partially pooling respondents across states/regions to an extent determined by the data.
# The final step is post-stratification.
# Read the data & define variables
# Data are from http://www.stat.columbia.edu/~gelman/arm/examples/election88
"""Step 1: gather national opinion polls (they need to include respondent information down to the level of disaggregation
the analysis is targetting) """
# Load in data from the CBS polls with the following covariates (individual level):
# - org: organisation which collected the poll
# - year: year id
# - survey: survey id
# - bush: indicator (=1) for support of bush
# - state: state id
# - edu: categorical variable indicating level of education
# - age: categorical variable indicating age
# - female: indicator (=1) for female
# - black: indicator (=1) for black
# - weight: sample weight
polls = pd.read_csv('./data/polls.csv')
polls = polls.drop(polls.columns[[0]], axis=1)
"""Step 2: create a separate dataset of state-level predictors """
# Load in data for region indicators (state level). The variables are:
# - state_abbr: abbreviations of state names
# - regions: 1=northeast, 2=south, 3=north central, 4=west, 5=d.c.
# - not_dc: indicator variable which is 1 for non_dc states
state_info = pd.read_csv('./data/state.csv')
state_info = state_info.rename(columns={'Unnamed: 0': 'state'})
# Include a measure of previous vote as a state-level predictor. The variables are:
# - g76_84pr: state average in previous election
# - stnum2: state id
presvote = pd.read_csv("./data/presvote.csv")
presvote = presvote.drop(presvote.columns[[0]], axis=1)
presvote = presvote.rename(columns={'g76_84pr': 'v_prev', 'stnum2': 'state'})
# Include a measure of candidate effects as a state-level predictor and add empty row for DC.
candidate_effects = pd.read_csv("./data/candidate_effects.csv")
candidate_effects = candidate_effects.drop(candidate_effects.columns[[0]], axis=1)
candidate_effects = candidate_effects.rename(columns={'state': 'state_abbr'})
candidate_effects.loc[:,'candidate_effects_weighted'] = (candidate_effects.loc[:,'X76'] + candidate_effects.loc[:,'X80'] + candidate_effects.loc[:,'X84']) / 3.0
candidate_effects_1 = candidate_effects.iloc[:9]
candidate_effects = pd.concat([candidate_effects_1,candidate_effects.iloc[8:]]).reset_index(drop=True)
candidate_effects.iloc[8] = 0
candidate_effects.at[8, 'state_abbr'] = 'DC'
presvote.loc[:,'v_prev'] += candidate_effects.loc[:,'candidate_effects_weighted']
# Merge all three dataframes into one:
polls = pd.merge(polls, state_info, on='state', how='left')
polls = pd.merge(polls, presvote, on='state', how='left')
# Select subset of polls:
polls_subset = polls.loc[polls['survey'] == '9158']
# Change female to sex and black to race:
polls_subset.loc[:,'sex'] = polls_subset.loc[:,'female'] + 1
polls_subset.loc[:,'race'] = polls_subset.loc[:,'black'] + 1
# Drop unnecessary columns:
polls_subset = polls_subset.drop(['org', 'year', 'survey', 'region', 'not_dc', 'state_abbr', 'weight', 'female', 'black'], axis=1)
polls_subset['main'] = np.where(polls_subset['bush'] == 1, 1, np.where(polls_subset['bush'] == 0, 1, 0))
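# 'main' is 1 when the respondent chose either major-party candidate (bush is
# 0 or 1) and 0 when the bush field is missing, i.e. a non-response/other vote.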
# Drop nan in polls_subset.bush
polls_subset_no_nan = polls_subset[polls_subset.bush.notnull()]
polls_subset_no_nan = polls_subset_no_nan.drop(['main'], axis=1)
# define other data summaries
n = len(polls_subset.bush) # of survey respondents
n_no_nan = len(polls_subset_no_nan.bush) # of survey respondents
n_sex = max(polls_subset.sex) # of sex categories
n_race = max(polls_subset.race) # of race categories
n_age = max(polls_subset.age) # of age categories
n_edu = max(polls_subset.edu) # of education categories
n_state = max(polls_subset.state) # of states
""" Extra Step: Validation Data"""
# load in 1988 election data as a validation check
election88 = pd.read_csv("./data/election88.csv")
election88 = election88.drop(election88.columns[[0]], axis=1)
# stnum: state id
# st: state abbreviation
# electionresult: is the outcome of the election
# samplesize:
# raking:
# merge_:
"""Step 3: Load 1988 census data to enable poststratification."""
census88 = pd.read_csv("./data/census88.csv")
census88 = census88.drop(census88.columns[[0]], axis=1)
census88 = pd.merge(census88, state_info, on='state', how='left')
census88 = pd.merge(census88, presvote, on='state', how='left')
# edu: categorical variable indicating level of education
# age: categorical variable indicating age
# female: indicator (=1) for female
# black: indicator (=1) for black
# N: size of population in this cell
# Change female to sex and black to race:
census88.loc[:,'sex'] = census88.loc[:,'female'] + 1
census88.loc[:,'race'] = census88.loc[:,'black'] + 1
census88 = census88.drop(['female', 'black'], axis=1)
"""Step 4: Fit a regression model for an individual survey response given demographics, geography etc."""
################################
#### 1st model: Probability that a voter casts a vote on a main party candidate
################################
# Pr(Y_i \in {Bush, Dukakis}) = logit^{-1}(alpha[1] + alpha[2] * v_prev_j[i] + a^state_j[i] + a^edu_j[i] + a^sex_j[i] + a^age_j[i]
# + a^race_j[i] + a^partyID_j[i] + a^ideology_j[i] + a^lastvote_j[i])
# a^{}_j[i] are the varying coefficients associated with each categorical variable; with independent prior distributions:
# a^{}_j[i] ~ N(0,sigma^2_var)
# the variance parameters are assigned a hyper prior distribution:
# sigma^2_var ~ invX^2(v,sigma^2_0)
# with a weak prior specification for v and sigma^2_0
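# (Clarification, not in the original) logit^{-1}(x) = 1 / (1 + exp(-x)); the
# poststratification code below evaluates it with sp.special.expit.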
# Model description:
model_1 = """
data {
int<lower=0> N;
int<lower=0> n_state;
int<lower=0> n_edu;
int<lower=0> n_sex;
int<lower=0> n_age;
int<lower=0> n_race;
#int<lower=0> n_party_id;
#int<lower=0> n_ideology;
#int<lower=0> n_lastvote;
vector[N] state_v_prev;
int<lower=0,upper=n_state> state[N];
int<lower=0,upper=n_edu> edu[N];
int<lower=0,upper=n_sex> sex[N];
int<lower=0,upper=n_age> age[N];
int<lower=0,upper=n_race> race[N];
#int<lower=0,upper=n_party_id> party_id[N];
#int<lower=0,upper=n_ideology> ideology[N];
#int<lower=0,upper=n_lastvote> lastvote[N];
int<lower=0,upper=1> y[N];
}
parameters {
vector[2] alpha;
vector[n_state] a;
vector[n_edu] b;
vector[n_sex] c;
vector[n_age] d;
vector[n_race] e;
#vector[n_party_id] f;
#vector[n_ideology] g;
#vector[n_lastvote] h;
real<lower=0,upper=100> sigma_a;
real<lower=0,upper=100> sigma_b;
real<lower=0,upper=100> sigma_c;
real<lower=0,upper=100> sigma_d;
real<lower=0,upper=100> sigma_e;
#real<lower=0,upper=100> sigma_f;
#real<lower=0,upper=100> sigma_g;
#real<lower=0,upper=100> sigma_h;
real<lower=0> mu;
real<lower=0,upper=100> sigma_0;
}
transformed parameters {
vector[N] y_hat;
for (i in 1:N)
y_hat[i] = alpha[1] + alpha[2] * state_v_prev[i] + a[state[i]] + b[edu[i]] + c[sex[i]] + d[age[i]] +
e[race[i]]; #+ f[party_id[i]] + g[ideology[i]] + h[lastvote[i]];
}
model {
a ~ normal (0, sigma_a);
b ~ normal (0, sigma_b);
c ~ normal (0, sigma_c);
d ~ normal (0, sigma_d);
e ~ normal (0, sigma_e);
#f ~ normal (0, sigma_f);
#g ~ normal (0, sigma_g);
#h ~ normal (0, sigma_h);
alpha ~ normal(0, 100);
sigma_a ~ scaled_inv_chi_square(mu,sigma_0);
sigma_b ~ scaled_inv_chi_square(mu,sigma_0);
sigma_c ~ scaled_inv_chi_square(mu,sigma_0);
sigma_d ~ scaled_inv_chi_square(mu,sigma_0);
sigma_e ~ scaled_inv_chi_square(mu,sigma_0);
#sigma_f ~ scaled_inv_chi_square(mu,sigma_0);
#sigma_g ~ scaled_inv_chi_square(mu,sigma_0);
#sigma_h ~ scaled_inv_chi_square(mu,sigma_0);
mu ~ uniform(0, 100);
sigma_0 ~ uniform(0, 100);
y ~ bernoulli_logit(y_hat);
}
"""
# Model parameters and data:
model_1_data_dict = {'N': n, 'n_state': n_state, 'n_edu': n_edu, 'n_sex': n_sex, 'n_age': n_age, 'n_race': n_race,
'state': polls_subset.state, 'edu': polls_subset.edu, 'sex': polls_subset.sex, 'age': polls_subset.age,
'race': polls_subset.race, 'state_v_prev': polls_subset.v_prev, 'y': polls_subset.main}
# Fitting the model:
n_chains = 2
n_iter = 1000
sm = StanModel(model_code=model_1)
with open('./models/model_1.pkl', 'wb') as f:
pickle.dump(sm, f)
sm = pickle.load(open('./models/model_1.pkl', 'rb'))
model_1_fit = sm.sampling(data=model_1_data_dict, iter=n_iter, chains=n_chains)
# Plot coefficients with confidence intervals:
params_demo = model_1_fit.extract(['alpha', 'b', 'c', 'd', 'e'])
params_alpha_0 = pd.DataFrame({'Intercept' : params_demo['alpha'][:,0]})
params_b = pd.DataFrame(OrderedDict({'Edu ' + str(i+1) : params_demo['b'][:,i] for i in range(0,params_demo['b'].shape[1])}))
params_c = pd.DataFrame(OrderedDict({'Sex ' + str(i+1) : params_demo['c'][:,i] for i in range(0,params_demo['c'].shape[1])}))
params_d = pd.DataFrame(OrderedDict({'Age ' + str(i+1) : params_demo['d'][:,i] for i in range(0,params_demo['d'].shape[1])}))
params_e = pd.DataFrame(OrderedDict({'Race ' + str(i+1) : params_demo['e'][:,i] for i in range(0,params_demo['e'].shape[1])}))
params_demo = pd.concat([params_alpha_0, params_b, params_c, params_d, params_e], axis=1)
ticks_list = list(params_demo.columns.values)
plt.figure(figsize=(10,15))
plt.plot(params_demo.median(), range(params_demo.shape[1]), 'ko', ms = 10)
plt.hlines(range(params_demo.shape[1]), params_demo.quantile(0.025), params_demo.quantile(0.975), 'k')
plt.hlines(range(params_demo.shape[1]), params_demo.quantile(0.25), params_demo.quantile(0.75), 'k', linewidth = 3)
plt.axvline(0, linestyle = 'dashed', color = 'k')
plt.xlabel('Median Coefficient Estimate (50 and 95% CI)')
plt.yticks(range(params_demo.shape[1]), ticks_list)
plt.ylim([-1, params_demo.shape[1]])
plt.xlim([(min(params_demo.quantile(0.025))-0.5), (max(params_demo.quantile(0.975))+0.5)])
plt.title('Coefficients')
plt.tight_layout()
plt.savefig('./figs/DemoCoefficients_ConfidenceIntervals.png')
plt.show()
# Plot coefficients with confidence intervals:
params_state = model_1_fit.extract(['alpha', 'a'])
params_alpha_1 = pd.DataFrame({'Prev Vote' : params_state['alpha'][:,1]})
params_a = pd.DataFrame(OrderedDict({'State ' + str(i+1) : params_state['a'][:,i] for i in range(0,params_state['a'].shape[1])}))
params_state = pd.concat([params_alpha_1, params_a], axis=1)
ticks_list = list(params_state.columns.values)
plt.figure(figsize=(10,15))
plt.plot(params_state.median(), range(params_state.shape[1]), 'ko', ms = 10)
plt.hlines(range(params_state.shape[1]), params_state.quantile(0.025), params_state.quantile(0.975), 'k')
plt.hlines(range(params_state.shape[1]), params_state.quantile(0.25), params_state.quantile(0.75), 'k', linewidth = 3)
plt.axvline(0, linestyle = 'dashed', color = 'k')
plt.xlabel('Median Coefficient Estimate (50 and 95% CI)')
plt.yticks(range(params_state.shape[1]), ticks_list)
plt.ylim([-1, params_state.shape[1]])
plt.xlim([(min(params_state.quantile(0.025))-0.5), (max(params_state.quantile(0.975))+0.5)])
plt.title('State Intercepts')
plt.tight_layout()
plt.savefig('./figs/StateIntercepts_ConfidenceIntervals.png')
plt.show()
# Traceplot:
model_1_fit.plot()
plt.savefig('./figs/ParameterDistributions_model_1.png')
plt.show()
################################
#### 2nd model: Probability that a voter casts a vote for Bush
################################
# 2nd model:
# Pr(Y_i = Bush | Y_i \in {Bush, Dukakis}) = logit^{-1}(beta_0 + beta_1 * v_prev_j[i] + b^state_j[i] + b^edu_j[i]
# + b^sex_j[i] + b^age_j[i] + b^race_j[i] + b^partyID_j[i] + b^ideology_j[i] + b^lastvote_j[i])
# b^{}_j[i] ~ N(0,eta^2_var)
# eta^2_var ~ invX^2(mu,eta^2_0)
# run daily with a four-day moving window (t, t-1, t-2, t-3)
# Model description:
model_2 = """
data {
int<lower=0> N;
int<lower=0> n_state;
int<lower=0> n_edu;
int<lower=0> n_sex;
int<lower=0> n_age;
int<lower=0> n_race;
#int<lower=0> n_party_id;
#int<lower=0> n_ideology;
#int<lower=0> n_lastvote;
vector[N] state_v_prev;
int<lower=0,upper=n_state> state[N];
int<lower=0,upper=n_edu> edu[N];
int<lower=0,upper=n_sex> sex[N];
int<lower=0,upper=n_age> age[N];
int<lower=0,upper=n_race> race[N];
#int<lower=0,upper=n_party_id> party_id[N];
#int<lower=0,upper=n_ideology> ideology[N];
#int<lower=0,upper=n_lastvote> lastvote[N];
int<lower=0,upper=1> y[N];
}
parameters {
vector[2] alpha;
vector[n_state] a;
vector[n_edu] b;
vector[n_sex] c;
vector[n_age] d;
vector[n_race] e;
#vector[n_party_id] f;
#vector[n_ideology] g;
#vector[n_lastvote] h;
real<lower=0,upper=100> sigma_a;
real<lower=0,upper=100> sigma_b;
real<lower=0,upper=100> sigma_c;
real<lower=0,upper=100> sigma_d;
real<lower=0,upper=100> sigma_e;
#real<lower=0,upper=100> sigma_f;
#real<lower=0,upper=100> sigma_g;
#real<lower=0,upper=100> sigma_h;
real<lower=0> mu;
real<lower=0,upper=100> sigma_0;
}
transformed parameters {
vector[N] y_hat;
for (i in 1:N)
y_hat[i] = alpha[1] + alpha[2] * state_v_prev[i] + a[state[i]] + b[edu[i]] + c[sex[i]] + d[age[i]] + e[race[i]];
#+ f[party_id[i]] + g[ideology[i]] + h[lastvote[i]];
}
model {
a ~ normal (0, sigma_a);
b ~ normal (0, sigma_b);
c ~ normal (0, sigma_c);
d ~ normal (0, sigma_d);
e ~ normal (0, sigma_e);
#f ~ normal (0, sigma_f);
#g ~ normal (0, sigma_g);
#h ~ normal (0, sigma_h);
alpha ~ normal(0, 100);
sigma_a ~ scaled_inv_chi_square(mu,sigma_0);
sigma_b ~ scaled_inv_chi_square(mu,sigma_0);
sigma_c ~ scaled_inv_chi_square(mu,sigma_0);
sigma_d ~ scaled_inv_chi_square(mu,sigma_0);
sigma_e ~ scaled_inv_chi_square(mu,sigma_0);
#sigma_f ~ scaled_inv_chi_square(mu,sigma_0);
#sigma_g ~ scaled_inv_chi_square(mu,sigma_0);
#sigma_h ~ scaled_inv_chi_square(mu,sigma_0);
mu ~ uniform(0, 100);
sigma_0 ~ uniform(0, 100);
y ~ bernoulli_logit(y_hat);
}
"""
# Model parameters and data:
model_2_data_dict = {'N': n_no_nan, 'n_state': n_state, 'n_edu': n_edu, 'n_sex': n_sex, 'n_age': n_age, 'n_race': n_race,
'state': polls_subset_no_nan.state, 'edu': polls_subset_no_nan.edu, 'sex': polls_subset_no_nan.sex, 'age': polls_subset_no_nan.age,
'race': polls_subset_no_nan.race, 'state_v_prev': polls_subset_no_nan.v_prev, 'y': polls_subset_no_nan.bush.astype(int)}
# Fitting the model:
n_chains = 2
n_iter = 1000
sm = StanModel(model_code=model_2)
with open('./models/model_2.pkl', 'wb') as f:
pickle.dump(sm, f)
sm = pickle.load(open('./models/model_2.pkl', 'rb'))
model_2_fit = sm.sampling(data=model_2_data_dict, iter=n_iter, chains=n_chains)
# Plot coefficients with confidence intervals:
params_demo = model_2_fit.extract(['alpha', 'b', 'c', 'd', 'e'])
params_alpha_0 = pd.DataFrame({'Intercept' : params_demo['alpha'][:,0]})
params_b = pd.DataFrame(OrderedDict({'Edu ' + str(i+1) : params_demo['b'][:,i] for i in range(0,params_demo['b'].shape[1])}))
params_c = pd.DataFrame(OrderedDict({'Sex ' + str(i+1) : params_demo['c'][:,i] for i in range(0,params_demo['c'].shape[1])}))
params_d = pd.DataFrame(OrderedDict({'Age ' + str(i+1) : params_demo['d'][:,i] for i in range(0,params_demo['d'].shape[1])}))
params_e = pd.DataFrame(OrderedDict({'Race ' + str(i+1) : params_demo['e'][:,i] for i in range(0,params_demo['e'].shape[1])}))
params_demo = pd.concat([params_alpha_0, params_b, params_c, params_d, params_e], axis=1)
ticks_list = list(params_demo.columns.values)
plt.figure(figsize=(10,15))
plt.plot(params_demo.median(), range(params_demo.shape[1]), 'ko', ms = 10)
plt.hlines(range(params_demo.shape[1]), params_demo.quantile(0.025), params_demo.quantile(0.975), 'k')
plt.hlines(range(params_demo.shape[1]), params_demo.quantile(0.25), params_demo.quantile(0.75), 'k', linewidth = 3)
plt.axvline(0, linestyle = 'dashed', color = 'k')
plt.xlabel('Median Coefficient Estimate (50 and 95% CI)')
plt.yticks(range(params_demo.shape[1]), ticks_list)
plt.ylim([-1, params_demo.shape[1]])
plt.xlim([(min(params_demo.quantile(0.025))-0.5), (max(params_demo.quantile(0.975))+0.5)])
plt.title('Coefficients')
plt.tight_layout()
plt.savefig('./figs/DemoCoefficients_ConfidenceIntervals_m2.png')
plt.show()
# Plot coefficients with confidence intervals:
params_state = model_2_fit.extract(['alpha', 'a'])
params_alpha_1 = pd.DataFrame({'Prev Vote' : params_state['alpha'][:,1]})
params_a = pd.DataFrame(OrderedDict({'State ' + str(i+1) : params_state['a'][:,i] for i in range(0,params_state['a'].shape[1])}))
params_state = pd.concat([params_alpha_1, params_a], axis=1)
ticks_list = list(params_state.columns.values)
plt.figure(figsize=(10,15))
plt.plot(params_state.median(), range(params_state.shape[1]), 'ko', ms = 10)
plt.hlines(range(params_state.shape[1]), params_state.quantile(0.025), params_state.quantile(0.975), 'k')
plt.hlines(range(params_state.shape[1]), params_state.quantile(0.25), params_state.quantile(0.75), 'k', linewidth = 3)
plt.axvline(0, linestyle = 'dashed', color = 'k')
plt.xlabel('Median Coefficient Estimate (50 and 95% CI)')
plt.yticks(range(params_state.shape[1]), ticks_list)
plt.ylim([-1, params_state.shape[1]])
plt.xlim([(min(params_state.quantile(0.025))-0.5), (max(params_state.quantile(0.975))+0.5)])
plt.title('State Intercepts')
plt.tight_layout()
plt.savefig('./figs/StateIntercepts_ConfidenceIntervals_m2.png')
plt.show()
# Traceplot:
model_2_fit.plot()
plt.savefig('./figs/ParameterDistributions_model_2.png')
plt.show()
"""# Plot individual parameter's different chains:
b = basic_model_fit.extract(permuted=True)['b']
b_split = np.array_split(b, n_chains) # assumes that the b array is just one chain tacked onto the end of another
for i in range(n_chains):
plt.plot(b_split[i])
plt.savefig('./figs/Traceplot.png')
plt.show()"""
"""Poststratification"""
## Using the model inferences to estimate avg opinion for each state
# construct the n.sims x 3264 matrix
params_m1 = model_1_fit.extract(['alpha', 'a', 'b', 'c', 'd', 'e'])
alpha_m1 = pd.DataFrame(params_m1['alpha'])
a_m1 = pd.DataFrame(params_m1['a'])
b_m1 = pd.DataFrame(params_m1['b'])
c_m1 = pd.DataFrame(params_m1['c'])
d_m1 = pd.DataFrame(params_m1['d'])
e_m1 = pd.DataFrame(params_m1['e'])
params_m2 = model_2_fit.extract(['alpha', 'a', 'b', 'c', 'd', 'e'])
alpha_m2 = pd.DataFrame(params_m2['alpha'])
a_m2 = pd.DataFrame(params_m2['a'])
b_m2 = pd.DataFrame(params_m2['b'])
c_m2 = pd.DataFrame(params_m2['c'])
d_m2 = pd.DataFrame(params_m2['d'])
e_m2 = pd.DataFrame(params_m2['e'])
L = census88.shape[0]
y_pred = np.full((int((n_iter / 2) * n_chains),L), np.nan)
y_pred_cond = np.full((int((n_iter / 2) * n_chains),L), np.nan)
for l in range(0, L):
y_pred[:,l] = sp.special.expit(alpha_m1.iloc[:,0] + alpha_m1.iloc[:,1] * census88.v_prev[l] +
a_m1.iloc[:,census88.state[l]-1] + b_m1.iloc[:,census88.edu[l]-1] + c_m1.iloc[:,census88.sex[l]-1] +
d_m1.iloc[:,census88.age[l]-1] + e_m1.iloc[:,census88.race[l]-1])
y_pred_cond[:,l] = sp.special.expit(alpha_m2.iloc[:,0] + alpha_m2.iloc[:,1] * census88.v_prev[l] +
a_m2.iloc[:,census88.state[l]-1] + b_m2.iloc[:,census88.edu[l]-1] + c_m2.iloc[:,census88.sex[l]-1] +
d_m2.iloc[:,census88.age[l]-1] + e_m2.iloc[:,census88.race[l]-1])
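# Each column l of y_pred / y_pred_cond now holds the posterior draws of the two modelled
# probabilities for census cell l; below they are combined into joint ("unconditional")
# probabilities and renormalized over the Bush / non-Bush outcomes before poststratifying by state.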
# Convert to unconditional probabilities:
y_bush = y_pred_cond * y_pred
y_non_bush = (1 - y_pred_cond) * y_pred
y_non = (1 - y_pred)
# Normalized:
y_bush_norm = y_bush / (y_bush + y_non_bush)
y_non_bush_norm = y_non_bush / (y_bush + y_non_bush)
# average over strata within each state
y_pred_state = np.full((int((n_iter / 2) * n_chains),n_state), np.nan)
for j in range(1,n_state+1):
sel = [s for s in range(L) if census88.state[s] == j]
y_pred_state[:,j-1] = np.divide((np.dot(y_bush_norm[:,sel],(census88[census88.state == j]).N)),sum((census88[census88.state == j]).N))
y_pred_state = pd.DataFrame(y_pred_state)
y_pred_state_bush = np.full((int((n_iter / 2) * n_chains),n_state), np.nan)
for j in range(1,n_state+1):
sel = [s for s in range(L) if census88.state[s] == j]
y_pred_state_bush[:,j-1] = np.divide((np.dot(y_bush[:,sel],(census88[census88.state == j]).N)),sum((census88[census88.state == j]).N))
y_pred_state_bush = pd.DataFrame(y_pred_state_bush)
y_pred_state_non_bush = np.full((int((n_iter / 2) * n_chains),n_state), np.nan)
for j in range(1,n_state+1):
sel = [s for s in range(L) if census88.state[s] == j]
y_pred_state_non_bush[:,j-1] = np.divide((np.dot(y_non_bush[:,sel],(census88[census88.state == j]).N)),sum((census88[census88.state == j]).N))
y_pred_state_non_bush = pd.DataFrame(y_pred_state_non_bush)
y_pred_state_non = np.full((int((n_iter / 2) * n_chains),n_state), np.nan)
for j in range(1,n_state+1):
sel = [s for s in range(L) if census88.state[s] == j]
y_pred_state_non[:,j-1] = np.divide((np.dot(y_non[:,sel],(census88[census88.state == j]).N)),sum((census88[census88.state == j]).N))
y_pred_state_non = | pd.DataFrame(y_pred_state_non) | pandas.DataFrame |
import matplotlib
# matplotlib.use('Agg')
from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential,load_model
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
from matplotlib import pyplot
from numpy import array
import datetime
from matplotlib.dates import DateFormatter
from random import shuffle
import numpy as np
from scipy import stats
import os
import pickle
class Sensors:
units = {'MAIN_FILTER_IN_PRESSURE':'PSI','MAIN_FILTER_OIL_TEMP':'Celsius',
'MAIN_FILTER_OUT_PRESSURE':'PSI','OIL_RETURN_TEMPERATURE':'Celsius',
'TANK_FILTER_IN_PRESSURE':'PSI','TANK_FILTER_OUT_PRESSURE':'PSI',
'TANK_LEVEL':'Centimeter','TANK_TEMPERATURE':'Celsius','FT-202B':'Micrometer',
'FT-204B':'Micrometer','PT-203':'Micrometer','PT-204':'Micrometer'}
sensor_name_acronym = {'MAIN_FILTER_IN_PRESSURE':'P1','MAIN_FILTER_OIL_TEMP':'T1',
'MAIN_FILTER_OUT_PRESSURE':'PSI','OIL_RETURN_TEMPERATURE':'T2',
'TANK_FILTER_IN_PRESSURE':'PSI','TANK_FILTER_OUT_PRESSURE':'PSI',
'TANK_LEVEL':'L1','TANK_TEMPERATURE':'T3','FT-202B':'V1',
'FT-204B':'V2','PT-203':'V3','PT-204':'V4'}
threshold = {'MAIN_FILTER_IN_PRESSURE': (40, 65, 80), 'MAIN_FILTER_OIL_TEMP': (40, 55, 60),
'MAIN_FILTER_OUT_PRESSURE': 'PSI', 'OIL_RETURN_TEMPERATURE': (40, 55, 60),
'TANK_FILTER_IN_PRESSURE': 'PSI', 'TANK_FILTER_OUT_PRESSURE': 'PSI',
'TANK_LEVEL': (40, 48, 50), 'TANK_TEMPERATURE': (40, 55, 60), 'FT-202B': (0, 20, 50),
'FT-204B': (0, 10, 20), 'PT-203': (0, 20, 50), 'PT-204': (0, 10, 20)}
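# NOTE (assumption from the variable names): the threshold tuples appear to encode
# (normal, warning, alarm) bounds per sensor; entries that still hold a unit string
# have no thresholds configured.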
def __init__(self, dataset_path, sensor_name,sample_rate, root_path, n_epochs = 1, n_batch = 1,
save_info = 0, n_neurons = 1, run_on_local = 1, train = 1, n_lag = 1, n_seq = 1):
self.n_lag = n_lag
self.n_seq = n_seq
self.n_epochs = n_epochs
self.n_batch = n_batch
self.n_neurons = n_neurons
self.dataset_path = dataset_path
self.sensor_name = sensor_name
self.sample_rate = sample_rate
self.root_path = root_path
self.save_info = save_info
self.run_on_local = run_on_local
self.train = train
self.init_file_name()
# self.normality_test()
def get_units(self):
return self.units
def init_file_name(self):
# self.dataset_path = self.dataset_path + self.sample_rate + '/' + self.sensor_name + '.csv'
self.dataset_path = os.path.join(self.dataset_path, self.sample_rate, self.sensor_name + '.csv')
self.file_name = self.sensor_name + '-' + self.sample_rate
self.file_path = os.path.join(self.root_path, self.sensor_name, self.sample_rate, str(self.n_seq) + '_step')
def get_files(self, file_dir):
'''
Args:
file_dir: file directory
Returns:
list of file paths
'''
dataset_path = []
for root, dirs, files in os.walk(file_dir):
for file in files:
dataset_path.append(os.path.join(root, file))
return dataset_path
# date-time parsing function for loading the dataset
def parser(self, x):
return datetime.datetime.strptime('190' + x, '%Y-%m')
# convert time series into supervised learning problem
def series_to_supervised(self, data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
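# Hypothetical example: series_to_supervised([[1], [2], [3], [4]], n_in=1, n_out=1) returns a
# frame with columns var1(t-1), var1(t) and rows (1, 2), (2, 3), (3, 4) once NaN rows are dropped.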
# create a differenced series
def difference(self, dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return | Series(diff) | pandas.Series |
"""
Test cases for the wiutils.summarizing.compute_general_count function.
"""
import numpy as np
import pandas as pd
import pytest
from wiutils.summarizing import compute_general_count
@pytest.fixture(scope="function")
def images():
return pd.DataFrame(
{
"deployment_id": ["001", "001", "001", "002", "003"],
"class": ["Mammalia", "Mammalia", "Mammalia", "Mammalia", "Mammalia"],
"order": ["Carnivora", "Carnivora", "Carnivora", "Carnivora", "Carnivora"],
"family": ["Felidae", "Mustelidae", "Mustelidae", "Mustelidae", "Canidae"],
"genus": ["Panthera", "Eira", "Eira", "Eira", np.nan],
"species": ["onca", "barbara", "barbara", "barbara", np.nan],
"number_of_objects": [1, 1, 1, 1, 4],
}
)
@pytest.fixture(scope="function")
def deployments():
return pd.DataFrame(
{"deployment_id": ["001", "002", "003"], "placename": ["AAA", "AAA", "BBB"]}
)
def test_deployment(images):
result = compute_general_count(images, groupby="deployment")
expected = pd.DataFrame(
{
"taxon": ["Canidae", "Eira barbara", "Panthera onca"],
"records": [4, 3, 1],
"deployments": [1, 2, 1],
}
)
pd.testing.assert_frame_equal(result, expected)
def test_location(images, deployments):
result = compute_general_count(images, deployments, groupby="location")
expected = pd.DataFrame(
{
"taxon": ["Canidae", "Eira barbara", "Panthera onca"],
"records": [4, 3, 1],
"locations": [1, 1, 1],
}
)
| pd.testing.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = | pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) | pandas.date_range |
import argparse
from tqdm import trange
import requests
import os
import sys
import csv
import pandas as pd
from time import sleep
from datetime import datetime
# URLs to make api calls
BASE_URL = "https://metamon-api.radiocaca.com/usm-api"
TOKEN_URL = f"{BASE_URL}/login"
LIST_MONSTER_URL = f"{BASE_URL}/getWalletPropertyBySymbol"
CHANGE_FIGHTER_URL = f"{BASE_URL}/isFightMonster"
START_FIGHT_URL = f"{BASE_URL}/startBattle"
LIST_BATTLER_URL = f"{BASE_URL}/getBattelObjects"
WALLET_PROPERTY_LIST = f"{BASE_URL}/getWalletPropertyList"
LVL_UP_URL = f"{BASE_URL}/updateMonster"
MINT_EGG_URL = f"{BASE_URL}/composeMonsterEgg"
CHECK_BAG_URL = f"{BASE_URL}/checkBag"
def datetime_now():
return datetime.now().strftime("%m/%d/%Y %H:%M:%S")
def post_formdata(payload, url="", headers=None):
"""Method to send request to game"""
files = []
if headers is None:
headers = {}
for _ in range(5):
try:
# Add delay to avoid error from too many requests per second
sleep(1.1)
response = requests.request("POST",
url,
headers=headers,
data=payload,
files=files)
return response.json()
except Exception:
continue
return {}
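# post_formdata retries up to five times, sleeping ~1.1 s before each attempt to stay under the
# API rate limit, and returns an empty dict if every attempt fails.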
def get_battler_score(monster):
""" Get opponent's power score"""
return monster["sca"]
def picker_battler(monsters_list):
""" Picking opponent """
battlers = list(filter(lambda m: m["rarity"] == "N", monsters_list))
if len(battlers) == 0:
battlers = list(filter(lambda m: m["rarity"] == "R", monsters_list))
battler = battlers[0]
score_min = get_battler_score(battler)
for i in range(1, len(battlers)):
score = get_battler_score(battlers[i])
if score < score_min:
battler = battlers[i]
score_min = score
return battler
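# picker_battler chooses the weakest (lowest "sca") N-rarity opponent and only falls back to
# R-rarity monsters when no N-rarity ones are listed.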
def pick_battle_level(level=1):
# pick highest league for given level
if 21 <= level <= 40:
return 2
if 41 <= level <= 60:
return 3
return 1
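# Note that levels outside 21-60 (including anything above 60) fall through to league 1 here.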
class MetamonPlayer:
def __init__(self,
address,
sign,
msg="LogIn",
auto_lvl_up=False,
output_stats=False):
self.no_enough_money = False
self.output_stats = output_stats
self.total_bp_num = 0
self.total_success = 0
self.total_fail = 0
self.mtm_stats_df = []
self.token = None
self.address = address
self.sign = sign
self.msg = msg
self.auto_lvl_up = auto_lvl_up
def init_token(self):
"""Obtain token for game session to perform battles and other actions"""
payload = {"address": self.address, "sign": self.sign, "msg": self.msg,
"network": "1", "clientType": "MetaMask"}
response = post_formdata(payload, TOKEN_URL)
if response.get("code") != "SUCCESS":
sys.stderr.write("Login failed, token is not initialized. Terminating\n")
sys.exit(-1)
self.token = response.get("data").get("accessToken")
def change_fighter(self, monster_id):
"""Switch to next metamon if you have few"""
payload = {
"metamonId": monster_id,
"address": self.address,
}
post_formdata(payload, CHANGE_FIGHTER_URL)
def list_battlers(self, monster_id, front=1):
"""Obtain list of opponents"""
payload = {
"address": self.address,
"metamonId": monster_id,
"front": front,
}
headers = {
"accessToken": self.token,
}
response = post_formdata(payload, LIST_BATTLER_URL, headers)
return response.get("data", {}).get("objects")
def start_fight(self,
my_monster,
target_monster_id,
loop_count=1):
""" Main method to initiate battles (as many as monster has energy for)"""
success = 0
fail = 0
total_bp_fragment_num = 0
mtm_stats = []
my_monster_id = my_monster.get("id")
my_monster_token_id = my_monster.get("tokenId")
my_level = my_monster.get("level")
my_power = my_monster.get("sca")
battle_level = pick_battle_level(my_level)
tbar = trange(loop_count)
tbar.set_description(f"Fighting with {my_monster_token_id}...")
for _ in tbar:
payload = {
"monsterA": my_monster_id,
"monsterB": target_monster_id,
"address": self.address,
"battleLevel": battle_level,
}
headers = {
"accessToken": self.token,
}
response = post_formdata(payload, START_FIGHT_URL, headers)
code = response.get("code")
if code == "BATTLE_NOPAY":
self.no_enough_money = True
break
data = response.get("data", {})
if data is None:
print(f"Metamon {my_monster_id} cannot fight skipping...")
break
fight_result = data.get("challengeResult", False)
bp_fragment_num = data.get("bpFragmentNum", 10)
if self.auto_lvl_up:
# Try to lvl up
res = post_formdata({"nftId": my_monster_id, "address": self.address},
LVL_UP_URL,
headers)
code = res.get("code")
if code == "SUCCESS":
tbar.set_description(f"LVL UP successful! Continue fighting with {my_monster_token_id}...")
my_level += 1
# Update league level if new level is 21 or 41
battle_level = pick_battle_level(my_level)
self.total_bp_num += bp_fragment_num
total_bp_fragment_num += bp_fragment_num
if fight_result:
success += 1
self.total_success += 1
else:
fail += 1
self.total_fail += 1
mtm_stats.append({
"My metamon id": my_monster_token_id,
"League lvl": battle_level,
"Total battles": loop_count,
"My metamon power": my_power,
"My metamon level": my_level,
"Victories": success,
"Defeats": fail,
"Total egg shards": total_bp_fragment_num,
"Timestamp": datetime_now()
})
mtm_stats_df = | pd.DataFrame(mtm_stats) | pandas.DataFrame |
#!/usr/bin/env python
"""
The :py:mod:`~pacman2020` module contains two classes,
:py:class:`~pacman2020.PACManPipeline` and :py:class:`~pacman2020.PACManTrain`,
which are designed to facilitate the process of text pre-processing, training,
testing, and applying the model to unclassified proposals.
"""
from collections import defaultdict
import glob
import logging
import os
import time
import dask
from dask.diagnostics import ProgressBar
import joblib
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
import tqdm
from utils.tokenizer import PACManTokenizer
logging.basicConfig(format='%(levelname)-4s '
'[%(module)s.%(funcName)s:%(lineno)d]'
' %(message)s')
LOG = logging.getLogger('pacman2020')
LOG.setLevel(logging.INFO)
class PACManPipeline(PACManTokenizer):
""" This class provides functionality for classifying new proposals.
Parameters
----------
cycle : int
The HST Cycle number to analyze
model_name : str
The name of one of the trained models in the ~/PACman_dist/models
directory.
"""
def __init__(self, cycle=None, model_name=''):
super().__init__()
self._base = os.path.join(
'/',
*os.path.dirname(os.path.abspath(__file__)).split('/')
)
self._unclassified_dir = os.path.join(
self.base,
'unclassified_proposals'
)
self._model_name = os.path.join(
self.base,
'models',
model_name
)
self._results_dir = os.path.join(
self.base,
'model_results'
)
self._cycle = cycle
self._proposal_data = {}
self._encoder = LabelEncoder()
self._model = None
@property
def base(self):
"""Base path of the PACMan package"""
return self._base
@base.setter
def base(self, value):
self._base = value
@property
def cycle(self):
"""Proposal cycle we are analyzing"""
return self._cycle
@cycle.setter
def cycle(self, value):
self._cycle = value
@property
def encoder(self):
"""Encoder used by the classifier"""
return self._encoder
@encoder.setter
def encoder(self, value):
self._encoder = value
@property
def model(self):
"""Pre-trained classifier"""
return self._model
@model.setter
def model(self, value):
self._model = value
@property
def model_name(self):
"""Absolute path of the pre-trained model"""
return self._model_name
@model_name.setter
def model_name(self, value):
self._model_name = value
@property
def results_dir(self):
"""Directory where results from the model are stored"""
return self._results_dir
@results_dir.setter
def results_dir(self, value):
self._results_dir = value
@property
def proposal_data(self):
"""`py:class:dict` for storing the DataFrame of proposal data"""
return self._proposal_data
@proposal_data.setter
def proposal_data(self, value):
self._proposal_data = value
@property
def unclassified_dir(self):
"""Directory containing unclassified proposals"""
return self._unclassified_dir
@unclassified_dir.setter
def unclassified_dir(self, value):
self._unclassified_dir = value
def apply_model(self, df, training=False):
""" Apply the model to make predictions on input data
Parameters
----------
df : pandas.DataFrame
Pre-processed proposals; must provide a 'cleaned_text' column and, when
training is True, the hand-classification columns as well.
training : bool
If True, carry the hand classifications through to the model results.
Returns
-------
None
Predictions and probabilities are stored in ``self.model_results``.
"""
X = df['cleaned_text']
self.predictions = self.model.predict(X)
self.predicition_probabilities = self.model.predict_proba(X)
if training:
self.model_results = df.loc[
:,
['fname',
'hand_classification',
'encoded_hand_classification']
]
else:
self.model_results = df.loc[:, ['fname']]
# Add the encoded model classifications to the DataFrame
self.model_results['encoded_model_classification'] = self.predictions
# Add the decoded model classifications
self.model_results['model_classification'] = \
self.encoder.inverse_transform(self.predictions)
# Now we need to add the probabilities for each class
for i, classname in enumerate(self.encoder.classes_):
colname = f"{self.encoder.classes_[i].replace(' ', '_')}_prob"
self.model_results[colname] = self.predicition_probabilities[:, i]
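# self.model_results now holds the file name (plus hand classifications when training), the
# encoded and decoded model classification, and one "<category>_prob" column per class known
# to the encoder.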
def save_model_results(self, fout=None, training=False):
""" Save the classification results to file
Parameters
----------
fout : str
Filename for output file. Defaults to the name of model and the
proposal cycle number.
training : bool
If True, then the results are saved in the training sub directory.
If False, then the results are saved in the production sub
directory.
Returns
-------
"""
if fout is None:
fout = f"{self.model_name.split('.')[0]}_results_cy{self.cycle}.txt"
if training:
fout = os.path.join(
self.results_dir,
'training',
fout
)
else:
fout = os.path.join(
self.results_dir,
'production',
fout
)
self.model_results.to_csv(fout, header=True, index=False)
def load_model(self, model_name=None):
""" Load the production model for PACman
Parameters
----------
model_name : str, optional
Path to the trained model file; overrides the model name passed at instantiation.
Returns
-------
"""
if model_name is not None:
self.model_name = model_name
LOG.info(f"Loading model stored at \n {self.model_name}")
self.model = joblib.load(self.model_name)
LOG.info(f"Loading encoder information...")
classes = np.load(
self.model_name.replace('.joblib','_encoder_classes.npy'),
allow_pickle=True
)
self.encoder.classes_ = classes
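# Hypothetical end-to-end usage of this class (the cycle number and file name below are
# placeholders, not shipped defaults):
#   pipe = PACManPipeline(cycle=28, model_name='example_model.joblib')
#   pipe.load_model()
#   pipe.read_unclassified_data(parallel=True)
#   pipe.apply_model(pipe.proposal_data['cycle_28'])
#   pipe.save_model_results()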
def preprocess(self, flist, parallel=False):
""" Perform the necessary pre-processing steps
Parameters
----------
flist : list
Paths of the proposal text files to read and tokenize.
parallel : bool
If True, tokenize the files concurrently with dask.
"""
if self.stop_words is None:
self.get_stop_words(fname=self.stop_words_file)
st = time.time()
data = defaultdict(list)
if parallel:
delayed_obj = [
dask.delayed(self.run_tokenization)(fname=f, plot=False)
for f in flist
]
with ProgressBar():
results = dask.compute(
*delayed_obj,
scheduler='threads',
num_workers=4
)
else:
results = [
self.run_tokenization(fname=f, plot=False)
for f in tqdm.tqdm(flist)
]
for i, (text, cleaned_text, tokens) in enumerate(results):
data['text'].append(text)
data['cleaned_text'].append(cleaned_text)
data['fname'].append(flist[i])
# Parse the proposal number from the file name
# TODO: Find a better way to extract numbers out of the filename
try:
proposal_num = int(
flist[i].split('/')[-1].split('_')[0]
)
except ValueError:
proposal_num = int(
flist[i].split('/')[-1].split('_')[0].split('.')[0]
)
data['proposal_num'].append(proposal_num)
df = pd.DataFrame(data)
et = time.time()
duration = (et - st)/60
LOG.info(f"Total time for preprocessing: {duration:.3f}")
return df
def read_unclassified_data(self, cycle=None, parallel=False, N=None):
""" Read in the data for the specified cycle and perform preprocessing
Parameters
----------
cycle : int, optional
Proposal cycle to read; overrides the cycle set at instantiation.
parallel : bool
If True, preprocess the proposals concurrently.
N : int, optional
Number of proposals to read; defaults to all available proposals.
Returns
-------
"""
if cycle is not None:
self.cycle = cycle
path_to_data = os.path.join(
self.unclassified_dir,
f"corpus_cy{self.cycle}"
)
flist = glob.glob(
f"{path_to_data}/*parsed_text.txt")
if N is None:
N = len(flist)
LOG.info(
(f"Reading in {N} proposals...\n"
f"Data Directory: {path_to_data}")
)
df = self.preprocess(flist=flist[:N], parallel=parallel)
self.proposal_data[f"cycle_{self.cycle}"] = df
class PACManTrain(PACManPipeline):
""" This class provides the functionality required for training a model
The core functionality is as follows,
* Read in multiple cycles worth of proposals and perform the
necessary pre-processing steps for text data
* Generate an encoding for mapping category names to integer
labels
* Create a scikit-learn Pipeline object for vectorizing training
data and feeding it into a multi-class classification model
* Write the trained model and encoder out file
Parameters
----------
cycles_to_analyze : list
A list of integers mapping to proposal cycles. Each proposal in the
list will be processed.
"""
def __init__(self, cycles_to_analyze=[24, 25]):
PACManPipeline.__init__(self)
# PACManTokenizer.__init__(self)
self.training_dir = os.path.join(
self.base,
'training_data'
)
self.cycles_to_analyze = cycles_to_analyze
self.proposal_data = {}
self.encoders = {}
def read_training_data(self, parallel=False):
""" Read in training data
For each cycle, read in and pre-process the proposals training corpora.
Note that in order for data to be used for training, it must have a
cycle_N_hand_classifications.txt file located in the same directory
as the training corpora.
Returns
-------
"""
# First, read in our custom list of stop words using the
# get_stop_words() method of the PACManTokenizer object
for cycle in self.cycles_to_analyze:
path_to_data = os.path.join(
self.training_dir,
f"training_corpus_cy{cycle}"
)
flist = glob.glob(
f"{path_to_data}/*training.txt")
N = len(flist)
LOG.info(
(f"Reading in {N} proposals...\n"
f"Data Directory: {path_to_data}")
)
df = self.preprocess(flist=flist, parallel=parallel)
hand_classifications = pd.read_csv(
f"{path_to_data}/cycle_{cycle}_hand_classifications.txt"
)
merged_df = | pd.merge(df, hand_classifications, on='proposal_num') | pandas.merge |
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from floris.utilities import wrap_360
from flasc.dataframe_operations import dataframe_manipulations as dfm
from flasc import floris_tools as ftools
from flasc.energy_ratio import energy_ratio_suite
from flasc.visualization import plot_floris_layout
def load_data():
# Load dataframe with artificial SCADA data
root_dir = os.path.dirname(os.path.abspath(__file__))
ftr_path = os.path.join(
root_dir, '..', 'demo_dataset', 'demo_dataset_scada_60s.ftr'
)
if not os.path.exists(ftr_path):
raise FileNotFoundError('Please run ./examples/demo_dataset/' +
'generate_demo_dataset.py before try' +
'ing any of the other examples.')
df = pd.read_feather(ftr_path)
return df
def load_floris():
# Load the FLORIS model for the artificial wind farm
from floris import tools as wfct
print('Initializing the FLORIS object for our demo wind farm')
file_path = os.path.dirname(os.path.abspath(__file__))
fi_path = os.path.join(file_path, "../demo_dataset/demo_floris_input.yaml")
fi = wfct.floris_interface.FlorisInterface(fi_path)
return fi
def get_energy_ratio(df, ti, wd_bins):
# Calculate and plot energy ratios
s = energy_ratio_suite.energy_ratio_suite(verbose=False)
s.add_df(df, 'Raw data (wind direction calibrated)')
return s.get_energy_ratios(
test_turbines=ti,
ws_bins=[[6.0, 10.0]],
wd_bins=wd_bins,
N=1,
percentiles=[5., 95.],
verbose=False,
balance_bins_between_dfs=False,
)
def _process_single_wd(wd, wd_bin_width, turb_wd_measurement, df_upstream, df):
# In this function, we calculate the energy ratios of all upstream
# turbines for a single wind direction bin and single wind speed bin.
# The difference in energy ratios between different upstream turbines
# gives a strong indication of the heterogeneity in the inflow wind
# speeds for that mean inflow wind direction.
print("Processing wind direction = {:.1f} deg.".format(wd))
wd_bins = [[wd - wd_bin_width / 2.0, wd + wd_bin_width / 2.0]]
# Determine which turbines are upstream
if wd > df_upstream.iloc[0]["wd_max"]:
turbine_array = df_upstream.loc[
(wd > df_upstream["wd_min"]) & (wd <= df_upstream["wd_max"]),
"turbines"
].values[0]
# deal with wd = 0 deg (or close to 0.0)
else:
turbine_array = df_upstream.loc[
(wrap_360(wd + 180) > wrap_360(df_upstream["wd_min"] + 180.0)) &
(wrap_360(wd + 180) <= wrap_360(df_upstream["wd_max"] + 180)),
"turbines"
].values[0]
# Load data and limit region
df = df.copy()
pow_cols = ["pow_{:03d}".format(t) for t in turbine_array]
df = df.dropna(subset=pow_cols)
# Filter dataframe and set a reference wd and ws
df = dfm.set_wd_by_turbines(df, turb_wd_measurement)
df = dfm.filter_df_by_wd(df, [wd - wd_bin_width, wd + wd_bin_width])
df = dfm.set_ws_by_turbines(df, turbine_array)
df = dfm.filter_df_by_ws(df, [6, 10])
# Set reference power for df and df_fi as the average power
# of all upstream turbines
df = dfm.set_pow_ref_by_turbines(df, turbine_array)
results_scada = []
for ti in turbine_array:
# Get energy ratios
er = get_energy_ratio(df, ti, wd_bins)
results_scada.append(er[0]["er_results"].loc[0])
results_scada = pd.concat(results_scada, axis=1).T
energy_ratios = np.array(results_scada["baseline"], dtype=float)
energy_ratios_lb = np.array(results_scada["baseline_lb"], dtype=float)
energy_ratios_ub = np.array(results_scada["baseline_ub"], dtype=float)
return pd.DataFrame({
"wd": [wd],
"wd_bin_width": [wd_bin_width],
"upstream_turbines": [turbine_array],
"energy_ratios": [energy_ratios],
"energy_ratios_lb": [energy_ratios_lb],
"energy_ratios_ub": [energy_ratios_ub],
"ws_ratios": [energy_ratios**(1/3)],
})
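# The wind-speed ratios returned above are taken as the cube root of the energy ratios, assuming
# turbine power in this below-rated regime scales with the cube of the wind speed.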
def _plot_single_wd(df):
fig, ax = plt.subplots()
turbine_array = df.loc[0, "upstream_turbines"]
x = range(len(turbine_array))
ax.fill_between(
x,
df.loc[0, "energy_ratios_lb"],
df.loc[0, "energy_ratios_ub"],
color="k",
alpha=0.30
)
ax.plot(x, df.loc[0, "energy_ratios"], "-o", color='k', label="SCADA")
ax.grid(True)
ax.set_xticks(x)
ax.set_xticklabels(["T{:03d}".format(t) for t in turbine_array])
ax.set_ylabel("Energy ratio of upstream turbines w.r.t. the average (-)")
ax.set_title("Wind direction = {:.2f} deg.".format(df.loc[0, "wd"]))
ax.set_ylim([0.85, 1.20])
return fig, ax
if __name__ == "__main__":
# Load FLORIS and plot the layout
fi = load_floris()
plot_floris_layout(fi, plot_terrain=False)
# Load the SCADA data
df_full = load_data()
# Now specify which turbines we want to use in the analysis. Basically,
# we want to use all the turbines besides the ones that we know have
# an unreliable wind direction measurement. Here, for explanation purposes,
# we just exclude turbine 3 from our analysis.
nturbs = len(fi.layout_x)
bad_turbs = [3] # Just hypothetical situation: assume turbine 3 gave faulty wind directions so we ignore it
turb_wd_measurement = [i for i in range(nturbs) if i not in bad_turbs]
# We use a wind direction bin width of 15 deg. Thus, if we look at
# heterogeneity with winds coming from the west (270 deg), then we
# use all data reporting a wind direction measurement between 262.5
# and 277.5 deg, when we have a wd_bin_width of 15.0 deg.
wd_bin_width = 15.0
# Now calculate which turbines are upstream and for what wind directions,
# using a very simplified model as part of FLASC.
df_upstream = ftools.get_upstream_turbs_floris(fi, wake_slope=0.3)
# Finally, for various wind directions, calculate the energy ratios of
# all upstream turbines. That gives a good idea of the heterogeneity
# in the inflow wind speeds. Namely, turbines that consistently see
# a higher energy ratio, also likely consistently see a higher wind speed.
df_list = []
for wd in np.arange(0.0, 360.0, 15.0):
df = _process_single_wd(wd, wd_bin_width, turb_wd_measurement, df_upstream, df_full)
fig, ax = _plot_single_wd(df) # Plot the results
df_list.append(df)
# Finally merge the results to a single dataframe and print
df = | pd.concat(df_list) | pandas.concat |