#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 23 11:11:57 2018
@author: kazuki.onodera
-d- -> /
-x- -> *
-p- -> +
-m- -> -
nohup python -u 000.py 0 > LOG/log_000.py_0.txt &
nohup python -u 000.py 1 > LOG/log_000.py_1.txt &
nohup python -u 000.py 2 > LOG/log_000.py_2.txt &
nohup python -u 000.py 3 > LOG/log_000.py_3.txt &
nohup python -u 000.py 4 > LOG/log_000.py_4.txt &
nohup python -u 000.py 5 > LOG/log_000.py_5.txt &
nohup python -u 000.py 6 > LOG/log_000.py_6.txt &
"""
import numpy as np
import pandas as pd
from multiprocessing import Pool, cpu_count
NTHREAD = cpu_count()
from itertools import combinations
from tqdm import tqdm
import sys
argv = sys.argv
import os, utils, gc
utils.start(__file__)
#==============================================================================
folders = [
# '../data',
'../feature', '../feature_unused',
# '../feature_var0', '../feature_corr1'
]
for fol in folders:
os.system(f'rm -rf {fol}')
os.system(f'mkdir {fol}')
col_app_money = ['app_AMT_INCOME_TOTAL', 'app_AMT_CREDIT', 'app_AMT_ANNUITY', 'app_AMT_GOODS_PRICE']
col_app_day = ['app_DAYS_BIRTH', 'app_DAYS_EMPLOYED', 'app_DAYS_REGISTRATION', 'app_DAYS_ID_PUBLISH', 'app_DAYS_LAST_PHONE_CHANGE']
def get_trte():
usecols = ['SK_ID_CURR', 'AMT_INCOME_TOTAL', 'AMT_CREDIT', 'AMT_ANNUITY', 'AMT_GOODS_PRICE']
usecols += ['DAYS_BIRTH', 'DAYS_EMPLOYED', 'DAYS_REGISTRATION', 'DAYS_ID_PUBLISH', 'DAYS_LAST_PHONE_CHANGE']
rename_di = {
'AMT_INCOME_TOTAL': 'app_AMT_INCOME_TOTAL',
'AMT_CREDIT': 'app_AMT_CREDIT',
'AMT_ANNUITY': 'app_AMT_ANNUITY',
'AMT_GOODS_PRICE': 'app_AMT_GOODS_PRICE',
'DAYS_BIRTH': 'app_DAYS_BIRTH',
'DAYS_EMPLOYED': 'app_DAYS_EMPLOYED',
'DAYS_REGISTRATION': 'app_DAYS_REGISTRATION',
'DAYS_ID_PUBLISH': 'app_DAYS_ID_PUBLISH',
'DAYS_LAST_PHONE_CHANGE': 'app_DAYS_LAST_PHONE_CHANGE',
}
trte = pd.concat([pd.read_csv('../input/application_train.csv.zip', usecols=usecols).rename(columns=rename_di),
pd.read_csv('../input/application_test.csv.zip', usecols=usecols).rename(columns=rename_di)],
ignore_index=True)
return trte
def prep_prev(df):
df['AMT_APPLICATION'].replace(0, np.nan, inplace=True)
df['AMT_CREDIT'].replace(0, np.nan, inplace=True)
df['CNT_PAYMENT'].replace(0, np.nan, inplace=True)
df['AMT_DOWN_PAYMENT'].replace(np.nan, 0, inplace=True)
df.loc[df['NAME_CONTRACT_STATUS']!='Approved', 'AMT_DOWN_PAYMENT'] = np.nan
df['RATE_DOWN_PAYMENT'].replace(np.nan, 0, inplace=True)
df.loc[df['NAME_CONTRACT_STATUS']!='Approved', 'RATE_DOWN_PAYMENT'] = np.nan
# df['xxx'].replace(0, np.nan, inplace=True)
# df['xxx'].replace(0, np.nan, inplace=True)
return
p = int(argv[1])
if True:
#def multi(p):
if p==0:
# =============================================================================
# application
# =============================================================================
def f1(df):
df['CODE_GENDER'] = 1 - (df['CODE_GENDER']=='F')*1 # 4 'XNA' are converted to 'M'
df['FLAG_OWN_CAR'] = (df['FLAG_OWN_CAR']=='Y')*1
df['FLAG_OWN_REALTY'] = (df['FLAG_OWN_REALTY']=='Y')*1
df['EMERGENCYSTATE_MODE'] = (df['EMERGENCYSTATE_MODE']=='Yes')*1
df['AMT_CREDIT-d-AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-d-AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] / df['AMT_INCOME_TOTAL']
df['AMT_CREDIT-d-AMT_ANNUITY'] = df['AMT_CREDIT'] / df['AMT_ANNUITY'] # approx. number of monthly payments needed to repay
df['AMT_GOODS_PRICE-d-AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] / df['AMT_ANNUITY'] # approx. number of monthly payments needed to repay
df['AMT_GOODS_PRICE-d-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['AMT_CREDIT']
df['AMT_GOODS_PRICE-m-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] - df['AMT_CREDIT']
df['AMT_GOODS_PRICE-m-AMT_CREDIT-d-AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE-m-AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['age_finish_payment'] = df['DAYS_BIRTH'].abs() + (df['AMT_CREDIT-d-AMT_ANNUITY']*30)
# df['age_finish_payment'] = (df['DAYS_BIRTH']/-365) + df['credit-d-annuity']
df.loc[df['DAYS_EMPLOYED']==365243, 'DAYS_EMPLOYED'] = np.nan
df['DAYS_EMPLOYED-m-DAYS_BIRTH'] = df['DAYS_EMPLOYED'] - df['DAYS_BIRTH']
df['DAYS_REGISTRATION-m-DAYS_BIRTH'] = df['DAYS_REGISTRATION'] - df['DAYS_BIRTH']
df['DAYS_ID_PUBLISH-m-DAYS_BIRTH'] = df['DAYS_ID_PUBLISH'] - df['DAYS_BIRTH']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_BIRTH'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_BIRTH']
df['DAYS_REGISTRATION-m-DAYS_EMPLOYED'] = df['DAYS_REGISTRATION'] - df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-m-DAYS_EMPLOYED'] = df['DAYS_ID_PUBLISH'] - df['DAYS_EMPLOYED']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_EMPLOYED'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-m-DAYS_REGISTRATION'] = df['DAYS_ID_PUBLISH'] - df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_REGISTRATION'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_ID_PUBLISH'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_ID_PUBLISH']
col = ['DAYS_EMPLOYED-m-DAYS_BIRTH',
'DAYS_REGISTRATION-m-DAYS_BIRTH',
'DAYS_ID_PUBLISH-m-DAYS_BIRTH',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_BIRTH',
'DAYS_REGISTRATION-m-DAYS_EMPLOYED',
'DAYS_ID_PUBLISH-m-DAYS_EMPLOYED',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_EMPLOYED',
'DAYS_ID_PUBLISH-m-DAYS_REGISTRATION',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_REGISTRATION',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_ID_PUBLISH'
]
col_comb = list(combinations(col, 2))
for i,j in col_comb:
df[f'{i}-d-{j}'] = df[i] / df[j]
df['DAYS_EMPLOYED-d-DAYS_BIRTH'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['DAYS_REGISTRATION-d-DAYS_BIRTH'] = df['DAYS_REGISTRATION'] / df['DAYS_BIRTH']
df['DAYS_ID_PUBLISH-d-DAYS_BIRTH'] = df['DAYS_ID_PUBLISH'] / df['DAYS_BIRTH']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_BIRTH'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['DAYS_REGISTRATION-d-DAYS_EMPLOYED'] = df['DAYS_REGISTRATION'] / df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-d-DAYS_EMPLOYED'] = df['DAYS_ID_PUBLISH'] / df['DAYS_EMPLOYED']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_EMPLOYED'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-d-DAYS_REGISTRATION'] = df['DAYS_ID_PUBLISH'] / df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_REGISTRATION'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_ID_PUBLISH'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_ID_PUBLISH']
df['OWN_CAR_AGE-d-DAYS_BIRTH'] = (df['OWN_CAR_AGE']*(-365)) / df['DAYS_BIRTH']
df['OWN_CAR_AGE-m-DAYS_BIRTH'] = df['DAYS_BIRTH'] + (df['OWN_CAR_AGE']*365)
df['OWN_CAR_AGE-d-DAYS_EMPLOYED'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['OWN_CAR_AGE-m-DAYS_EMPLOYED'] = df['DAYS_EMPLOYED'] + (df['OWN_CAR_AGE']*365)
df['cnt_adults'] = df['CNT_FAM_MEMBERS'] - df['CNT_CHILDREN']
df['CNT_CHILDREN-d-CNT_FAM_MEMBERS'] = df['CNT_CHILDREN'] / df['CNT_FAM_MEMBERS']
df['income_per_adult'] = df['AMT_INCOME_TOTAL'] / df['cnt_adults']
# df.loc[df['CNT_CHILDREN']==0, 'CNT_CHILDREN'] = np.nan
df['AMT_INCOME_TOTAL-d-CNT_CHILDREN'] = df['AMT_INCOME_TOTAL'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_CREDIT-d-CNT_CHILDREN'] = df['AMT_CREDIT'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_ANNUITY-d-CNT_CHILDREN'] = df['AMT_ANNUITY'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_GOODS_PRICE-d-CNT_CHILDREN'] = df['AMT_GOODS_PRICE'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_INCOME_TOTAL-d-cnt_adults'] = df['AMT_INCOME_TOTAL'] / df['cnt_adults']
df['AMT_CREDIT-d-cnt_adults'] = df['AMT_CREDIT'] / df['cnt_adults']
df['AMT_ANNUITY-d-cnt_adults'] = df['AMT_ANNUITY'] / df['cnt_adults']
df['AMT_GOODS_PRICE-d-cnt_adults'] = df['AMT_GOODS_PRICE'] / df['cnt_adults']
df['AMT_INCOME_TOTAL-d-CNT_FAM_MEMBERS'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['AMT_CREDIT-d-CNT_FAM_MEMBERS'] = df['AMT_CREDIT'] / df['CNT_FAM_MEMBERS']
df['AMT_ANNUITY-d-CNT_FAM_MEMBERS'] = df['AMT_ANNUITY'] / df['CNT_FAM_MEMBERS']
df['AMT_GOODS_PRICE-d-CNT_FAM_MEMBERS'] = df['AMT_GOODS_PRICE'] / df['CNT_FAM_MEMBERS']
# EXT_SOURCE_x
df['EXT_SOURCES_prod'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
df['EXT_SOURCES_sum'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].sum(axis=1)
df['EXT_SOURCES_sum'] = df['EXT_SOURCES_sum'].fillna(df['EXT_SOURCES_sum'].mean())
df['EXT_SOURCES_mean'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
df['EXT_SOURCES_mean'] = df['EXT_SOURCES_mean'].fillna(df['EXT_SOURCES_mean'].mean())
df['EXT_SOURCES_std'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
df['EXT_SOURCES_std'] = df['EXT_SOURCES_std'].fillna(df['EXT_SOURCES_std'].mean())
df['EXT_SOURCES_1-2-3'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_2'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_2-1-3'] = df['EXT_SOURCE_2'] - df['EXT_SOURCE_1'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_1-2'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_2']
df['EXT_SOURCES_2-3'] = df['EXT_SOURCE_2'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_1-3'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_3']
# =========
# https://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features/code
# =========
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
# =========
# https://www.kaggle.com/poohtls/fork-of-fork-lightgbm-with-simple-features/code
# =========
docs = [_f for _f in df.columns if 'FLAG_DOC' in _f]
live = [_f for _f in df.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]
inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']
df['alldocs_kurt'] = df[docs].kurtosis(axis=1)
df['alldocs_skew'] = df[docs].skew(axis=1)
df['alldocs_mean'] = df[docs].mean(axis=1)
df['alldocs_sum'] = df[docs].sum(axis=1)
df['alldocs_std'] = df[docs].std(axis=1)
df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1)
df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] / (1 + df['CNT_CHILDREN'])
df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org)
df['NEW_ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / (1 + df['AMT_INCOME_TOTAL'])
df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['NEW_PHONE_TO_EMPLOYED_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
# =============================================================================
# Maxwell features
# =============================================================================
bdg_avg = df.filter(regex='_AVG$').columns
bdg_mode = df.filter(regex='_MODE$').columns
bdg_medi = df.filter(regex='_MEDI$').columns[:len(bdg_avg)] # ignore FONDKAPREMONT_MODE...
df['building_score_avg_mean'] = df[bdg_avg].mean(1)
df['building_score_avg_std'] = df[bdg_avg].std(1)
df['building_score_avg_sum'] = df[bdg_avg].sum(1)
df['building_score_mode_mean'] = df[bdg_mode].mean(1)
df['building_score_mode_std'] = df[bdg_mode].std(1)
df['building_score_mode_sum'] = df[bdg_mode].sum(1)
df['building_score_medi_mean'] = df[bdg_medi].mean(1)
df['building_score_medi_std'] = df[bdg_medi].std(1)
df['building_score_medi_sum'] = df[bdg_medi].sum(1)
df['maxwell_feature_1'] = (df['EXT_SOURCE_1'] * df['EXT_SOURCE_3']) ** (1 / 2)
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
return
df = pd.read_csv('../input/application_train.csv.zip')
f1(df)
utils.to_pickles(df, '../data/train', utils.SPLIT_SIZE)
utils.to_pickles(df[['TARGET']], '../data/label', utils.SPLIT_SIZE)
df = pd.read_csv('../input/application_test.csv.zip')
f1(df)
utils.to_pickles(df, '../data/test', utils.SPLIT_SIZE)
df[['SK_ID_CURR']].to_pickle('../data/sub.p')
elif p==1:
# =============================================================================
# prev
# =============================================================================
"""
df = utils.read_pickles('../data/previous_application')
"""
df = pd.merge(pd.read_csv('../data/prev_new_v4.csv.gz'),
get_trte(), on='SK_ID_CURR', how='left')
# df = pd.merge(pd.read_csv('../input/previous_application.csv.zip'),
# get_trte(), on='SK_ID_CURR', how='left')
prep_prev(df)
df['FLAG_LAST_APPL_PER_CONTRACT'] = (df['FLAG_LAST_APPL_PER_CONTRACT']=='Y')*1
# day
for c in ['DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE', 'DAYS_LAST_DUE_1ST_VERSION',
'DAYS_LAST_DUE', 'DAYS_TERMINATION']:
df.loc[df[c]==365243, c] = np.nan
df['days_fdue-m-fdrw'] = df['DAYS_FIRST_DUE'] - df['DAYS_FIRST_DRAWING']
df['days_ldue1-m-fdrw'] = df['DAYS_LAST_DUE_1ST_VERSION'] - df['DAYS_FIRST_DRAWING']
df['days_ldue-m-fdrw'] = df['DAYS_LAST_DUE'] - df['DAYS_FIRST_DRAWING'] # total span
df['days_trm-m-fdrw'] = df['DAYS_TERMINATION'] - df['DAYS_FIRST_DRAWING']
df['days_ldue1-m-fdue'] = df['DAYS_LAST_DUE_1ST_VERSION'] - df['DAYS_FIRST_DUE']
df['days_ldue-m-fdue'] = df['DAYS_LAST_DUE'] - df['DAYS_FIRST_DUE']
df['days_trm-m-fdue'] = df['DAYS_TERMINATION'] - df['DAYS_FIRST_DUE']
df['days_ldue-m-ldue1'] = df['DAYS_LAST_DUE'] - df['DAYS_LAST_DUE_1ST_VERSION']
df['days_trm-m-ldue1'] = df['DAYS_TERMINATION'] - df['DAYS_LAST_DUE_1ST_VERSION']
df['days_trm-m-ldue'] = df['DAYS_TERMINATION'] - df['DAYS_LAST_DUE']
# money
df['total_debt'] = df['AMT_ANNUITY'] * df['CNT_PAYMENT']
df['AMT_CREDIT-d-total_debt'] = df['AMT_CREDIT'] / df['total_debt']
df['AMT_GOODS_PRICE-d-total_debt'] = df['AMT_GOODS_PRICE'] / df['total_debt']
df['AMT_GOODS_PRICE-d-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['AMT_CREDIT']
# app & money
df['AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-d-app_AMT_INCOME_TOTAL'] = df['AMT_APPLICATION'] / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-m-app_AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] - df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_INCOME_TOTAL'] = df['AMT_APPLICATION'] - df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] - df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] - df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_CREDIT'] = df['AMT_ANNUITY'] / df['app_AMT_CREDIT']
df['AMT_APPLICATION-d-app_AMT_CREDIT'] = df['AMT_APPLICATION'] / df['app_AMT_CREDIT']
df['AMT_CREDIT-d-app_AMT_CREDIT'] = df['AMT_CREDIT'] / df['app_AMT_CREDIT']
df['AMT_GOODS_PRICE-d-app_AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['app_AMT_CREDIT']
df['AMT_ANNUITY-m-app_AMT_CREDIT'] = df['AMT_ANNUITY'] - df['app_AMT_CREDIT']
df['AMT_APPLICATION-m-app_AMT_CREDIT'] = df['AMT_APPLICATION'] - df['app_AMT_CREDIT']
df['AMT_CREDIT-m-app_AMT_CREDIT'] = df['AMT_CREDIT'] - df['app_AMT_CREDIT']
df['AMT_GOODS_PRICE-m-app_AMT_CREDIT'] = df['AMT_GOODS_PRICE'] - df['app_AMT_CREDIT']
df['AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_ANNUITY'] = df['AMT_ANNUITY'] / df['app_AMT_ANNUITY']
df['AMT_APPLICATION-d-app_AMT_ANNUITY'] = df['AMT_APPLICATION'] / df['app_AMT_ANNUITY']
df['AMT_CREDIT-d-app_AMT_ANNUITY'] = df['AMT_CREDIT'] / df['app_AMT_ANNUITY']
df['AMT_GOODS_PRICE-d-app_AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] / df['app_AMT_ANNUITY']
df['AMT_ANNUITY-m-app_AMT_ANNUITY'] = df['AMT_ANNUITY'] - df['app_AMT_ANNUITY']
df['AMT_APPLICATION-m-app_AMT_ANNUITY'] = df['AMT_APPLICATION'] - df['app_AMT_ANNUITY']
df['AMT_CREDIT-m-app_AMT_ANNUITY'] = df['AMT_CREDIT'] - df['app_AMT_ANNUITY']
df['AMT_GOODS_PRICE-m-app_AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] - df['app_AMT_ANNUITY']
df['AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_GOODS_PRICE'] = df['AMT_ANNUITY'] / df['app_AMT_GOODS_PRICE']
df['AMT_APPLICATION-d-app_AMT_GOODS_PRICE'] = df['AMT_APPLICATION'] / df['app_AMT_GOODS_PRICE']
df['AMT_CREDIT-d-app_AMT_GOODS_PRICE'] = df['AMT_CREDIT'] / df['app_AMT_GOODS_PRICE']
df['AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE'] = df['AMT_GOODS_PRICE'] / df['app_AMT_GOODS_PRICE']
df['AMT_ANNUITY-m-app_AMT_GOODS_PRICE'] = df['AMT_ANNUITY'] - df['app_AMT_GOODS_PRICE']
df['AMT_APPLICATION-m-app_AMT_GOODS_PRICE'] = df['AMT_APPLICATION'] - df['app_AMT_GOODS_PRICE']
df['AMT_CREDIT-m-app_AMT_GOODS_PRICE'] = df['AMT_CREDIT'] - df['app_AMT_GOODS_PRICE']
df['AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE'] = df['AMT_GOODS_PRICE'] - df['app_AMT_GOODS_PRICE']
df['AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
# nejumi
f_name='nejumi'; init_rate=0.9; n_iter=500
df['AMT_ANNUITY_d_AMT_CREDIT_temp'] = df.AMT_ANNUITY / df.AMT_CREDIT
df[f_name] = df['AMT_ANNUITY_d_AMT_CREDIT_temp']*((1 + init_rate)**df.CNT_PAYMENT - 1)/((1 + init_rate)**df.CNT_PAYMENT)
for i in range(n_iter):
df[f_name] = df['AMT_ANNUITY_d_AMT_CREDIT_temp']*((1 + df[f_name])**df.CNT_PAYMENT - 1)/((1 + df[f_name])**df.CNT_PAYMENT)
df.drop(['AMT_ANNUITY_d_AMT_CREDIT_temp'], axis=1, inplace=True)
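# A sketch of the math behind the 'nejumi' feature above (the numbers below are
# hypothetical, not taken from the data): for an annuity A, credit amount C and
# n = CNT_PAYMENT instalments, the per-period rate r satisfies
# A = C * r * (1+r)**n / ((1+r)**n - 1), i.e. r = (A/C) * ((1+r)**n - 1) / (1+r)**n,
# which is the fixed-point iteration run above starting from init_rate.
#   A, C, n = 5000.0, 100000.0, 24
#   r = 0.9
#   for _ in range(500):
#       r = (A / C) * ((1 + r) ** n - 1) / (1 + r) ** n
#   # r converges to roughly 0.015, the implied per-instalment interest rate.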
df.sort_values(['SK_ID_CURR', 'DAYS_DECISION'], inplace=True)
df.reset_index(drop=True, inplace=True)
col = [
'total_debt',
'AMT_CREDIT-d-total_debt',
'AMT_GOODS_PRICE-d-total_debt',
'AMT_GOODS_PRICE-d-AMT_CREDIT',
'AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-d-app_AMT_CREDIT',
'AMT_APPLICATION-d-app_AMT_CREDIT',
'AMT_CREDIT-d-app_AMT_CREDIT',
'AMT_GOODS_PRICE-d-app_AMT_CREDIT',
'AMT_ANNUITY-d-app_AMT_ANNUITY',
'AMT_APPLICATION-d-app_AMT_ANNUITY',
'AMT_CREDIT-d-app_AMT_ANNUITY',
'AMT_GOODS_PRICE-d-app_AMT_ANNUITY',
'AMT_ANNUITY-d-app_AMT_GOODS_PRICE',
'AMT_APPLICATION-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT-d-app_AMT_GOODS_PRICE',
'AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE',
'AMT_ANNUITY-m-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_CREDIT',
'AMT_APPLICATION-m-app_AMT_CREDIT',
'AMT_CREDIT-m-app_AMT_CREDIT',
'AMT_GOODS_PRICE-m-app_AMT_CREDIT',
'AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_ANNUITY',
'AMT_APPLICATION-m-app_AMT_ANNUITY',
'AMT_CREDIT-m-app_AMT_ANNUITY',
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY',
'AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE',
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE',
'AMT_CREDIT-m-app_AMT_GOODS_PRICE',
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE',
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'nejumi'
]
def multi_prev(c):
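# For column c, walk the rows (already sorted by SK_ID_CURR and DAYS_DECISION above)
# and, within each SK_ID_CURR, record the difference from the previous application
# (x - x_prev) and the relative drop ((x_prev - x) / x_prev); the first row of each
# SK_ID_CURR gets None.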
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_CURR', c]].values:
# for key, x in tqdm(df[['SK_ID_CURR', c]].values, mininterval=30):
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
import os
from datetime import datetime
import time
from sklearn.preprocessing import StandardScaler
import plotly.express as px
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import seaborn as sns
import math
import statsmodels.api as sm
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.seasonal import seasonal_decompose
from scipy import stats
from itertools import product
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import os
"""
Read the info and train data
"""
info = pd.read_csv("asset_details.csv")
ctrain = pd.read_csv("train.csv")
#print(info.head(10))
# Impute missing time value
def c_time_sub(asset_id,data=ctrain):
df=data[ctrain["Asset_ID"]==asset_id].set_index("timestamp")
df=df.reindex(range(df.index[0],df.index[-1]+60,60), method="pad")
return df
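# e.g. if a minute is missing between two timestamps, reindex() inserts a row for it
# every 60 seconds and method="pad" forward-fills it with the previous row's values.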
# subgroup BTC(bitcoin)
btc = c_time_sub(asset_id=1)
# subgroup ETH(Ethereum)
eth = c_time_sub(asset_id=6)
# subgroup ADA (Cardano)
ada = c_time_sub(asset_id=3)
#print("btc",btc.head(10),"eth",eth.head(10),"ADA",ada.head(10))
# time frame selection: from datetime to timestamp
totimestamp= lambda s: np.int32(time.mktime(datetime.strptime(s,"%d/%m/%Y").timetuple()))
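# e.g. totimestamp("01/01/2021") returns the Unix timestamp (in seconds) of that date
# at midnight, interpreted in the local timezone by time.mktime.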
# Log Return
def log_return(series, periods=1):
return np.log(series).diff(periods=periods)
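# log_return computes r_t = ln(P_t) - ln(P_{t-periods}); for small moves this is close
# to the simple percentage return. For example (hypothetical prices):
#   log_return(pd.Series([100.0, 101.0, 99.0]))  # -> [NaN, ln(101/100), ln(99/101)]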
# Data Selection
def crypto_sub(asset_id ,data= ctrain ):
df = data[data["Asset_ID"]==asset_id].reset_index(drop = True)
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Created on Fri Aug 9 14:01:22 2019
@author: cherrabi
"""
from P2N_Lib import GenereListeFichiers # import
from P2N_Config import LoadConfig #
import os # the os library, used here for path handling and directory creation
from textblob import TextBlob # TextBlob, a linguistic analysis toolkit
from nltk.corpus import stopwords
import nltk
from sematch.semantic.similarity import WordNetSimilarity
from nltk.corpus import wordnet as wn
import pandas as pd
import re
import shutil
import sys
from nltk.corpus import stopwords
import numpy as np
import pandas as pd
import re
import umap
import matplotlib.pyplot as plt
import seaborn as sns
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
import string
import gensim
from gensim import corpora
from gensim.corpora import Dictionary
from sklearn.decomposition import TruncatedSVD
import os
import re
import codecs
import logging
import time
from operator import add
from textblob import TextBlob # TextBlob, a linguistic analysis toolkit
from nltk.corpus import stopwords
from P2N_Lib import LoadBiblioFile
from P2N_Lib import GenereListeFichiers
from P2N_Config import LoadConfig
from nltk.corpus import wordnet
import spacy
import en_core_web_sm
from itertools import product
ListeBrevet = [] # The patent List
stop_words = set(stopwords.words('english'))
configFile = LoadConfig()
requete = configFile.requete
BiblioPath = configFile.ResultBiblioPath
GatherContent = configFile.GatherContent
GatherBiblio = configFile.GatherBiblio
GatherPatent = configFile.GatherPatent
GatherFamilly = configFile.GatherFamilly
IsEnableScript = configFile.GatherIramuteq
ResultBiblioPath = configFile.ResultBiblioPath
ndf = configFile.ndf
DataBrevet = LoadBiblioFile(BiblioPath, ndf)
InventorList = []
InventorList = DataBrevet['brevets']
# prepare the parsed data for the scientific-publication and inventive-production indicators
inventor_list = [auth['inventor'] for auth in DataBrevet['brevets']]
label_list = [auth['label'] for auth in DataBrevet['brevets']]
title_list = [auth['title'] for auth in DataBrevet['brevets']]
data_dict = { 'label' : label_list, 'title' : title_list, 'inventor' : inventor_list }
df = pd.DataFrame(data_dict)
df.to_csv("data_inventor.csv", header=False, index=False)
temporPath = configFile.temporPath
ResultAbstractPath = configFile.ResultAbstractPath
#ResultClaimsPath = configFile.ResultClaimsPath
# add here the templateFlask directory local to the request directory; normalize the path for Windows
ResultPathContent= configFile.ResultContentsPath.replace('\\', '/' )
ResultTemplateFlask = os.path.join(ResultPathContent,'Trizifiier').replace('\\','/')
bigram_measures = nltk.collocations.BigramAssocMeasures()
trigram_measures = nltk.collocations.TrigramAssocMeasures()
if not os.path.exists(ResultTemplateFlask): # create the templates and DataFormat directories
os.mkdir(ResultTemplateFlask)
if not os.path.exists(ResultTemplateFlask+'/templates'):
os.mkdir(ResultTemplateFlask+'/templates')
if not os.path.exists(ResultTemplateFlask+'/DataFormat'):
os.mkdir(ResultTemplateFlask+'/DataFormat')
#add here tempo dir
temporar = configFile.temporPath
wns = WordNetSimilarity()
i=0
# build file list
#direct = os.path.normpath(ResultBiblioPath)
#direct = os.path.normpath(ResultClaimsPath)
direct = os.path.normpath(ResultAbstractPath)
# list the path of every .txt document in the folder of the submitted query, per language (Fr, En, Unknown)
Fr, En, Unk = GenereListeFichiers(direct)
def convert_tag(tag):
tag_dict = {'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}
try:
return tag_dict[tag[0]]
except KeyError:
return None
CountFile_R = 0
CountFile_W = 0
FichierOrg={}
# count the number of characters in En
#if len(En)
PSW = [] # empty word list, to be filled in as the searches progress
# minimalistic HTML for result file in html format
dataF = """""" # va contenir tous les abstracts du dossier de la requete
import codecs
#DejaVus = dict()
f=open(ResultTemplateFlask + '/DataFormat/FileDataAnalysisTrizWikiE.csv','w')
entetes = [
u'i',
u'label',
u'classe',
u'Action',
u'indiceSimAction',
u'abstract',
u'urlEspacenet'
]
ligneEntete=",".join(entetes)+"\n"
f.write(ligneEntete)
d= pd.read_csv("trizOxfordData.csv",delimiter=";")
dnew= pd.read_csv("FileTrizNewList.csv",delimiter=",")
classes = pd.DataFrame(dnew,columns=['Ref_classe'])
classes_syn = pd.DataFrame(dnew,columns=['syn_classe'])
classesUnique = classes.drop_duplicates(keep = False)
expansionTriz = classes_syn.drop_duplicates(keep = False)
tal = spacy.load('en_core_web_sm')
# loop over the .txt files and load their content into dataF
for fic in En:
with codecs.open(fic, 'r', 'utf8') as File:
dataF = File.readlines() # single file; do not read the first line of the abstract
# dataF = '\n'.join(dataF)
# FichierOrg = dataF
abstract = '\n'.join(dataF[1:])
NumberBrevet= fic.split('-')[1]
#NumberBrevet=NumberBrevet.replace('*Label_','')
NumberBrevet=NumberBrevet.replace('.txt','')
#sys.exit(0)
# tokenization
abstract = re.sub("[^a-zA-Z#]", " ",str(abstract))
brevet = tal(abstract)
#Blob = TextBlob(abstract)
#wordlist=Blob.words #should give best results@ DR
# remove stop-words and words shorter than 3 characters
filtered_sentence = [mot.lemma_ for mot in brevet if mot.pos_ == "NOUN" or mot.pos_ == "VERB"]
#for w in wordlist:
#if w not in stop_words and len(w) > 3:
#filtered_sentence.append(w)
#Document-Term Matrix
#print(filtered_sentence)
#print(resultType)
urlEspacenet="https://worldwide.espacenet.com/searchResults?submitted=true&locale=fr_EP&DB=EPODOC&ST=advanced&TI=&AB=&PN="+format(NumberBrevet)
matriceListe = []
matricelistePaire = []
matricelistePaireSort=[]
matricelistePaireAction = []
matricelistePaireObject = []
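# The loop below is meant to, for each TRIZ expansion column, collect the WordNet
# synsets of the expansion terms (allsyns1) and of the nouns/verbs lemmatised from the
# abstract (allsyns2), then keep the synset pair with the highest Wu-Palmer similarity
# (wordnet.wup_similarity) as the best match.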
for classe in expansionTriz.keys() :
ExpansionClasse = expansionTriz[classe]
allsyns1 = set(ss for word in ExpansionClasse for ss in wordnet.synsets(word))
allsyns2 = set(ss for word in filtered_sentence for ss in wordnet.synsets(word))
best = max((wordnet.wup_similarity(s1, s2) or 0, s1, s2) for s1, s2 in product(allsyns1, allsyns2))
#print("allsyns1 ========",allsyns1)
#print("\n")
#print("allsyns2========",allsyns2)
print("best: ", best)
print("\n")
sys.exit()
f.close()
sys.exit()
# open the semantic classification data file
d= pd.read_csv(ResultTemplateFlask + "/DataFormat/FileDataAnalysisTrizWikiE.csv")
df = pd.DataFrame(d,columns=['i','label','Term','Action','indiceSimAction','abstract','urlEspacenet'])
df.to_csv(ResultTemplateFlask + '/DataFormat/tableauTriE.csv')
sys.exit(0) # I want the csv generated up to here, because from this line on the classes will be displayed differently
# sort data by id and term ascending, similarity descending
dfmax = df.sort_values(by=['i','Term','indiceSimAction'],ascending=[True,True,False])
dfmax.to_csv(ResultTemplateFlask + '/DataFormat/tableauTri.csv')
# keep only the top similarity score for each term / action
dresult = dfmax.drop_duplicates(['Term'],keep='first')
dresult.to_csv(ResultTemplateFlask + '/DataFormat/tableauDrop.csv')
dresultmaxI=dresult.sort_values(by='indiceSimAction')
# create the formatted data file used by the Tabulator HTML table
dresultmaxI.to_csv(ResultTemplateFlask + '/DataFormat/resultatParserV2.csv')
dd = pd.read_csv(ResultTemplateFlask + '/DataFormat/resultatParserV2.csv')
# -*- coding: utf-8 -*-
import pandas as pd
from fooltrader.api import quote
import datetime
def ma(security_item, start_date, end_date, level='day', fuquan='qfq', source='163', window=5,
col=['close', 'volume', 'turnover'], return_all=False, return_col=True):
"""
calculate ma.
Parameters
----------
security_item : SecurityItem or str
the security item,id or code
start_date : TimeStamp str or TimeStamp
start date
end_date : TimeStamp str or TimeStamp
end date
fuquan : str
{"qfq","hfq","bfq"},default:"qfq"
source : str
the data source,{'163','sina'},default: '163'
level : str or int
the kdata level,{1,5,15,30,60,'day','week','month'},default : 'day'
window : int
the ma window,default : 5
col : list
the column for calculating,['close', 'volume', 'turnover'],default:['close', 'volume', 'turnover']
return_all : bool
whether return all the kdata values,default:False
return_col : bool
whether return the calculating col too,default:True
Returns
-------
DataFrame
"""
df = quote.get_kdata(security_item, fuquan=fuquan, start_date=start_date, end_date=end_date, source=source,
level=level)
df_col = df.loc[:, col]
df_result = df_col.rolling(window=window, min_periods=window).mean()
df_result.columns = ["{}_ma{}".format(item, window) for item in col]
if return_all:
df_result = pd.concat([df, df_result], axis=1)
elif return_col:
df_result = pd.concat([df_col, df_result], axis=1)
return df_result
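# A hedged usage sketch (the security code and date range are placeholders):
#   df_ma = ma('000001', start_date='2017-01-01', end_date='2017-12-31', window=5)
#   # -> DataFrame with close/volume/turnover plus close_ma5, volume_ma5, turnover_ma5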
def ema(security_item, start_date, end_date, level='day', fuquan='qfq', source='163', window=12, col=['close'],
return_all=False, return_col=True):
"""
calculate ema.
Parameters
----------
security_item : SecurityItem or str
the security item,id or code
start_date : TimeStamp str or TimeStamp
start date
end_date : TimeStamp str or TimeStamp
end date
fuquan : str
{"qfq","hfq","bfq"},default:"qfq"
source : str
the data source,{'163','sina'},default: '163'
level : str or int
the kdata level,{1,5,15,30,60,'day','week','month'},default : 'day'
window : int
the ma window,default : 12
col : list
the column for calculating,['close', 'volume', 'turnover'],default:['close']
return_all : bool
whether return all the kdata values,default:False
return_col : bool
whether return the calculating col too,default:True
Returns
-------
DataFrame
"""
df = quote.get_kdata(security_item, fuquan=fuquan, start_date=start_date, end_date=end_date, source=source,
level=level)
df_col = df.loc[:, col]
df_result = df_col.ewm(span=window, adjust=False, min_periods=window).mean()
df_result.columns = ["{}_ema{}".format(item, window) for item in col]
if return_all:
df_result = pd.concat([df, df_result], axis=1)
elif return_col:
df_result = pd.concat([df_col, df_result], axis=1)
return df_result
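# A similar hedged sketch for ema (placeholder arguments):
#   df_ema = ema('000001', start_date='2017-01-01', end_date='2017-12-31', window=12)
#   # -> DataFrame with the original close column plus close_ema12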
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Prepare datasets
train_ds = pd.read_csv("train.csv")
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
# incorrectly inferring datetimelike-looking values when object dtype is
# specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
# even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
# invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx, dtype=np.float64)
expected.iloc[0] = 0
expected.iloc[1] = 1
tm.assert_series_equal(result, expected)
def test_constructor_dict_list_value_explicit_dtype(self):
# GH 18625
d = {"a": [[2], [3], [4]]}
result = Series(d, index=["a"], dtype="object")
expected = Series(d, index=["a"])
tm.assert_series_equal(result, expected)
def test_constructor_dict_order(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6, else
# order by value
d = {"b": 1, "a": 0, "c": 2}
result = Series(d)
expected = Series([1, 0, 2], index=list("bac"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_constructor_dict_extension(self, data, dtype):
d = {"a": data}
result = Series(d, index=["a"])
expected = Series(data, index=["a"], dtype=dtype)
assert result.dtype == dtype
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: "a", value: "b", float("nan"): "c", 4: "d"}
result = Series(d).sort_values()
expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4])
tm.assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"}
result = Series(d).sort_values()
expected = Series(
["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)])
)
tm.assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
tm.assert_series_equal(result_datetime64, expected)
tm.assert_series_equal(result_datetime, expected)
tm.assert_series_equal(result_Timestamp, expected)
def test_constructor_dict_tuple_indexer(self):
# GH 12948
data = {(1, 1, None): -1.0}
result = Series(data)
expected = Series(
-1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])
)
tm.assert_series_equal(result, expected)
def test_constructor_mapping(self, non_dict_mapping_subclass):
# GH 29788
ndm = non_dict_mapping_subclass({3: "three"})
result = Series(ndm)
expected = Series(["three"], index=[3])
tm.assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
assert tuple(s) == data
def test_constructor_dict_of_tuples(self):
data = {(1, 2): 3, (None, 5): 6}
result = Series(data).sort_values()
expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)]))
tm.assert_series_equal(result, expected)
def test_constructor_set(self):
values = {1, 2, 3, 4, 5}
with pytest.raises(TypeError, match="'set' type is unordered"):
Series(values)
values = frozenset(values)
with pytest.raises(TypeError, match="'frozenset' type is unordered"):
Series(values)
# https://github.com/pandas-dev/pandas/issues/22698
@pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning")
def test_fromDict(self):
data = {"a": 0, "b": 1, "c": 2, "d": 3}
series = Series(data)
tm.assert_is_sorted(series.index)
data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()}
series = Series(data)
assert series.dtype == np.object_
data = {"a": 0, "b": "1", "c": "2", "d": "3"}
series = Series(data)
assert series.dtype == np.object_
data = {"a": "0", "b": "1"}
series = Series(data, dtype=float)
assert series.dtype == np.float64
def test_fromValue(self, datetime_series):
nans = Series(np.NaN, index=datetime_series.index, dtype=np.float64)
assert nans.dtype == np.float_
assert len(nans) == len(datetime_series)
strings = Series("foo", index=datetime_series.index)
assert strings.dtype == np.object_
assert len(strings) == len(datetime_series)
d = datetime.now()
dates = Series(d, index=datetime_series.index)
assert dates.dtype == "M8[ns]"
assert len(dates) == len(datetime_series)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=datetime_series.index, dtype="category")
expected = Series(0, index=datetime_series.index).astype("category")
assert categorical.dtype == "category"
assert len(categorical) == len(datetime_series)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# mixed with NaT
td = Series([timedelta(days=1), NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), np.nan], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(300000000), pd.NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), NaT])
assert td.dtype == "timedelta64[ns]"
# because iNaT is int, not coerced to timedelta
td = Series([np.timedelta64(300000000), iNaT])
assert td.dtype == "object"
td = Series([np.timedelta64(300000000), np.nan])
assert td.dtype == "timedelta64[ns]"
td = Series([pd.NaT, np.timedelta64(300000000)])
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# these are frequency conversion astypes
# for t in ['s', 'D', 'us', 'ms']:
# with pytest.raises(TypeError):
# td.astype('m8[%s]' % t)
# valid astype
td.astype("int64")
# invalid casting
msg = r"cannot astype a timedelta from \[timedelta64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
td.astype("int32")
# this is an invalid casting
msg = "Could not convert object to NumPy timedelta"
with pytest.raises(ValueError, match=msg):
Series([timedelta(days=1), "foo"], dtype="m8[ns]")
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ["foo"])
assert td.dtype == "object"
# these will correctly infer a timedelta
s = Series([None, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([np.nan, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, None, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, np.nan, "1 Day"])
assert s.dtype == "timedelta64[ns]"
# GH 16406
def test_constructor_mixed_tz(self):
s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")])
expected = Series(
[Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(s, expected)
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype="M8[ns]")
val = series[3]
assert isna(val)
series[2] = val
assert isna(series[2])
def test_NaT_cast(self):
# GH10747
result = Series([np.nan]).astype("M8[ns]")
expected = Series([NaT])
tm.assert_series_equal(result, expected)
def test_constructor_name_hashable(self):
for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05D0"]:
for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]:
s = Series(data, name=n)
assert s.name == n
def test_constructor_name_unhashable(self):
msg = r"Series\.name must be a hashable type"
for n in [["name_list"], np.ones(2), {1: 2}]:
for data in [["name_list"], np.ones(2), {1: 2}]:
with pytest.raises(TypeError, match=msg):
Series(data, name=n)
def test_auto_conversion(self):
series = Series(list(date_range("1/1/2000", periods=10)))
assert series.dtype == "M8[ns]"
def test_convert_non_ns(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype="timedelta64[s]")
s = Series(arr)
expected = Series(pd.timedelta_range("00:00:01", periods=3, freq="s"))
tm.assert_series_equal(s, expected)
# convert from a numpy array of non-ns datetime64
# note that creating a numpy datetime64 is in LOCAL time!!!!
# seems to work for M8[D], but not for M8[s]
# TODO: is the above comment still accurate/needed?
arr = np.array(
["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]"
)
ser = Series(arr)
expected = Series(date_range("20130101", periods=3, freq="D"))
tm.assert_series_equal(ser, expected)
arr = np.array(
["2013-01-01 00:00:01", "2013-01-01 00:00:02", "2013-01-01 00:00:03"],
dtype="datetime64[s]",
)
ser = Series(arr)
expected = Series(date_range("20130101 00:00:01", periods=3, freq="s"))
tm.assert_series_equal(ser, expected)
@pytest.mark.parametrize(
"index",
[
date_range("1/1/2000", periods=10),
timedelta_range("1 day", periods=10),
period_range("2000-Q1", periods=10, freq="Q"),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_cant_cast_datetimelike(self, index):
# floats are not ok
# strip Index to convert PeriodIndex -> Period
# We don't care whether the error message says
# PeriodIndex or PeriodArray
msg = f"Cannot cast {type(index).__name__.rstrip('Index')}.*? to "
with pytest.raises(TypeError, match=msg):
Series(index, dtype=float)
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(index, dtype=np.int64)
expected = Series(index.astype(np.int64))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
date_range("1/1/2000", periods=10),
timedelta_range("1 day", periods=10),
period_range("2000-Q1", periods=10, freq="Q"),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_cast_object(self, index):
s = Series(index, dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
s = Series( | pd.Index(index, dtype=object) | pandas.Index |
from typing import Set, Optional
from terminusdb_client import WOQLClient
from terminusdb_client.woqlschema.woql_schema import (
DocumentTemplate,
EnumTemplate,
WOQLSchema,
LexicalKey,
)
import pandas as pd
from tqdm import tqdm
import tempfile
import random
schema = WOQLSchema()
class Content(DocumentTemplate):
_schema = schema
title: str
type_of: "Content_Type"
director: Optional[str]
cast: Optional[str]
country_of_origin: Optional[str]
release_year: int
rating: "Rating"
duration: str
listed_in: str
description: str
date_added: Optional[str]
class User(DocumentTemplate):
_schema = schema
_key = LexicalKey(keys="id")
_base = "User"
id : str
watched_contents: Set["Content"]
class Content_Type(EnumTemplate):
_schema = schema
TV_Show = "TV Show"
Movie = "Movie"
class Rating(EnumTemplate):
_schema = schema
TV_MA = "TV-MA"
R = ()
PG_13 = "PG-13"
TV_14 = "TV-14"
TV_PG = "TV-PG"
NR = ()
TV_G = "TV-G"
TV_Y = "TV-Y"
TV_Y7 = "TV-Y7"
TY = ()
TY_7 = "TY-7"
PG = ()
G = ()
NC_17 = "NC-17"
TV_Y7_FV = "TV-Y7-FV"
UR = ()
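# Illustrative sketch (added; not part of the original tutorial script): documents can
# be built directly from the schema classes above. All field values below are made-up
# examples, mirroring the keyword construction used for User further down.
# example_show = Content(title="Example Show", type_of=Content_Type.TV_Show,
#                        release_year=2020, rating=Rating.TV_MA, duration="1 Season",
#                        listed_in="Dramas", description="A made-up sample record.")
# example_user = User(id="999", watched_contents={example_show})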
def insert_content_data(client, url):
df = pd.read_csv(url, chunksize=1000)
for chunk in tqdm(df, desc='Transferring data'):
csv = tempfile.NamedTemporaryFile()
chunk.to_csv(csv)
netflix_content = read_data(csv.name)
client.insert_document(netflix_content, commit_msg="Adding all Netflix content")
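# Illustrative driver sketch (added; not from the original script): insert_content_data
# expects an already-connected WOQLClient; the server URL and CSV path below are
# assumptions.
# client = WOQLClient("http://127.0.0.1:6363/")
# insert_content_data(client, "netflix_titles.csv")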
# We will generate and insert 50 random users using the following function
def insert_user_data(contents):
users = []
for i in range(0,50):
randomlist = random.sample(range(1, 50), i%10)
watched_contents = set()
for index in randomlist:
watched_contents.add(schema.import_objects(contents[index]))
users.append(User(id=str(i), watched_contents = watched_contents))
client.insert_document(users, commit_msg="Adding users")
def read_data(csv):
records = []
df = pd.read_csv(csv)
for index, row in df.iterrows():
type_of = row['type'].replace(" ", "_")
rating = "NR" if | pd.isna(row['rating']) | pandas.isna |
"""
Functions for simple MODFLOW boundary conditions such as ghb, drain, etc.
"""
import numbers
import shutil
import flopy
import numpy as np
import pandas as pd
fm = flopy.modflow
import pyproj
import rasterio
from gisutils import project
from rasterstats import zonal_stats
from shapely.geometry import Polygon
from mfsetup.discretization import cellids_to_kij, get_layer
from mfsetup.grid import rasterize
from mfsetup.units import convert_length_units
def setup_basic_stress_data(model, shapefile=None, csvfile=None,
head=None, elev=None, bhead=None, stage=None,
cond=None, rbot=None, default_rbot_thickness=1,
all_touched=True,
**kwargs):
m = model
# get the BC cells
# todo: generalize more of the GHB setup code and move it somewhere else
bc_cells = None
if shapefile is not None:
shapefile = shapefile.copy()
key = [k for k in shapefile.keys() if 'filename' in k.lower()]
if key:
shapefile_name = shapefile.pop(key[0])
if 'all_touched' in shapefile:
all_touched = shapefile['all_touched']
if 'boundname_col' in shapefile:
shapefile['names_column'] = shapefile.pop('boundname_col')
bc_cells = rasterize(shapefile_name, m.modelgrid, **shapefile)
if csvfile is not None:
raise NotImplementedError('Time-varying (CSV) file input not yet supported for this package.')
if bc_cells is None:
return
# create polygons of model grid cells
if bc_cells.dtype == object:
cells_with_bc = bc_cells.flat != ''
else:
cells_with_bc = bc_cells.flat > 0
vertices = np.array(m.modelgrid.vertices)[cells_with_bc, :, :]
polygons = [Polygon(vrts) for vrts in vertices]
# setup DataFrame for MODFLOW input
i, j = np.indices((m.nrow, m.ncol))
df = pd.DataFrame({'per': 0,
'k': 0,
'i': i.flat,
'j': j.flat})
# add the boundnames
if bc_cells.dtype == object:
df['boundname'] = bc_cells.flat
df.loc[df.boundname.isna(), 'boundname'] = 'unnamed'
# cull to just the cells with bcs
df = df.loc[cells_with_bc].copy()
variables = {'head': head, 'elev': elev, 'bhead': bhead,
'stage': stage, 'cond': cond, 'rbot': rbot}
for var, entry in variables.items():
if entry is not None:
# Raster of variable values supplied
if isinstance(entry, dict):
filename_entries = [k for k in entry.keys() if 'filename' in k.lower()]
if not any(filename_entries):
continue
filename = entry[filename_entries[0]]
with rasterio.open(filename) as src:
meta = src.meta
# reproject the polygons to the dem crs if needed
try:
from gisutils import get_authority_crs
raster_crs = get_authority_crs(src.crs)
except:
raster_crs = pyproj.crs.CRS.from_user_input(src.crs)
if raster_crs != m.modelgrid.crs:
polygons = project(polygons, m.modelgrid.crs, raster_crs)
# all_touched arg for rasterstats.zonal_stats
all_touched = False
if meta['transform'][0] > m.modelgrid.delr[0]:
all_touched = True
stat = entry['stat']
results = zonal_stats(polygons, filename, stats=stat,
all_touched=all_touched)
#values = np.ones((m.nrow * m.ncol), dtype=float) * np.nan
#values[cells_with_bc] = np.array([r[stat] for r in results])
values = np.array([r[stat] for r in results])
# cull to polygon statistics within model area
valid = values != None
values = values[valid]
df = df.loc[valid].copy()
units_key = [k for k in entry if 'units' in k]
if len(units_key) > 0:
values *= convert_length_units(entry[units_key[0]],
model.length_units)
#values = np.reshape(values, (m.nrow, m.ncol))
# add the layer and the values to the Modflow input DataFrame
# assign layers so that the elevation is above the cell bottoms
if var in ['head', 'elev', 'bhead']:
df['k'] = get_layer(model.dis.botm.array, df.i, df.j, values)
df[var] = values
# single global value specified
elif isinstance(entry, numbers.Number):
df[var] = entry
else:
raise ValueError(f"Unrecognized input for {var}:\n{entry}. "
"If this is from a YAML format configuration file, "
"check that the number is formatted correctly "
"(i.e. 1.e+3 for 1e3)")
# drop cells that don't include this boundary condition
df.dropna(axis=0, inplace=True)
# special handling of rbot for RIV package
if 'stage' in df.columns and 'rbot' not in df.columns:
df['rbot'] = df['stage'] - default_rbot_thickness
df['k'] = get_layer(model.dis.botm.array, df.i, df.j, df['rbot'])
# remove BC cells from places where the specified head is below the model
for var in ['head', 'elev', 'bhead', 'rbot']:
if var in df.columns:
below_bottom_of_model = df[var] < model.dis.botm.array[-1, df.i, df.j] + 0.01
df = df.loc[~below_bottom_of_model].copy()
# exclude inactive cells
k, i, j = df.k, df.i, df.j
if model.version == 'mf6':
active_cells = model.idomain[k, i, j] >= 1
else:
active_cells = model.ibound[k, i, j] >= 1
df = df.loc[active_cells]
# sort the columns
col_order = ['per', 'k', 'i', 'j', 'head', 'elev', 'bhead', 'stage',
'cond', 'rbot', 'boundname']
cols = [c for c in col_order if c in df.columns]
df = df[cols].copy()
return df
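# Illustrative configuration sketch (added; not part of the original module). The keys
# below mirror what setup_basic_stress_data() reads from its arguments; the file names,
# statistic and conductance value are assumptions.
# ghb_data = setup_basic_stress_data(
#     model,
#     shapefile={'filename': 'ghb_area.shp', 'all_touched': True,
#                'boundname_col': 'name'},
#     bhead={'filename': 'lake_level.tif', 'stat': 'mean', 'elevation_units': 'meters'},
#     cond=9000)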
def get_bc_package_cells(package, exclude_horizontal=True):
"""
Parameters
----------
package : flopy package instance for boundary condition
Returns
-------
k, i, j : 1D numpy arrays of boundary condition package cell locations
"""
if package.package_type == 'sfr':
if package.parent.version == 'mf6':
k, i, j = cellids_to_kij(package.packagedata.array['cellid'])
else:
rd = package.reach_data
k, i, j = rd['k'], rd['i'], rd['j']
elif package.package_type == 'lak':
if package.parent.version == 'mf6':
connectiondata = package.connectiondata.array
if exclude_horizontal:
connectiondata = connectiondata[connectiondata['claktype'] == 'vertical']
k, i, j = map(np.array, zip(*connectiondata['cellid']))
else:
try:
# todo: figure out why flopy sometimes can't read external files for lakarr
k, i, j = np.where(package.lakarr.array[0, :, :, :] > 0)
except:
k, i, j = np.where(package.parent.lakarr > 0)
else:
df = mftransientlist_to_dataframe(package.stress_period_data,
squeeze=True)
k, i, j = df['k'].values, df['i'].values, df['j'].values
return k, i, j
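# Illustrative usage sketch (added; not from the original module); assumes 'm' is a
# flopy model with a GHB package attached:
# k, i, j = get_bc_package_cells(m.ghb)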
def mftransientlist_to_dataframe(mftransientlist, squeeze=True):
"""
Cast a MFTransientList of stress period data
into single dataframe containing all stress periods. Output data are
aggregated (summed) to the model cell level, to avoid
issues with non-unique row indices.
Parameters
----------
mftransientlist : flopy.mf6.data.mfdatalist.MFTransientList instance
squeeze : bool
Reduce number of columns in dataframe to only include
stress periods where a variable changes.
Returns
-------
df : dataframe
Dataframe of shape nrow = ncells, ncol = nvar x nper. If
the squeeze option is chosen, nper is the number of
stress periods where at least one cell is different,
otherwise it is equal to the number of keys in MfList.data.
"""
data = mftransientlist
names = ['cellid']
if isinstance(data.package, flopy.mf6.modflow.ModflowGwfmaw):
names += ['wellid']
# monkey patch the mf6 version to behave like the mf2005 version
#if isinstance(mftransientlist, flopy.mf6.data.mfdatalist.MFTransientList):
# mftransientlist.data = {per: ra for per, ra in enumerate(mftransientlist.array)}
# find relevant variable names
# may have to iterate over the first stress period
#for per in range(data.model.nper):
try:
data.data
except:
j=2
for per, spd in data.data.items():
if spd is not None and hasattr(spd, 'dtype'):
varnames = list([n for n in spd.dtype.names
if n not in ['k', 'i', 'j', 'cellid', 'boundname']])
break
# create list of dataframes for each stress period
# each with index of k, i, j
dfs = []
for per, recs in data.data.items():
if recs is None or recs == 0:
# add an empty dataframe if a stress period is
# set to 0 (e.g. no pumping during a predevelopment
# period)
columns = names + list(['{}{}'.format(c, per)
for c in varnames])
dfi = pd.DataFrame(data=None, columns=columns)
dfi = dfi.set_index(names)
else:
dfi = pd.DataFrame.from_records(recs)
if {'k', 'i', 'j'}.issubset(dfi.columns):
dfi['cellid'] = list(zip(dfi.k, dfi.i, dfi.j))
dfi.drop(['k', 'i', 'j'], axis=1, inplace=True)
if 'cellid' in dfi.columns:
dfi['cellid'] = dfi['cellid'].astype(str)
dfi = dfi.set_index(names)
# aggregate (sum) data to model cells
# because pd.concat can't handle a non-unique index
# (and modflow input doesn't have a unique identifier at sub-cell level)
dfg = dfi.groupby(names)
dfi = dfg.sum() # aggregate
#dfi.columns = names + list(['{}{}'.format(c, per) for c in varnames])
dfi.columns = ['{}{}'.format(c, per) if c in varnames else c for c in dfi.columns]
dfs.append(dfi)
df = pd.concat(dfs, axis=1)
if squeeze:
keep = []
for var in varnames:
diffcols = list([n for n in df.columns if var in n])
squeezed = squeeze_columns(df[diffcols])
keep.append(squeezed)
df = pd.concat(keep, axis=1)
data_cols = df.columns.tolist()
df.index = [eval(s) for s in df.index]
df['cellid'] = df.index.tolist()
idx_cols = ['cellid']
if isinstance(df.index.values[0], tuple):
df['k'], df['i'], df['j'] = list(zip(*df['cellid']))
idx_cols += ['k', 'i', 'j']
cols = idx_cols + data_cols
df = df[cols]
return df
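# Illustrative usage sketch (added; not from the original module); assumes 'm' is a
# MODFLOW-2005 style flopy model with a WEL package:
# wel_df = mftransientlist_to_dataframe(m.wel.stress_period_data, squeeze=True)
# wel_df then has one row per model cell, index columns ['cellid', 'k', 'i', 'j'] and
# one 'flux<per>' column for each stress period kept by the squeeze step.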
def remove_inactive_bcs(pckg, external_files=False):
"""Remove boundary conditions from cells that are inactive.
Parameters
----------
pckg : flopy package instance
external_files : dict, optional
mapping of stress period to external file name, used when rewriting
the updated stress period data to external files
"""
model = pckg.parent
if model.version == 'mf6':
active = model.dis.idomain.array > 0
else:
active = model.bas6.ibound.array > 0
spd = pckg.stress_period_data.data
new_spd = {}
for per, rec in spd.items():
if 'cellid' in rec.dtype.names:
k, i, j = zip(*rec['cellid'])
else:
k, i, j = zip(*rec[['k', 'i', 'j']])
new_spd[per] = rec[active[k, i, j]]
if external_files:
if model.version == 'mf6':
spd_input = {}
for per, filename in external_files.items():
df = | pd.DataFrame(new_spd[per]) | pandas.DataFrame |
import re
import sys
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import wgdi.base as base
class circos():
def __init__(self, options):
self.figsize = '10,10'
self.position = 'order'
self.label_size = 9
self.label_radius = 0.015
for k, v in options:
setattr(self, str(k), v)
print(k, ' = ', v)
self.figsize = [float(k) for k in self.figsize.split(',')]
self.ring_width = float(self.ring_width)
def plot_circle(self, loc_chr, radius, color='black', lw=1, alpha=1, linestyle='-'):
for k in loc_chr:
start, end = loc_chr[k]
t = np.arange(start, end, 0.005)
x, y = (radius) * np.cos(t), (radius) * np.sin(t)
plt.plot(x, y, linestyle=linestyle,
color=color, lw=lw, alpha=alpha)
def plot_labels(self, root, labels, loc_chr, radius, horizontalalignment="center", verticalalignment="center", fontsize=6,
color='black'):
for k in loc_chr:
loc = sum(loc_chr[k]) * 0.5
x, y = radius * np.cos(loc), radius * np.sin(loc)
self.Wedge(root, (x, y), self.label_radius, 0,
360, self.label_radius, 'white', 1)
if 1 * np.pi < loc < 2 * np.pi:
loc += np.pi
plt.text(x, y, labels[k], horizontalalignment=horizontalalignment, verticalalignment=verticalalignment,
fontsize=fontsize, color=color, rotation=0)
def Wedge(self, ax, loc, radius, start, end, width, color, alpha):
p = mpatches.Wedge(loc, radius, start, end, width=width,
edgecolor=None, facecolor=color, alpha=alpha)
ax.add_patch(p)
def plot_bar(self, df, radius, length, lw, color, alpha):
for k in df[df.columns[0]].drop_duplicates().values:
if k in ['', np.nan]:
continue
df_chr = df.groupby(df.columns[0]).get_group(k)
x1, y1 = radius * \
np.cos(df_chr['rad']), radius * np.sin(df_chr['rad'])
x2, y2 = (radius + length) * \
np.cos(df_chr['rad']), (radius + length) * \
np.sin(df_chr['rad'])
x = np.array(
[x1.values, x2.values, [np.nan] * x1.size]).flatten('F')
y = np.array(
[y1.values, y2.values, [np.nan] * x1.size]).flatten('F')
plt.plot(x, y, linestyle='-',
color=color[str(k)], lw=lw, alpha=alpha)
def chr_loction(self, lens, angle_gap, angle):
start, end, loc_chr = 0, 0.2*angle_gap, {}
for k in lens.index:
end += angle_gap + angle * (float(lens[k]))
start = end - angle * (float(lens[k]))
loc_chr[k] = [float(start), float(end)]
return loc_chr
def deal_alignment(self, alignment, gff, lens, loc_chr, angle):
alignment.replace('\s+', '', inplace=True)
alignment.replace('.', '', inplace=True)
newalignment = alignment.copy()
for i in range(len(alignment.columns)):
alignment[i] = alignment[i].astype(str)
newalignment[i] = alignment[i].map(gff['chr'].to_dict())
newalignment['loc'] = alignment[0].map(gff[self.position].to_dict())
newalignment[0] = newalignment[0].astype('str')
newalignment['loc'] = newalignment['loc'].astype('float')
newalignment = newalignment[newalignment[0].isin(lens.index) == True]
newalignment['rad'] = np.nan
for name, group in newalignment.groupby([0]):
if str(name) not in loc_chr:
continue
newalignment.loc[group.index, 'rad'] = loc_chr[str(
name)][0]+angle * group['loc']
return newalignment
def deal_ancestor(self, alignment, gff, lens, loc_chr, angle, al):
alignment.replace('\s+', '', inplace=True)
alignment.replace('.', np.nan, inplace=True)
newalignment = pd.merge(alignment, gff, left_on=0, right_on=gff.index)
newalignment['rad'] = np.nan
for name, group in newalignment.groupby(['chr']):
if str(name) not in loc_chr:
continue
newalignment.loc[group.index, 'rad'] = loc_chr[str(
name)][0]+angle * group[self.position]
newalignment.index = newalignment[0]
newalignment[0] = newalignment[0].map(newalignment['rad'].to_dict())
data = []
for index_al, row_al in al.iterrows():
for k in alignment.columns[1:]:
alignment[k] = alignment[k].astype(str)
group = newalignment[(newalignment['chr'] == row_al['chr']) & (
newalignment['order'] >= row_al['start']) & (newalignment['order'] <= row_al['end'])]
group[k] = group[k].map(newalignment['rad'].to_dict())
group[k].dropna(inplace=True)
group.index = group.index.map(newalignment['rad'].to_dict())
group['color'] = row_al['color']
group = group[group[k].notnull()]
data += group[[0,k,'color']].values.tolist()
df = pd.DataFrame(data, columns=['loc1', 'loc2', 'color'])
return df
def plot_collinearity(self, data, radius, lw=0.02, alpha=1):
for name, group in data.groupby('color'):
x,y = np.array([]), np.array([])
for index, row in group.iterrows():
ex1x, ex1y = radius * \
np.cos(row['loc1']), radius*np.sin(row['loc1'])
ex2x, ex2y = radius * \
np.cos(row['loc2']), radius*np.sin(row['loc2'])
ex3x, ex3y = radius * (1-abs(row['loc1']-row['loc2'])/np.pi) * np.cos((row['loc1']+row['loc2'])*0.5), radius * (
1-abs(row['loc1']-row['loc2'])/np.pi) * np.sin((row['loc1']+row['loc2'])*0.5)
x1 = [ex1x, 0.5*ex3x, ex2x]
y1 = [ex1y, 0.5*ex3y, ex2y]
step = .002
t = np.arange(0, 1+step, step)
xt = base.Bezier3(x1, t)
yt = base.Bezier3(y1, t)
x = np.hstack((x,xt,np.nan))
y = np.hstack((y,yt,np.nan))
plt.plot(x, y, color=name, lw=lw,alpha=alpha)
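# Note added for clarity (not in the original source): each ribbon drawn above is a
# quadratic Bezier curve through three control points P0, P1, P2,
#     B(t) = (1-t)^2 * P0 + 2*t*(1-t) * P1 + t^2 * P2,  t in [0, 1],
# where P0 and P2 sit on the circle at the two collinear genes and the middle control
# point (0.5 * ex3) is pulled toward the centre, so blocks that are far apart bow
# inward more strongly.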
def run(self):
fig = plt.figure(figsize=(tuple(self.figsize)))
root = plt.axes([0, 0, 1, 1])
mpl.rcParams['agg.path.chunksize'] = 100000000
lens = base.newlens(self.lens, self.position)
radius, angle_gap = float(self.radius), float(self.angle_gap)
angle = (2 * np.pi - (int(len(lens))+1.5) * angle_gap) / (int(lens.sum()))
loc_chr = self.chr_loction(lens, angle_gap, angle)
list_colors = [str(k).strip() for k in re.split(',|:', self.colors)]
chr_color = dict(zip(list_colors[::2], list_colors[1::2]))
for k in loc_chr:
start, end = loc_chr[k]
self.Wedge(root, (0.0, 0.0), radius+self.ring_width, start * 180 /
np.pi, end * 180 / np.pi, self.ring_width*0.3, chr_color[k], 0.9)
gff = base.newgff(self.gff)
if hasattr(self, 'ancestor'):
ancestor = | pd.read_csv(self.ancestor, sep='\t', header=None) | pandas.read_csv |
from bs4 import BeautifulSoup
import requests
import pandas as pd
import urllib.request
import re
# Bags
def b_bags():
# Women
total = []
for page in range(1, 10):
url = "https://kr.burberry.com/womens-bags/?start={}&pageSize=40&productsOffset=&cellsOffset=8&cellsLimit=&__lang=ko".format(page)
response = requests.get(url)
dom = BeautifulSoup(response.content, "html.parser")
elements = dom.select(".product-card")
datas = []
for element in elements:
datas.append({
"brand": "BURBERRY",
"title": element.select_one("a").get("aria-label"),
"price": element.select_one(".product-card-price").text.strip(),
"sex" : "Women",
"image": element.select_one(".cell-asset-image").get("data-src").split("?")[0]
})
w_bag = pd.DataFrame(datas)[["brand", "title", "price", "sex", "image"]].reset_index(drop=True)
ls = []
for x in w_bag["price"]:
num = re.findall("\d+", x)
num = "".join(num)
num = int(num)
ls.append(num)
w_bag["price"] = ls
total.append(w_bag)
w_bag = pd.concat(total)
# Men
url2 = "https://kr.burberry.com/web-api/pages?offset=17&limit=17&order_by=&pagePath=%2Fmens-bags%2Fbum-bags%2Fproducts&country=KR&language=ko"
response = requests.get(url2)
data2 = response.json()["data"]
p = []
for i in range(len(data2)):
d = data2[i]['price']['current']['value']
p.append(d)
e = []
for i in range(len(data2)):
g = data2[i]["rollover_media"]["image"]
g_1 = "https:" + g
e.append(g_1)
m_bag = pd.DataFrame(data2)[["extended_image_alt"]]
m_bag["price"] = p
m_bag["image"] = e
m_bag["brand"] = "BURBERRY"
m_bag["sex"] = "Men"
m_bag = m_bag.rename(columns = {"extended_image_alt":"title"})
m_bag = m_bag[["brand", "title", "price", "sex", "image"]]
# Combine the two frames
burberry_bag_df = pd.concat([w_bag, m_bag]).reset_index(drop=True)
burberry_bag_df.to_csv('burberry_bags.csv', index=False, encoding='utf-8')
for idx, link in enumerate(burberry_bag_df["image"]):
url = link
urllib.request.urlretrieve(url, 'clothes/' + 'b' + str(idx) + '.jpg')
return burberry_bag_df
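# Illustrative usage sketch (added; not part of the original script). The scraper
# depends on Burberry's live page structure, so the selectors may need updating.
# bags = b_bags()   # writes burberry_bags.csv and saves images under clothes/
# print(bags[['title', 'price']].head())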
# Shoes
def b_shoes():
# Women
url = "https://kr.burberry.com/web-api/pages?offset=0&limit=50&order_by=&pagePath=/womens-sneakers/products&country=KR&language=ko"
response = requests.get(url)
data = response.json()["data"]
w_shoes = pd.DataFrame(data)[["extended_image_alt"]]
p = []
for i in range(len(data)):
a = data[i]["price"]["current"]["value"]
p.append(a)
e = []
for i in range(len(data)):
b = data[i]["rollover_media"]["image"]
b_1 = "http:" + b
e.append(b_1)
w_shoes["price"] = p
w_shoes["image"] = e
w_shoes["brand"] = "BURBERRY"
w_shoes["sex"] = "Women"
w_shoes = w_shoes.rename(columns = {"extended_image_alt":"title"})
w_shoes = w_shoes[["brand", "title", "price", "sex", "image"]]
# Men
url2 = "https://kr.burberry.com/web-api/pages?offset=0&limit=50&order_by=&pagePath=%2Fmens-shoes%2Fsneakers%2Fproducts&country=KR&language=ko"
response = requests.get(url2)
data = response.json()["data"]
m_shoes = | pd.DataFrame(data) | pandas.DataFrame |
import copy
import warnings
import os
import concurrent.futures as cf
import multiprocessing as mp
import numpy as np
from scipy import optimize
import pandas as pd
import pvlib
import utils
from sklearn import metrics
def main():
pass
class ClearskyDetection(object):
"""Class for detecting clearsky based on NSRDB data."""
def __init__(self, df, meas_col, model_col=None, target_col=None, solar_noon_col=None, copy=True):
"""Initialize members.
Parameters
---------
df: pd.DataFrame
Time series irradiance data.
meas_col: str
Column of measured GHI values.
model_col: str
Column of clear sky GHI values.
target_col: str
Column of clear sky status.
copy: bool
Permission to copy dataframe.
"""
self.meas_col = meas_col
self.model_col = model_col
self.target_col = target_col
if copy:
self.df = df.copy()
else:
self.df = df
if target_col is not None:
self.df[self.target_col] = self.df[self.target_col].astype(bool)
self.window = self.calc_window()
self.masks_ = []
self.features_ = ['avg(GHI)-avg(GHIcs)',
'max(GHI)-max(GHIcs)',
'GHILL-GHIcsLL',
'std(GHI)-std(GHIcs) normed',
'max(abs(diff(GHI)-diff(GHIcs)))',
'GHI>0']
# 't-tnoon']
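# Illustrative construction sketch (added; not from the original module). The column
# names follow the NSRDB fields used elsewhere in this class; 'sky_status' is an
# assumed name for the clear-sky flag column and 'nsrdb_df' an assumed input frame.
# detector = ClearskyDetection(nsrdb_df, meas_col='GHI', model_col='Clearsky GHI',
#                              target_col='sky_status')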
@classmethod
def read_nsrdb_dir(cls, dir_path, timezone, keepers=('GHI', 'Clearsky GHI', 'Cloud Type'), file_ext='csv'):
"""Read directory of NSRDB files.
*** NOTE ***
This is hardcoded for the files I have. It is not guaranteed to be general at all for
SRRL/MDIC/etc data sources and files.
Parameters
---------
dir_path: str
Path to directory of files.
timezone: pytz.timezone
Timezone for the dataframe indices.
file_ext: str
Filetype to specify for reading.
Returns
-------
df: pd.DataFrame
Contains all fields from files.
"""
if file_ext.lower() not in ('csv',):
raise NotImplementedError('Only accept CSV files at this time.')
files = [os.path.join(dir_path, i) for i in os.listdir(dir_path) if i.endswith(file_ext)]
df = pd.concat([pd.read_csv(f, header=2) for f in files])
df.index = pd.to_datetime(df[['Year', 'Month', 'Day', 'Hour', 'Minute']])
tmp_list = []
for index in df.index:
try:
index.tz_localize(timezone)
tmp_list.append(index)
except:
pass
df = df[df.index.isin(tmp_list)]
df.index = df.index.tz_localize(timezone)
df = df.sort_index()
df = df.reindex(pd.date_range(start=df.index[0], end=df.index[-1], freq='30min')).fillna(0)
df = df[~df.index.duplicated(keep='first')]
df.index = df.index.tz_convert('UTC')
return cls(df[keepers])
@classmethod
def read_snl_rtc(cls, file_w_path, timezone, keepers=('GHI')):
"""Read SNL RTC data into file.
*** NOTE ***
This is hardcoded for the files I have. It is not guaranteed to be general at all for
SRRL/MDIC/etc data sources and files.
Parameters
----------
file_w_path: str
Path to file (absolute).
timezone: pytz.timezone or str
Timezone for localization.
Returns
-------
df: pd.DataFrame
Contains all fields from files.
"""
df = | pd.read_csv(file_w_path, parse_dates=['Date-Time'], index_col=['Date-Time']) | pandas.read_csv |
# Import the standard module for rendering pages
from django.shortcuts import render
# Import the standard pagination modules
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Import the module used to measure elapsed time
import time
# Import pandas for the analysis
import pandas as pd
# Import the settings module to read constants from the configuration
from django.conf import settings
'''
Module for creating dumps. It is used only for pandas, since pandas works much slower than numpy. VG
Example code for creating a dump:
dt = pd.read_csv('base.txt', index_col=False, delimiter=';', names=['date', 'hours', 'minutes', 'seconds', 'gap', 'grp', 'v', 'gi', 's1', 's2', 's3'])
dt.to_pickle('pandas.pickle', compression='infer')
'''
import pickle
# Create a view that renders the home page
def index(request):
return render(request, 'home.html')
# Create a view that renders the form page
def pandas_page(request):
return render(request, 'pandas_page.html')
# Create a view that processes the pandas options
def pandas_processing(request):
# Declare global variables because we also handle non-POST requests
global end
global pandas_data
# Check the form request method
if request.method == "POST":
# Get the selected option from the form
c = request.POST.get('choice', None)
# Handle option 1
if c == 'c1':
# Declare a variable for paginating to the first page
page = request.GET.get('page', 1)
# Record the start time
start = time.time()
# Read the data from the dump
pandas_data = pd.read_pickle(settings.PANDAS_DUMP, compression='infer')
# Find global_active_power values greater than 5
pandas_data = pandas_data[pandas_data['gap'] > 5]
# Convert the resulting frame to a list and pass it as an argument; also set the maximum number of rows per page
paginator = Paginator(pandas_data.values.tolist(), 1000)
# Record the execution time
end = time.time() - start
try:
# Get the values for the first page
p = paginator.page(page)
# Handle the exception raised for non-integer page numbers
except PageNotAnInteger:
# In that case show page 1
p = paginator.page(1)
# Handle the exception raised for empty pages
except EmptyPage:
# Show only the pagination
p = paginator.page(paginator.num_pages)
# Build a dictionary with the values
context = {'frame': p, 'time': end}
# Pass the processed data to the page
return render(request, 'pandas_data.html', context)
# Handle option 2
elif c == 'c2':
# Declare a variable for paginating to the first page
page = request.GET.get('page', 1)
# Record the start time
start = time.time()
# Read the data from the dump
pandas_data = | pd.read_pickle(settings.PANDAS_DUMP, compression='infer') | pandas.read_pickle |
"""Code here is about filtering and aggregating. There should be no plotting
code here."""
import pandas as pd
import datetime
# from datetime import datetime, timedelta
from .data import my_data, COLS_KEEP, RUN_FINISHED_DATE, CURRENT_COLUMNS_KEEP
def limit_rows(data, max_rows=20):
result = data
if len(data) > max_rows:
num_last = max_rows // 2
num_first = max_rows - num_last
result = pd.concat([data.head(num_first), data.tail(num_last)])
return result
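# Illustrative behaviour sketch (added; not part of the original module): with
# max_rows=4 a 100-row frame keeps its first two and last two rows.
# sample = pd.DataFrame({'x': range(100)})
# limit_rows(sample, max_rows=4)   # rows with x = 0, 1, 98, 99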
def query_ses(platform, group, appl, start_str, end_str):
filter_by_date = start_str and end_str
is_filtering = platform or group or appl or filter_by_date
result_df = my_data.at
if platform:
result_df = by_platform(result_df, platform)
if group:
result_df = by_group(result_df, group)
if appl:
result_df = by_appl(result_df, appl)
if filter_by_date:
start, end = make_range(start_str, end_str)
result_df = by_date_range(result_df, start, end)
return result_df
# write dataframe to a csv file format
def build_csv_data(data_frame):
csv_data = data_frame.to_csv(path_or_buf=None,
index=True, encoding='utf-8')
return csv_data
def head():
"""Demonstration of a function that returns a data frame"""
return my_data.at_head
def sub_demo():
"""Demonstration of a function that returns a data frame"""
# select number of rows from dataframe
at_sub = my_data.at.iloc[-1000:, :]
return at_sub
def home_grp():
"""A function that returns a data frame"""
return my_data.grp
def home_appl():
"""A function that returns a data frame"""
return my_data.appl
def by_date_range(result_df, start, end):
df = result_df
# convert to datetime64[ns]
df['Run Finished Date'] = df['Run Finished Date'].astype('datetime64[ns]')
return df[(df[RUN_FINISHED_DATE] >= start)
& (df[RUN_FINISHED_DATE] <= end)]
ONE_DAY = datetime.timedelta(days=1)
MIDNIGHT = datetime.time.min
# start_str, end_str are user input
def make_range(start_str, end_str):
start = pd.to_datetime(start_str)
end = ( | pd.to_datetime(end_str) | pandas.to_datetime |
"""
Code for the optimization and gaming component of the Baselining work.
@author: <NAME>, <NAME>
@date Mar 2, 2016
"""
import numpy as np
import pandas as pd
import logging
from gurobipy import GRB, Model, quicksum, LinExpr
from pandas.tseries.holiday import USFederalHolidayCalendar
from datetime import datetime
from .utils import (get_energy_charges, get_demand_charge, dem_charges, dem_charges_yearly,
get_pdp_demand_credit, get_DR_rewards, powerset, E19,
carbon_costs)
# define some string formatters
psform = '%Y-%m-%d %H:%M'
dsform = '%Y-%m-%d'
class BLModel(object):
"""
Abstract base class for Baselining models.
"""
def __init__(self, name):
"""
Construct a baselining model with the given name and an
underlying gurobipy Model object.
"""
self._name = name
self._model = Model()
def get_model(self):
"""
Returns the underlying gurobiy Model object.
"""
return self._model
def set_dynsys(self, dynsys):
"""
Initialize dynamical system for underlying dynamics.
"""
self._dynsys = dynsys
def set_window(self, index):
"""
Set the window for the optimization. Here index is a pandas
DatetimeIndex.
"""
self._index = index
self._dynsys.set_window(index)
def energy_charges(self, tariff, isRT=False, LMP=None, isPDP=False,
twindow=None, carbon=False):
"""
Return total enery consumption charges (as determined by the
tariff's energy charge) as a gurobipy LinExpr.
"""
locidx = self._index.tz_convert('US/Pacific')
year = locidx[0].year
if isRT and isPDP:
raise Exception('Cannot combine RTP and PDP.')
nrg_charges = get_energy_charges(
self._index, tariff, isRT=isRT, LMP=LMP,
isPDP=isPDP, carbon=carbon, year=year)['EnergyCharge']
cons = self._dynsys.get_consumption()['energy']
if twindow is None:
# echrg_= quicksum([ec * con for ec, con in
# zip(nrg_charges.values, cons.values)])
echrg_ = [ec * con for ec, con in
zip(nrg_charges.values, cons.values)]
echrg = pd.Series(echrg_, index=locidx)
else:
nrg_charges_ = nrg_charges.loc[twindow[0]:twindow[1]]
cons_ = cons.loc[twindow[0]:twindow[1]]
# echrg = quicksum([ec * con for ec, con in
# zip(nrg_charges_.values, cons_.values)])
echrg_ = [ec * con for ec, con in
zip(nrg_charges_.values, cons_.values)]
indx = locidx[locidx.get_loc(twindow[0]):
locidx.get_loc(twindow[1])+1]
echrg = pd.Series(echrg_, index=indx)
return echrg
def demand_charges(self, tariff, isPDP=False):
"""
Return the total demand charges under the tariff as a
gurobipy LinExpr.
"""
# determine which year/month combinations there is a demand charge,
# and create a variable for each of them
if hasattr(self, '_maxcon'):
for maxcon in self._maxcon.values():
self._model.remove(maxcon)
del self._maxcon
if hasattr(self, '_maxconbnd'):
for maxconbnd in self._maxconbnd.values():
self._model.remove(maxconbnd)
del self._maxconbnd
if hasattr(self, '_maxconppk'):
for maxconppk in self._maxconppk.values():
self._model.remove(maxconppk)
del self._maxconppk
if hasattr(self, '_maxconppkbnd'):
for maxconppkbnd in self._maxconppkbnd.values():
self._model.remove(maxconppkbnd)
del self._maxconppkbnd
if hasattr(self, '_maxconpk'):
for maxconpk in self._maxconpk.values():
self._model.remove(maxconpk)
del self._maxconpk
if hasattr(self, '_maxconpkbnd'):
for maxconpkbnd in self._maxconpkbnd.values():
self._model.remove(maxconpkbnd)
del self._maxconpkbnd
if hasattr(self, '_maxconpks'):
for maxconpks in self._maxconpks.values():
self._model.remove(maxconpks)
del self._maxconpks
if hasattr(self, '_maxconppkw'):
for maxconppkw in self._maxconppkw.values():
self._model.remove(maxconppkw)
del self._maxconppkw
if hasattr(self, '_maxconppkbndw'):
for maxconppkbndw in self._maxconppkbndw.values():
self._model.remove(maxconppkbndw)
del self._maxconppkbndw
if hasattr(self, '_maxconppks'):
for maxconppks in self._maxconppks.values():
self._model.remove(maxconppks)
del self._maxconppks
if hasattr(self, '_maxconppkbnds'):
for maxconppkbnds in self._maxconppkbnds.values():
self._model.remove(maxconppkbnds)
del self._maxconppkbnds
self._model.update()
locidx = self._index.tz_convert('US/Pacific')
ym_dict = {year: np.unique(locidx[locidx.year == year].month)
for year in np.unique(locidx.year)}
indx = []
for year, months in ym_dict.items():
for month in months:
indx.append(pd.Timestamp(datetime(year, month, 1),
tz='US/Pacific'))
if tariff in dem_charges:
if not(tariff in E19):
self._maxcon, self._maxconbnd = {}, {}
# locidx = self._index.tz_convert('US/Pacific')
# print locidx
# the following creates a dictionary with all years in the data
# as keys, and for each year the value is an array of (unique)
# months that appear during that year. This is used for keeping
# track of the peak consumpiton for the demand charge
# ym_dict = {year: np.unique(locidx[locidx.year == year].month)
# for year in np.unique(locidx.year)}
# indx=[]
for year, months in ym_dict.items():
for month in months:
# declare variable for max consumption
self._maxcon[year, month] = self._model.addVar(
vtype=GRB.CONTINUOUS,
name='maxcon[{},{}]'.format(year, month))
# indx.append(pd.Timestamp(datetime(year,month,1),tz='US/Pacific'))
self._model.update()
# now add in the necessary constraints and update objective
dcharges = []
cons = self._dynsys.get_consumption()['power']
for year, months in ym_dict.items():
for month in months:
relcons = cons[(locidx.year == year) &
(locidx.month == month)].values
for i, con in enumerate(relcons):
self._maxconbnd[year, month, i] = self._model.addConstr(
lhs=self._maxcon[year, month],
sense=GRB.GREATER_EQUAL,
rhs=con, name='maxconbnd[{},{},{}]'.format(
year, month, i))
# dcharges += (get_demand_charge(tariff, month, isPDP)*
# self._maxcon[year, month])
dcharges.append(
(get_demand_charge(tariff, month, isPDP, year=year) *
self._maxcon[year, month]))
dcharges = pd.Series(dcharges, index=indx)
self._model.update()
return dcharges
else:
# for E19 tariffs
idx_ = self._index.tz_convert('US/Pacific')
iswknd = idx_.dayofweek >= 5  # Saturday (5) and Sunday (6)
holidays = USFederalHolidayCalendar().holidays(
idx_.min(), idx_.max())
iswknd = iswknd | pd.DatetimeIndex(idx_.date).isin(holidays)
issummer = (idx_.month >= 5) & (idx_.month <= 10)
ToD = idx_.hour + idx_.minute / 60
ispeak = ~iswknd & issummer & (ToD >= 12) & (ToD < 18)
ispartial_summer = (~iswknd & issummer & (
((ToD >= 8.5) & (ToD < 12)) |
((ToD >= 18) & (ToD < 21.5))))
ispartial_winter = ~iswknd & ~issummer & (
(ToD >= 8.5) & (ToD < 21.5))
# create dictionaries for variables
self._maxcon, self._maxconbnd = {}, {}
self._maxconppks, self._maxconppkbnds = {}, {}
self._maxconpks, self._maxconpkbnds = {}, {}
self._maxconpk, self._maxconpkbnd = {}, {}
self._maxconppk, self._maxconppkbnd = {}, {}
self._maxconppkw, self._maxconppkbndw = {}, {}
# locidx = self._index.tz_convert('US/Pacific')
# ym_dict = {year: np.unique(locidx[locidx.year == year].month)
# for year in np.unique(locidx.year)}
# indx=[]
for year, months in ym_dict.items():
for month in months:
# declare variable for max consumption
self._maxcon[year, month] = self._model.addVar(
vtype=GRB.CONTINUOUS,
name='maxcon[{},{}]'.format(year, month))
# declare variable for part peak consumption
self._maxconppk[year, month] = self._model.addVar(
vtype=GRB.CONTINUOUS,
name='maxconppk[{},{}]'.format(year, month))
# declare variable for max peak only in summer
if (5 <= month) & (month <= 10):
# add variable for maximum peak usage in summer
self._maxconpk[year, month] = self._model.addVar(
vtype=GRB.CONTINUOUS,
name='maxconpk[{},{}]'.format(year, month))
# indx.append(pd.Timestamp(datetime(year,month,1),tz='US/Pacific'))
self._model.update() # update model
# now add in the necessary constraints and update objective
dcharges = []
cons = self._dynsys.get_consumption()['power']
for year, months in ym_dict.items():
for month in months:
dchrg = 0.0
# for peak summer less than max demand
if (month >= 5) & (month <= 10):
self._maxconpkbnd[year, month] = self._model.addConstr(
lhs=self._maxcon[year, month],
sense=GRB.GREATER_EQUAL,
rhs=self._maxconpk[year, month],
name='maxconpkbnd[{},{}]'.format(year, month))
# max partial peak summer greater than consumption
ppconsum = cons[(ispartial_summer) &
(locidx.year == year) &
(locidx.month == month)].values
for i, con in enumerate(ppconsum):
self._maxconppkbnds[year, month, i] = self._model.addConstr(
lhs=self._maxconppk[year, month],
sense=GRB.GREATER_EQUAL,
rhs=con,
name='maxconppkbnds[{},{},{}]'.format(
year, month, i))
# max peak consumption summer
pconsum = cons[(ispeak) & (locidx.year == year) &
(locidx.month == month)].values
for i, con in enumerate(pconsum):
self._maxconpkbnds[year, month, i] = self._model.addConstr(
lhs=self._maxconpk[year, month],
sense=GRB.GREATER_EQUAL,
rhs=con,
name='maxconpkbnds[{},{},{}]'.format(
year, month, i))
# max partial peak winter
ppkconwin = cons[(ispartial_winter) &
(locidx.year == year) &
(locidx.month == month)].values
for i, con in enumerate(ppkconwin):
self._maxconppkbndw[year, month, i] = self._model.addConstr(
lhs=self._maxconppk[year, month],
sense=GRB.GREATER_EQUAL,
rhs=con,
name='maxconppkbndw[{},{},{}]'.format(
year, month, i))
# max demand each month
relcons = cons[(locidx.year == year) &
(locidx.month == month)].values
for i, con in enumerate(relcons):
self._maxconbnd[year, month, i] = self._model.addConstr(
lhs=self._maxcon[year, month],
sense=GRB.GREATER_EQUAL,
rhs=con, name='maxconbnd[{},{},{}]'.format(
year, month, i))
# max partial peaks (summer & winter) < than max demand
self._maxconppkbnd[year, month, i] = self._model.addConstr(
lhs=self._maxcon[year, month],
sense=GRB.GREATER_EQUAL,
rhs=self._maxconppk[year, month],
name='maxconppkbnd[{},{},{}]'.format(
year, month, i))
demchrg = get_demand_charge(tariff, month, year=year)
if (month >= 5) & (month <= 10):
mpeakchg = demchrg['mpeak']
ppeakchg = demchrg['ppeak']
maxchg = demchrg['max']
if isPDP:
pdpcred = get_pdp_demand_credit(tariff, month, year=year)
mpeakchg = mpeakchg - pdpcred['peak']
dchrg += mpeakchg * self._maxconpk[year, month]
# dcharges.append(mpeakchg * self._maxconpk[year, month])
else:
ppeakchg = demchrg['ppeak']
maxchg = demchrg['max']
# add partpeak and maximum demand charge
dcharges.append(
(maxchg * self._maxcon[year, month] +
ppeakchg * self._maxconppk[year, month])+dchrg)
self._model.update()
dcharges = pd.Series(dcharges, index=indx)
return dcharges
else:
return pd.Series([LinExpr(0.0) for ij in
range(0, np.size(indx, 0))], index=indx)
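# Illustrative sketch (added; not part of the original class): for a BLModel instance
# 'blm', the Series of gurobipy LinExpr terms returned by energy_charges() and
# demand_charges() can be summed into a single cost objective. The tariff code below
# is an assumption.
# total_cost = (quicksum(blm.energy_charges('E19S')) +
#               quicksum(blm.demand_charges('E19S')))
# blm.get_model().setObjective(total_cost, GRB.MINIMIZE)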
def DR_compensation(self, LMP, dr_periods, BL='CAISO', **kwargs):
"""
Return compensation for DR, i.e. reductions w.r.t. baseline.
Here LMP is a pandas Series (indexed by a tz-aware pandas
Datetimeindex containing all of the object's indices) and
dr_periods is a pandas DatetimeIndex.
"""
# start by removing all variables (might be inefficient, but o/w it
# is a pain in the ass to deal with the multihour baselines etc.)
self._removeOld()
# no work if no DR events are specified
if (LMP is None) or (dr_periods is None):
return pd.Series([0.0], index=['None'])
# get DR rewards (in case we want LMP-G instead of LMP)
DR_rewards = get_DR_rewards(LMP, isLMPmG=kwargs.get('isLMPmG'),
tariff=kwargs.get('tariff'))
# populate optimization problem for proper BL choices
if BL == 'CAISO':
# print self._DR_comp_CAISO(DR_rewards, dr_periods)
return self._DR_comp_CAISO(DR_rewards, dr_periods)
elif BL == 'expMA':
return self._DR_comp_expMA(DR_rewards, dr_periods, **kwargs)
else:
raise NotImplementedError(
'Baseline type "{}" not known!'.format(BL))
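# Hedged usage sketch for DR_compensation: `opt` below stands in for an
# already-constructed instance of this class (not defined here), and the price
# level is invented. Per the docstring, LMP must be a tz-aware Series covering
# the object's index and dr_periods a DatetimeIndex of candidate event hours.
import pandas as pd

_lmp_index = pd.date_range('2016-07-01', periods=24, freq='H', tz='US/Pacific')
_LMP = pd.Series(35.0, index=_lmp_index)     # $/MWh, illustrative
_dr_periods = pd.DatetimeIndex(['2016-07-01 16:00'], tz='US/Pacific')
# _dr_comp = opt.DR_compensation(_LMP, _dr_periods, BL='CAISO')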
def _DR_comp_CAISO(self, LMP, dr_periods):
"""
Return compensation for DR, i.e. reductions w.r.t. CAISO baseline.
Here LMP is a pandas Series (indexed by a tz-aware pandas
Datetimeindex containing all of the object's indices) and
dr_periods is a pandas DatetimeIndex. Note that LMP may also be
LMP-G, i.e. the LMP minus the generation component of the tariff.
"""
valid_periods = dr_periods[dr_periods.isin(self._index)].tz_convert(
'US/Pacific')
locidx = self._index.tz_convert('US/Pacific')
grouped = valid_periods.groupby(valid_periods.date)
# define auxiliary variables for each possible dr period if none exist
self._red, self._z, self._bl = {}, {}, {}
self._redpos, self._redBL, self._red0, self._blcon = {}, {}, {}, {}
self._dr_periods = valid_periods
# add variables if there are days w/ multiple possible DR events
if np.max([len(grp) for grp in grouped.values()]) > 1:
self._zday, self._zdaysum, self._zdaymax = {}, {}, {}
# now create variables for different days and periods within each day
for day, periods in grouped.items():
daystr = day.strftime(dsform)
perstrs = [per.strftime(psform) for per in periods]
if len(periods) > 1:
self._zday[daystr] = self._model.addVar(
vtype=GRB.BINARY, name='zday[{}]'.format(daystr))
for period, perstr in zip(periods, perstrs):
self._red[perstr] = self._model.addVar(
vtype=GRB.CONTINUOUS, name='red[{}]'.format(perstr))
self._z[perstr] = self._model.addVar(
vtype=GRB.BINARY, name='z[{}]'.format(perstr))
self._bl[perstr] = self._model.addVar(
vtype=GRB.CONTINUOUS, name='bl[{}]'.format(perstr))
self._model.update() # this must be done before defining constraints
# determine "bigM" value from the bounds on the control variables
M = np.sum(np.asarray(self._dynsys._opts['nrg_coeffs']) *
(self._dynsys._opts['umax'] - self._dynsys._opts['umin']),
axis=1).max()
# if u is not bounded, the above results in a NaN value. We need
# to deal with this in a better way than the following fallback:
if np.isnan(M):
M = 1e9
# perform some preparations for the constraints
# drcomp = 0.0
nrgcons = self._dynsys.get_consumption()['energy']
lmps = LMP.tz_convert('US/Pacific').loc[locidx] / 1000 # to $/kWh
holidays = USFederalHolidayCalendar().holidays(
start=locidx.min(), end=locidx.max())
isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
isBusiness = pd.Series(isBusiness, index=locidx)
# add constraints on variable zday (if multiple periods per day)
for day, periods in grouped.items():
daystr = day.strftime(dsform)
perstrs = [per.strftime(psform) for per in periods]
if len(periods) > 1:
self._zdaysum[daystr] = self._model.addConstr(
lhs=self._zday[daystr],
sense=GRB.LESS_EQUAL,
rhs=quicksum([self._z[ps] for ps in perstrs]),
name='zdaysum[{}]'.format(daystr))
for period, perstr in zip(periods, perstrs):
self._zdaymax[perstr] = self._model.addConstr(
lhs=self._zday[daystr],
sense=GRB.GREATER_EQUAL,
rhs=self._z[perstr],
name='zdaymax[{}]'.format(perstr))
self._model.update()
# formulate constraints and add terms to objective
drcomp_ = []
for i, day in enumerate(grouped):
periods = grouped[day]
# print('Formulating constraints for day {} of {}'.format(
# i, len(grouped)))
perstrs = [per.strftime(psform) for per in periods]
for period, perstr in zip(periods, perstrs):
per_select = ((locidx < period) &
(locidx.hour == period.hour) &
(locidx.minute == period.minute))
if isBusiness.loc[period]:
nmax = 10
per_select = per_select & isBusiness.values
else:
nmax = 4
per_select = per_select & (~isBusiness.values)
similars = locidx[per_select].sort_values(ascending=False)
# now go through similar days successively
sim_nonDR, sim_DR, sim_DR_mult = [], [], []
for sim in similars:
if len(sim_nonDR) == nmax:
continue
if sim in self._dr_periods:
sim_DR += [sim]
if len(grouped[pd.Timestamp(sim.date())]) > 1:
sim_DR_mult += [sim]
else:
sim_nonDR += [sim]
sim_DR = pd.DatetimeIndex(
sim_DR).sort_values(ascending=False)
sim_DR_mult = pd.DatetimeIndex(
sim_DR_mult).sort_values(ascending=False)
sim_nonDR = pd.DatetimeIndex(
sim_nonDR).sort_values(ascending=False)
# get consumption variables
cons_nonDR = nrgcons.loc[sim_nonDR].values
# Now add constraints on the baseline variables
for idxset in powerset(range(len(sim_DR))):
K = [sim_DR[i] for i in idxset]
Kc = [sim_DR[i] for i in range(len(sim_DR))
if i not in idxset]
qK = nrgcons.loc[K].values.tolist()
# Need to make sure to use zday if there are multiple
# events possible that day!
zK, zKc = [], []
for k in K:
if k in sim_DR_mult:
zK.append(self._zday[k.strftime(dsform)])
else:
zK.append(self._z[k.strftime(psform)])
for kc in Kc:
if kc in sim_DR_mult:
zKc.append(self._zday[kc.strftime(dsform)])
else:
zKc.append(self._z[kc.strftime(psform)])
# the following uses that the "closest" days appear first
qD = cons_nonDR[:nmax-len(idxset)].tolist()
n = len(sim_nonDR)
if n == 0:
print('No non-DR day available for BL computation -' +
' too many DR events!')
bnd = (quicksum(qD + qK) / float(n) +
M * quicksum(zK) +
M * quicksum([(1-z) for z in zKc]))
self._blcon[perstr, idxset] = self._model.addConstr(
lhs=self._bl[perstr], sense=GRB.LESS_EQUAL,
rhs=bnd, name="blcon[{},{}]".format(perstr, idxset))
# add constraints on baseline reduction
self._redpos[perstr] = self._model.addConstr(
lhs=self._red[perstr], sense=GRB.GREATER_EQUAL,
rhs=0.0, name='redpos[{}]'.format(perstr))
self._redBL[perstr] = self._model.addConstr(
lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
rhs=self._bl[perstr] - nrgcons.loc[period],
name='redBL[{}]'.format(perstr))
self._red0[perstr] = self._model.addConstr(
lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
rhs=self._z[perstr] * M, name='red0[{}]'.format(perstr))
# add DR compensation to objective
# drcomp += lmps.loc[period] * self._red[perstr]
drcomp_.append(lmps.loc[period] * self._red[perstr])
drcomp = | pd.Series(drcomp_, index=self._dr_periods) | pandas.Series |
## DATA PREPARATION FOR MULTIVARIATE REGRESSION
#------------------------------------------------------
## LIBRARIES
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#---------------------------------
## DATA LOADING
veri = | pd.read_csv("C:\\Users\\Computer\\Desktop\\python-machine_learning\\Veriler\\data.csv") | pandas.read_csv |
"""Tests for the sdv.constraints.tabular module."""
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
ColumnFormula, CustomConstraint, GreaterThan, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receive the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test__valid_separator_valid(self):
"""Test ``_valid_separator`` for a valid separator.
If the separator and data are valid, result is ``True``.
Input:
- Table data (pandas.DataFrame)
Output:
- True (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert is_valid
def test__valid_separator_non_valid_separator_contained(self):
"""Test ``_valid_separator`` passing a column that contains the separator.
If any of the columns contains the separator string, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column that contains the separator string ('#')
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', '#', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test__valid_separator_non_valid_name_joined_exists(self):
"""Test ``_valid_separator`` passing a column whose name is obtained after joining
the column names using the separator.
If the column name obtained after joining the column names using the separator
already exists, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column name that will be obtained by joining
the column names and the separator.
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'b#c': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
pd.testing.assert_frame_equal(expected_out, out)
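# Not sdv's implementation -- just a plain-pandas sketch of the concatenation
# that ``transform`` is expected to perform in the test above: the two
# constraint columns are joined with the fitted separator into a 'b#c' column.
import pandas as pd

_table = pd.DataFrame({
    'a': ['a', 'b', 'c'],
    'b': ['d', 'e', 'f'],
    'c': ['g', 'h', 'i'],
})
_joined = _table['b'] + '#' + _table['c']            # ['d#g', 'e#h', 'f#i']
_sketch_out = _table[['a']].assign(**{'b#c': _joined})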
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
transformed_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(transformed_data)
# Run
out = instance.reverse_transform(transformed_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
| pd.testing.assert_frame_equal(expected_out, out) | pandas.testing.assert_frame_equal |
import logging
import summer2020py
import summer2020py.setup_logger as setup_logger
import argparse
import sys
import os
import shutil
import glob
import pandas
import cmapPy.pandasGEXpress.GCToo as GCToo
import cmapPy.pandasGEXpress.write_gct as write_gct
logger = logging.getLogger(setup_logger.LOGGER_NAME)
def build_parser():
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", "-v", help="Whether to print a bunch of output.", action="store_true", default=False)
parser.add_argument("--source_analysis_dir", "-s",
help = "source directory containin analysis, where the DGE data is found and where the heatmaps (subdirectory) will be created",
type = str, required = True)
parser.add_argument("--experiment_id", "-e", help = "id of the expirment", type = str, required = True)
parser.add_argument("--dge_stats_for_heatmaps", "-d", help = "dge stats for heatmaps", default = ["logFC", "t"])
parser.add_argument("--base_server_data_path", "-b", help = "base path to directory on server with subfolders of experiment id in it",
type = str, default = "/data/experiments/RNA_SEQ/")
parser.add_argument("--relative_analysis_path", "-r",
help = """relative path from directory for specific experiment to analysis directory""",
type = str, default = "analysis/")
parser.add_argument("--base_url", "-u", help = "base url for the heatmaps to be put onto", type = str, default = "http://fht.samba.data/fht_morpheus.html?gctData=")
parser.add_argument("--dge_dir_name", "-ddn",
help="name of subdirectory in source_analysis_dir that contains differential gene expression (dge) data tables",
type=str, default="dge_data")
parser.add_argument("--config_filepath", help="path to config file containing information about how to connect to CDD API, ArxLab API etc.",
type=str, default=summer2020py.default_config_filepath)
parser.add_argument("--config_section", help="section of config file to use for information about how to connect to CDD API, ArxLab API etc.",
type=str, default=summer2020py.default_config_section)
return parser
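# Hedged example of driving the parser defined above; the path and experiment
# id are placeholders. Only --source_analysis_dir/-s and --experiment_id/-e are
# required; everything else falls back to the defaults declared above.
def _example_args():
    parser = build_parser()
    return parser.parse_args(['-s', '/path/to/analysis', '-e', 'MYEXP001'])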
def prepare_output_dir(source_analysis_dir):
heatmap_dir = os.path.join(source_analysis_dir, "heatmaps")
logger.debug("heatmap_dir: {}".format(heatmap_dir))
#if the path heatmaps exists in source_dir, delete it
if os.path.exists(heatmap_dir):
shutil.rmtree(heatmap_dir)
os.mkdir(heatmap_dir) #create the path heatmaps in source_dir
return heatmap_dir
def find_DGE_files(source_analysis_dir, dge_dir_name, experiment_id):
dge_file_list = glob.glob(
os.path.join(source_analysis_dir, dge_dir_name, experiment_id + "_*_DGE_r*.txt")
)
# dge_file_list collects all files in the dge data subdirectory of source_analysis_dir that start with the experiment id and match _*_DGE_r*.txt (* is a wildcard)
dge_file_list.sort()
#sort the file list
logger.debug("len(dge_file_list): {}".format(len(dge_file_list)))
logger.debug(
"dge_file_list:\n{}".format(
"\n".join(dge_file_list)
)
)
return(dge_file_list)
def read_DGE_files(dge_file_list):
dge_df_list = [
(
pandas.read_csv(dge_file, sep="\t", index_col=0),
os.path.basename(dge_file)
)
for dge_file in dge_file_list
]
# this is a list comprehension; it is equivalent to the loop shown in the comment below
# dge_df_list = []
# for dge_file in dge_file_list
# dge_df_list.append(pandas.read_csv(dge_file, sep="\t", index_col=0),os.path.basename(dge_file))
logger.debug([x[0].shape for x in dge_df_list])
# another list comprehension; this one is equivalent to:
#for x in dge_df_list
#[x[0]].shape
#print out the name and data frame head of each tuple in list
for dge_df, dge_file in dge_df_list:
logger.debug("dge_file: {}".format(dge_file))
logger.debug("dge_df.head()\n{}".format(dge_df.head()))
return dge_df_list
def prepare_all_GCToo_objects(dge_stats_for_heatmaps, dge_df_list):
heatmap_gct_list = []
for dge_stat in dge_stats_for_heatmaps:
heatmap_g = prepare_GCToo_object(dge_stat, dge_df_list)
heatmap_gct_list.append((dge_stat, heatmap_g))
logger.debug("len(heatmap_gct_list): {}".format(len(heatmap_gct_list)))
logger.debug([(dge_stat, heat_g.data_df.shape) for dge_stat, heat_g in heatmap_gct_list])
return heatmap_gct_list
def prepare_data_df(dge_stat, dge_df_list):
extract_df_list = []
for dge_df, dge_file in dge_df_list:
basename = os.path.splitext(dge_file)[0]
annotation_values = basename.split("_")
annotation_str = "_".join(annotation_values[1:-2])
logger.debug("annotation_str: {}".format(annotation_str))
extract_df = dge_df[[dge_stat]]
col_id = dge_stat + "_" + annotation_str
extract_df.columns = [col_id]
logger.debug("extract_df.head():\n{}".format(extract_df.head()))
extract_df_list.append(extract_df)
data_df = pandas.concat(extract_df_list, axis=1)
logger.debug("data_df.shape: {}".format(data_df.shape))
logger.debug("data_df.head()\n{}".format(data_df.head()))
return data_df
def prepare_col_metadata(dge_stat, data_df_columns):
col_meta_list = [x.split("_") for x in data_df_columns]
col_metadata_df = | pandas.DataFrame(col_meta_list) | pandas.DataFrame |
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
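# Quick illustration of the fixture values above (not part of the tests):
# np.iinfo covers the integer dtypes and np.finfo the floating-point ones.
_int32_min = np.iinfo(np.int32).min        # -2147483648
_float32_max = np.finfo(np.float32).max    # ~3.4028235e+38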
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = | DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2}) | pandas.DataFrame |
from datetime import datetime, timedelta, date
import pandas as pd
import numpy as np
import requests, json
from api import API
class Market():
__competi = API.get_competi()
__user_key = API.get_user_key()
__url_base = API.get_url_base()
def __init__(self, market, year=datetime.now().year, month=datetime.now().month, day=datetime.now().day):
assert market in ["IBEX", "DAX", "EUROSTOXX"], "Not a valid market. The market must be IBEX, DAX or EUROSTOXX"
try:
self.__date = date(int(year), int(month), int(day))
except ValueError as exc:
raise ValueError(exc.args)
self.__market_name = market
self.__ticker_master = API.get_ticker_master(self.__market_name)
self.__stocks = self.__ticker_master['ticker']
self.__initialize_attributes()
self.__ohlcv()
# Get previous day in case the market is closed today
if (self.__date == datetime.now().date()):
self.__date = datetime.now().date() - timedelta(days=1)
def __ohlcv(self):
for stock in self.__stocks:
price_series = API.get_price_series(stock, self.__market_name)
self.__open = pd.concat([self.__open, price_series['open']], axis=1)
self.__high = pd.concat([self.__high, price_series['high']], axis=1)
self.__low = pd.concat([self.__low, price_series['low']], axis=1)
self.__close = pd.concat([self.__close, price_series['close']], axis=1)
self.__volume = pd.concat([self.__volume, price_series['vol']], axis=1)
self.__rename_columns()
self.__establish_dataframes_index_as_datetime()
def __rename_columns(self):
self.__open.columns = self.__stocks
self.__high.columns = self.__stocks
self.__low.columns = self.__stocks
self.__close.columns = self.__stocks
self.__volume.columns = self.__stocks
def __establish_dataframes_index_as_datetime(self):
self.__open.index = pd.to_datetime(self.__open.index)
self.__high.index = pd.to_datetime(self.__high.index)
self.__low.index = pd.to_datetime(self.__low.index)
self.__close.index = pd.to_datetime(self.__close.index)
self.__volume.index = | pd.to_datetime(self.__volume.index) | pandas.to_datetime |
import pandas as pd
import numpy as np
from tqdm import tqdm
def _parse_cols(lines, verbose, return_attrib_dict=False):
"""
Hidden function to parse the tabular part of the data
:param lines: (list of strings) - the lines of the file
:return: (pd.DataFrame) - columns = Seqid, Source, Type, Start, End, Score, Strand, Phase
"""
attribs = []
# spec from https://m.ensembl.org/info/website/upload/gff3.html
data = {"Seqid": [],
"Source": [],
"Type": [],
"Start": [],
"End": [],
"Score": [],
"Strand": [],
"Phase": []}
for line in (tqdm(lines) if verbose else lines):
if line[0] != '#':
columns = line.split('\t')
for i, key in enumerate(data):
data[key].append(columns[i])
attribs.append(columns[-1])
df = | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@File : freshhigh.py
Description :
@Author : pchaos
date: 18-7-9
-------------------------------------------------
Change Activity:
18-7-9:
@Contact : <EMAIL>
-------------------------------------------------
"""
__author__ = 'pchaos'
from django.db import models
from django.db.models import Q, Max
from django.db import transaction
import datetime
import pandas as pd
from stocks.models import StockBase
from stocks.models import Listing, RPS
import QUANTAXIS as qa
class FreshHigh(StockBase):
""" 创新高后,最低点和最高价记录
"""
code = models.ForeignKey(Listing, verbose_name='代码', max_length=10, on_delete=models.PROTECT, db_index=True,
null=True)
high = models.DecimalField(verbose_name='最高价', max_digits=9, decimal_places=3, null=True)
low = models.DecimalField(verbose_name='最低价', max_digits=9, decimal_places=3, null=True)
htradedate = models.DateField(verbose_name='最高点日期', null=True, db_index=True)
ltradedate = models.DateField(verbose_name='最低点日期', null=True, db_index=True)
@classmethod
def importList(cls, start=None, end=datetime.datetime.now().date(), n=120, m=250):
""" 创一年新高后,最低点和最高价记录
:param start:
:param end:
:param n:
:param m:
:return:
"""
def firstHigh(code, start=None, end=datetime.datetime.now().date(), n=120, m=250):
""" 返回第一次新高
:param code:
:param start:
:param end:
:param n:
:param m:
:return:
"""
def cmpfuncPeriod(df, days):
# flag bars that set a new high over the given window (days)
return pd.DataFrame(df.high == df.high.rolling(days).max())
# new high over the m-day lookback window
tdate = Listing.getNearestTradedate(days=-(n + m))
data = qa.QA_fetch_stock_day_adv(code, start=tdate, end=end).to_qfq()
ind = data.add_func(lambda x: cmpfuncPeriod(x, m))
results = ind[ind['high']]
df = data[ind.high].data.high.reset_index()
# gg = df.groupby('code').date.first() # possibly faster?
usedcode = []
fh = []
for v in [df.iloc[a] for a in df.index]:
if not (v.code in usedcode):
# first recorded new high for this stock code
usedcode.append(v.code)
fh.append([v.date, v.code])
return | pd.DataFrame(fh, columns=['date', 'code']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# @Time : 2020/9/9-22:50
# @Author : TuringEmmy
# @Email : <EMAIL>
# @WeChat : csy_lgy
# @File : support_vector_machine.py
# @Project : Sep-Dragon
# *************************************************
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
class SVM:
def __init__(self, max_iter=100, kernel='linear'):
self.max_iter = max_iter
self._kernel = kernel
def init_args(self, features, labels):
self.m, self.n = features.shape
self.X = features
self.Y = labels
self.b = 0.0
# store the E_i values in a list
self.alpha = np.ones(self.m)
self.E = [self._E(i) for i in range(self.m)]
# penalty parameter C for the slack variables (soft margin)
self.C = 1.0
def _KKT(self, i):
y_g = self._g(i) * self.Y[i]
if self.alpha[i] == 0:
return y_g >= 1
elif 0 < self.alpha[i] < self.C:
return y_g == 1
else:
return y_g <= 1
# g(x): predicted value for input x_i (X[i])
def _g(self, i):
r = self.b
for j in range(self.m):
r += self.alpha[j] * self.Y[j] * self.kernel(self.X[i],
self.X[j])  # accumulate alpha_j * y_j * K(x_i, x_j) over all training samples to get the prediction
return r
# kernel function
def kernel(self, x1, x2):
if self._kernel == 'linear':
return sum([x1[k] * x2[k] for k in range(self.n)])
elif self._kernel == 'poly':
return (sum([x1[k] * x2[k] for k in range(self.n)]) + 1) ** 2
return 0
# E(x): the difference between the prediction g(x) and the label y
def _E(self, i):
return self._g(i) - self.Y[i]
def _init_alpha(self):
# outer loop: first scan the samples with 0 < alpha < C and check the KKT conditions
index_list = [i for i in range(self.m) if 0 < self.alpha[i] < self.C]
# otherwise scan the whole training set
non_satisfy_list = [i for i in range(self.m) if i not in index_list]
index_list.extend(non_satisfy_list)
for i in index_list:
if self._KKT(i):
continue
E1 = self.E[i]
# choose j to maximize |E1 - E2|: if E1 is positive pick the smallest E, if E1 is negative pick the largest
if E1 >= 0:
j = min(range(self.m), key=lambda x: self.E[x])
else:
j = max(range(self.m), key=lambda x: self.E[x])
return i, j
def _compare(self, _alpha, L, H):
if _alpha > H:
return H
elif _alpha < L:
return L
else:
return _alpha
def fit(self, features, labels):
self.init_args(features, labels)
for t in range(self.max_iter):
# train
i1, i2 = self._init_alpha()
# clipping bounds L and H
if self.Y[i1] == self.Y[i2]:
L = max(0, self.alpha[i1] + self.alpha[i2] - self.C)
H = min(self.C, self.alpha[i1] + self.alpha[i2])
else:
L = max(0, self.alpha[i2] - self.alpha[i1])
H = min(self.C, self.C + self.alpha[i2] - self.alpha[i1])
E1 = self.E[i1]
E2 = self.E[i2]
# eta=K11+K22-2K12
eta = self.kernel(self.X[i1], self.X[i1]) + self.kernel(
self.X[i2],
self.X[i2]) - 2 * self.kernel(self.X[i1], self.X[i2])
if eta <= 0:
# print('eta <= 0')
continue
alpha2_new_unc = self.alpha[i2] + self.Y[i2] * (
E1 - E2) / eta  # modified here: per the book (pp. 130-131) this should be E1 - E2
alpha2_new = self._compare(alpha2_new_unc, L, H)
alpha1_new = self.alpha[i1] + self.Y[i1] * self.Y[i2] * (
self.alpha[i2] - alpha2_new)
b1_new = -E1 - self.Y[i1] * self.kernel(self.X[i1], self.X[i1]) * (
alpha1_new - self.alpha[i1]) - self.Y[i2] * self.kernel(
self.X[i2],
self.X[i1]) * (alpha2_new - self.alpha[i2]) + self.b
b2_new = -E2 - self.Y[i1] * self.kernel(self.X[i1], self.X[i2]) * (
alpha1_new - self.alpha[i1]) - self.Y[i2] * self.kernel(
self.X[i2],
self.X[i2]) * (alpha2_new - self.alpha[i2]) + self.b
if 0 < alpha1_new < self.C:
b_new = b1_new
elif 0 < alpha2_new < self.C:
b_new = b2_new
else:
# take the midpoint
b_new = (b1_new + b2_new) / 2
# update the parameters
self.alpha[i1] = alpha1_new
self.alpha[i2] = alpha2_new
self.b = b_new
self.E[i1] = self._E(i1)
self.E[i2] = self._E(i2)
return 'train done!'
def predict(self, data):
r = self.b
for i in range(self.m):
r += self.alpha[i] * self.Y[i] * self.kernel(data, self.X[i])
return 1 if r > 0 else -1
def score(self, X_test, y_test):
right_count = 0
for i in range(len(X_test)):
result = self.predict(X_test[i])
if result == y_test[i]:
right_count += 1
return right_count / len(X_test)
def _weight(self):
# linear model
yx = self.Y.reshape(-1, 1) * self.X
self.w = np.dot(yx.T, self.alpha)
return self.w
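# Hedged usage sketch for the SVM class above: take two iris classes, relabel
# them as +1/-1 (the SMO update assumes labels in {-1, +1}), fit and score.
# Dataset slicing and max_iter are arbitrary illustration choices; load_iris,
# train_test_split and numpy are already imported at the top of this file.
def _svm_usage_sketch():
    iris = load_iris()
    X = iris.data[:100, :2]
    y = np.where(iris.target[:100] == 0, -1, 1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
    clf = SVM(max_iter=200)
    clf.fit(X_train, y_train)
    return clf.score(X_test, y_test)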
# data
def create_data():
iris = load_iris()
df = | pd.DataFrame(iris.data, columns=iris.feature_names) | pandas.DataFrame |
import os
import subprocess
import orjson as json
import pandas as pd
from multiprocessing import Pool
from ekorpkit import eKonf
from glob import glob
from ekorpkit.io.fetch.web import web_download, gdrive_download_un7z
class Wiki:
def __init__(self, **args):
self.args = eKonf.to_config(args)
self.name = self.args.name
self.autoload = self.args.get("autoload", True)
self.url = self.args.dump.url
self.output_dir = self.args.output_dir
self.dump_dir = self.args.dump.dump_dir
os.makedirs(self.output_dir, exist_ok=True)
os.makedirs(self.dump_dir, exist_ok=True)
if self.args.output_file:
self.output_file = os.path.join(self.output_dir, self.args.output_file)
self.dump_file = os.path.join(self.dump_dir, self.args.dump.dump_file)
self.force_download = self.args.force_download
if self.autoload:
self.fetch()
def fetch(self):
if not os.listdir(self.output_dir) or self.force_download:
self.download_dump()
if self.args.extract._target_:
getattr(self, self.args.extract._target_)()
def extract_namuwiki(self):
extracted_dir = self.dump_file[:-3]
if os.path.exists(extracted_dir):
json_file = glob(extracted_dir + "/*.json")[0]
else:
raise Exception("Extracted json file doesn't exist")
with open(json_file, "r", encoding="utf-8") as input_file:
namu_wiki = json.load(input_file)
with Pool(processes=self.args.num_workers) as pool:
documents = pool.map(work, namu_wiki)
print(f"Extracted {self.name} from dump file {self.dump_file}")
df = | pd.DataFrame(documents) | pandas.DataFrame |
from imghdr import what
import pandas as pd
import numpy as np
import streamlit as st
from streamlit import caching
import datetime as dt
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import RendererAgg
_lock = RendererAgg.lock
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.dates as mdates
import plotly.express as px
import plotly.graph_objects as go
# INSPIRATION : https://weatherspark.com/m/52666/10/Average-Weather-in-October-in-Utrecht-Netherlands
# https://radumas.info/blog/tutorial/2017/04/17/percentile-test.html
def select_period_oud(df, field, show_from, show_until):
"""Shows two inputfields (from/until and Select a period in a df (helpers.py).
Args:
df (df): dataframe
field (string): Field containing the date
Returns:
df: filtered dataframe
"""
if show_from is None:
show_from = "2020-01-01"
if show_until is None:
show_until = "2030-01-01"
# "Date_statistics"
mask = (df[field].dt.date >= show_from) & (df[field].dt.date <= show_until)
df = df.loc[mask]
df = df.reset_index()
return df
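# Hedged usage sketch for select_period_oud, mirroring the .dt.date comparison
# inside the function; the dataframe and the two date bounds are illustrative.
def _select_period_example():
    df = pd.DataFrame({"YYYYMMDD": pd.date_range("2021-01-01", periods=10)})
    return select_period_oud(df, "YYYYMMDD",
                             dt.date(2021, 1, 3), dt.date(2021, 1, 7))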
@st.cache(ttl=60 * 60 * 24, suppress_st_warning=True)
def getdata(stn, fromx, until):
with st.spinner(f"GETTING ALL DATA ..."):
# url = "https://www.daggegevens.knmi.nl/klimatologie/daggegevens?stns=251&vars=TEMP&start=18210301&end=20210310"
# https://www.knmi.nl/kennis-en-datacentrum/achtergrond/data-ophalen-vanuit-een-script
# url = f"https://www.daggegevens.knmi.nl/klimatologie/daggegevens?stns={stn}&vars=ALL&start={fromx}&end={until}"
url = f"https://www.daggegevens.knmi.nl/klimatologie/daggegevens?stns={stn}&vars=TEMP:SQ:SP:Q:DR:RH&start={fromx}&end={until}"
try:
df = pd.read_csv(
url,
delimiter=",",
header=None,
comment="#",
low_memory=False,
)
except:
st.write("FOUT BIJ HET INLADEN.")
st.stop()
# TG : Etmaalgemiddelde temperatuur (in 0.1 graden Celsius) / Daily mean temperature in (0.1 degrees Celsius)
# TN : Minimum temperatuur (in 0.1 graden Celsius) / Minimum temperature (in 0.1 degrees Celsius)
# TNH : Uurvak waarin TN is gemeten / Hourly division in which TN was measured
# TX : Maximum temperatuur (in 0.1 graden Celsius) / Maximum temperature (in 0.1 degrees Celsius)
# TXH : Uurvak waarin TX is gemeten / Hourly division in which TX was measured
# T10N : Minimum temperatuur op 10 cm hoogte (in 0.1 graden Celsius) / Minimum temperature at 10 cm above surface (in 0.1 degrees Celsius)
# T10NH : 6-uurs tijdvak waarin T10N is gemeten / 6-hourly division in which T10N was measured; 6=0-6 UT; 12=6-12 UT; 18=12-18 UT; 24=18-24 UT
# SQ : Zonneschijnduur (in 0.1 uur) berekend uit de globale straling (-1 voor <0.05 uur) / Sunshine duration (in 0.1 hour) calculated from global radiation (-1 for <0.05 hour)
# SP : Percentage van de langst mogelijke zonneschijnduur / Percentage of maximum potential sunshine duration
# Q : Globale straling (in J/cm2) / Global radiation (in J/cm2)
# DR : Duur van de neerslag (in 0.1 uur) / Precipitation duration (in 0.1 hour)
# RH : Etmaalsom van de neerslag (in 0.1 mm) (-1 voor <0.05 mm) / Daily precipitation amount (in 0.1 mm) (-1 for <0.05 mm)
column_replacements = [
[0, "STN"],
[1, "YYYYMMDD"],
[2, "temp_avg"],
[3, "temp_min"],
[4, "temp_max"],
[5, "T10N"],
[6, "zonneschijnduur"],
[7, "perc_max_zonneschijnduur"],
[8, "glob_straling"],
[9, "neerslag_duur"],
[10, "neerslag_etmaalsom"],
]
for c in column_replacements:
df = df.rename(columns={c[0]: c[1]})
df["YYYYMMDD"] = | pd.to_datetime(df["YYYYMMDD"], format="%Y%m%d") | pandas.to_datetime |
import codecs
import datetime
import json
import numbers
import warnings
import dill
import numpy as np
import pandas as pd
import pickle
from pymongo import MongoClient
import redis
from tabulate import tabulate
class Concordia():
def __init__(self, persistent_db_config=None, in_memory_db_config=None, default_row_id_field=None):
print('Welcome to Concordia! We\'ll do our best to take a couple stressors off your plate and give you more confidence in your machine learning systems in production.')
self.persistent_db_config = {
'host': 'localhost'
, 'port': 27017
, 'db': '_concordia'
}
if persistent_db_config is not None:
self.persistent_db_config.update(persistent_db_config)
self.in_memory_db_config = {
'host': 'localhost'
, 'port': 6379
, 'db': 0
}
if in_memory_db_config is not None:
self.in_memory_db_config.update(in_memory_db_config)
self._create_db_connections()
self.valid_prediction_types = set([str, int, float, list, 'int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64'])
self.default_row_id_field = default_row_id_field
params_to_save = {
'persistent_db_config': self.persistent_db_config
, 'in_memory_db_config': self.in_memory_db_config
, 'default_row_id_field': self.default_row_id_field
}
self.insert_into_persistent_db(val=params_to_save, val_type='concordia_config', row_id='_intentionally_blank', model_id='_intentionally_blank')
def set_params(self, params_dict):
for k, v in params_dict.items():
setattr(self, k, v)
def _create_db_connections(self):
host = self.in_memory_db_config['host']
port = self.in_memory_db_config['port']
db = self.in_memory_db_config['db']
self.rdb = redis.StrictRedis(host=host, port=port, db=db)
host = self.persistent_db_config['host']
port = self.persistent_db_config['port']
db = self.persistent_db_config['db']
client = MongoClient(host=host, port=port)
self.mdb = client[db]
return self
# feature_importances is a dict, with keys as feature names, and values being the importance of each feature. It doesn't matter how the importances are calculated, we'll just sort by those values
def add_model(self, model, model_id, feature_names=None, feature_importances=None, description=None, features_to_save='all'):
print('One thing to keep in mind is that each model_id must be unique in each db configuration. So if two Concordia instances are using the same database configurations, you should make sure their model_ids do not overlap.')
redis_key_model = self.make_redis_model_key(model_id)
stringified_model = codecs.encode(dill.dumps(model), 'base64').decode()
self.rdb.set(redis_key_model, stringified_model)
redis_key_features = self.make_redis_key_features(model_id)
stringified_features = json.dumps(features_to_save)
self.rdb.set(redis_key_features, stringified_features)
if feature_importances is not None:
if not isinstance(feature_importances, dict):
raise(TypeError('feature_importances must be a dict, where each key is a feature name, and each value is the importance of that feature'))
for k, v in feature_importances.items():
if isinstance(v, np.generic):
feature_importances[k] = np.asscalar(v)
mdb_doc = {
'val_type': 'model_info'
, 'model': stringified_model
, 'model_id': model_id
, 'feature_names': feature_names
, 'feature_importances': json.dumps(feature_importances)
, 'description': description
, 'date_added': datetime.datetime.now()
, 'features_to_save': stringified_features
}
self.insert_into_persistent_db(mdb_doc, val_type=mdb_doc['val_type'], row_id=mdb_doc['model_id'], model_id=mdb_doc['model_id'])
return self
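# Hedged usage sketch for ``add_model``: ``trained_model`` is any fitted,
# picklable estimator you already have (it is a parameter here, not defined in
# this file), the ids and importances are illustrative, and the call assumes
# the default local MongoDB/Redis instances from the constructor are reachable.
def _register_example_model(trained_model):
    concordia = Concordia(default_row_id_field='user_id')
    concordia.add_model(
        model=trained_model,
        model_id='churn_model_v1',
        feature_names=['age', 'num_purchases'],
        feature_importances={'age': 0.7, 'num_purchases': 0.3},
        description='example model registration',
    )
    return concordia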
def add_label(self, row_id, model_id, label):
label_doc = {
'row_id': row_id
, 'model_id': model_id
, 'label': label
}
if not isinstance(row_id, numbers.Number) and not isinstance(row_id, np.generic) and not isinstance(row_id, str):
if isinstance(model_id, str):
label_doc['model_id'] = [model_id for x in range(len(row_id))]
label_doc = pd.DataFrame(label_doc)
self.insert_into_persistent_db(val=label_doc, val_type='live_labels', row_id=label_doc['row_id'], model_id=label_doc['model_id'])
def list_all_models(self, verbose=True):
live_models = self.retrieve_from_persistent_db(val_type='model_info')
if verbose:
print('Here are all the models that have been added to concordia for live predictions:')
model_names = [x['model_id'] for x in live_models]
print(model_names)
for model_info in live_models:
del model_info['model']
return live_models
def retrieve_from_persistent_db(self, val_type, row_id=None, model_id=None, min_date=None, date_field=None):
if min_date is not None and date_field is None and not (isinstance(min_date, datetime.datetime) or isinstance(min_date, datetime.date)):
print('You have specified a min_date, but not a date_field')
print('Without the date_field specified, Concordia will query against the "_concordia_created_at" field, which is of type datetime.datetime.')
print('Therefore, your min_date must be of type datetime.datetime, but it is not right now. It is of type: '.format(type(min_date)))
raise(TypeError('min_date must be of type datetime if date_field is unspecified'))
query_params = {
'row_id': row_id
, 'model_id': model_id
}
if row_id is None:
del query_params['row_id']
if model_id is None:
del query_params['model_id']
if min_date is not None:
if date_field is None:
query_params['_concordia_created_at'] = {'$gte': min_date}
else:
query_params[date_field] = {'$gte': min_date}
result = self.mdb[val_type].find(query_params)
# Handle the case where we have multiple predictions from the same row, or any other instances where we have multiple results for the same set of ids
if isinstance(result, dict):
result = [result]
elif not isinstance(result, list):
result = list(result)
return result
def check_row_id(self, val, row_id, idx=None):
if row_id is None:
calculated_row_id = val.get(self.default_row_id_field, None)
if calculated_row_id is None:
print('You must pass in a row_id for anything that gets saved to the db.')
print('This input is missing a value for "row_id"')
if self.default_row_id_field is not None:
print('This input is also missing a value for "{}", the default_row_id_field'.format(self.default_row_id_field))
raise(ValueError('Missing "row_id" field'))
else:
row_id = calculated_row_id
assert row_id is not None
val['row_id'] = row_id
return val
def check_model_id(self, val, model_id, idx=None):
if isinstance(model_id, list):
model_id = model_id[idx]
if model_id is None:
calculated_model_id = val.get('model_id', None)
if calculated_model_id is None:
print('You must pass in a model_id for anything that gets saved to the db.')
print('This input is missing a value for "model_id"')
raise(ValueError('Missing "model_id" field'))
else:
model_id = calculated_model_id
assert model_id is not None
val['model_id'] = model_id
return val
def _insert_df_into_db(self, df, val_type, row_id, model_id):
df_cols = set(df.columns)
if 'row_id' not in df_cols:
if row_id is not None:
df['row_id'] = row_id
else:
if self.default_row_id_field not in df_cols:
print('You must pass in a row_id for anything that gets saved to the db.')
print('This input is missing a value for "row_id"')
if self.default_row_id_field is not None:
print('This input is also missing a value for "{}", the default_row_id_field'.format(self.default_row_id_field))
raise(ValueError('Missing "row_id" field'))
if 'model_id' not in df_cols:
if model_id is not None:
df['model_id'] = model_id
else:
print('You must pass in a model_id for anything that gets saved to the db.')
print('This input is missing a value for "model_id"')
raise(ValueError('Missing "model_id" field'))
chunk_min_idx = 0
chunk_size = 1000
while chunk_min_idx < df.shape[0]:
max_idx = min(df.shape[0], chunk_min_idx + chunk_size)
df_chunk = df.iloc[chunk_min_idx: max_idx]
df_chunk = df_chunk.to_dict('records')
self.mdb[val_type].insert_many(df_chunk)
del df_chunk
chunk_min_idx += chunk_size
def insert_into_persistent_db(self, val, val_type, row_id=None, model_id=None):
val = val.copy()
if '_id' in val:
del val['_id']
if '_id_' in val:
del val['_id_']
val['_concordia_created_at'] = datetime.datetime.utcnow()
if isinstance(val, dict):
val = self.check_row_id(val=val, row_id=row_id)
val = self.check_model_id(val=val, model_id=model_id)
for k, v in val.items():
if isinstance(v, np.generic):
val[k] = np.asscalar(v)
self.mdb[val_type].insert_one(val)
else:
self._insert_df_into_db(df=val, val_type=val_type, row_id=row_id, model_id=model_id)
return self
def make_redis_model_key(self, model_id):
return '_concordia_{}_{}'.format(model_id, 'model')
def _get_model(self, model_id):
redis_key_model = self.make_redis_model_key(model_id)
redis_result = self.rdb.get(redis_key_model)
if redis_result == 'None' or redis_result is None:
# Try to get it from MongoDB
mdb_result = self.retrieve_from_persistent_db(val_type='model_info', row_id=None, model_id=model_id)
if mdb_result is None or len(mdb_result) == 0:
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('We could not find a corresponding model for model_id {}'.format(model_id))
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
error_string = 'We could not find a corresponding model for model_id {}'.format(model_id)
raise(ValueError(error_string))
else:
model = mdb_result[0]['model']
self.rdb.set(redis_key_model, model)
redis_result = self.rdb.get(redis_key_model)
redis_result = dill.loads(codecs.decode(redis_result, 'base64'))
return redis_result
def _get_features_to_save(self, model_id):
redis_key = self.make_redis_key_features(model_id)
redis_result = self.rdb.get(redis_key)
if redis_result is None or redis_result == 'None':
mdb_result = self.retrieve_from_persistent_db(val_type='model_info', row_id=None, model_id=model_id)
if mdb_result is None or len(mdb_result) == 0:
return 'all'
else:
try:
features = mdb_result[0]['features_to_save']
except KeyError:
features = json.dumps('all')
self.rdb.set(redis_key, features)
redis_result = self.rdb.get(redis_key)
if isinstance(redis_result, bytes):
redis_result = redis_result.decode('utf-8')
redis_result = json.loads(redis_result)
return redis_result
def make_redis_key_features(self, model_id):
return '_concordia_{}_{}'.format(model_id, 'features_to_save')
# This can handle both individual dictionaries and Pandas DataFrames as inputs
def add_data_and_predictions(self, model_id, features, predictions, row_ids, actuals=None):
if not isinstance(features, pd.DataFrame):
print('Training features must be a pandas DataFrame, not a {}'.format(type(features)))
raise(TypeError('Training features must be a pandas DataFrame'))
features = features.copy()
features['row_id'] = row_ids
features['model_id'] = model_id
features_to_save = self._get_features_to_save(model_id=model_id)
concordia_features_to_save = ['row_id', 'model_id']
if features_to_save == 'all':
features_to_save = list(features.columns)
else:
features_to_save = features_to_save + concordia_features_to_save
prediction_docs = []
for idx, pred in enumerate(predictions):
if type(pred) not in self.valid_prediction_types:
pred = list(pred)
pred_doc = {
'prediction': pred
, 'row_id': row_ids.iloc[idx]
, 'model_id': model_id
}
prediction_docs.append(pred_doc)
predictions_df = pd.DataFrame(prediction_docs)
if actuals is not None:
actuals_docs = []
for idx, actual in enumerate(actuals):
actual_doc = {
'label': actual
, 'row_id': row_ids.iloc[idx]
, 'model_id': model_id
}
actuals_docs.append(actual_doc)
actuals_df = pd.DataFrame(actuals_docs)
saving_features = features[features_to_save]
self.insert_into_persistent_db(val=saving_features, val_type='training_features')
self.insert_into_persistent_db(val=predictions_df, val_type='training_predictions')
if actuals is not None:
self.insert_into_persistent_db(val=actuals_df, val_type='training_labels')
# if features_to_save == 'all':
# features_to_save = features.keys()
# else:
# features_to_save = features_to_save + concordia_features_to_save
# saving_features = {}
# for k, v in features.items():
# if k in features_to_save:
# saving_features[k] = v
# self.insert_into_persistent_db(val=saving_features, val_type='training_features', row_id=row_id, model_id=model_id)
# self.insert_into_persistent_db(val=predictions, val_type='training_predictions', row_id=row_id, model_id=model_id)
# if actuals is not None:
# self.insert_into_persistent_db(val=actuals, val_type='training_labels', row_id=row_id, model_id=model_id)
return self
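# Hedged sketch of logging training data with the method above: features must
# be a DataFrame, and row_ids is indexed with .iloc, so a pandas Series fits
# naturally. The column names, ids and labels are illustrative, and the call
# assumes the model_id was already registered and the backing stores are up.
def _log_example_training_data(concordia):
    features = pd.DataFrame({'age': [34, 51], 'num_purchases': [3, 12]})
    row_ids = pd.Series([101, 102])
    concordia.add_data_and_predictions(
        model_id='churn_model_v1',
        features=features,
        predictions=[0, 1],
        row_ids=row_ids,
        actuals=[0, 1],
    )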
# FUTURE: add in model_type, which will just get the most recent model_id for that model_type
# NOTE: we will return whatever the base model returns. We will not modify the output of that model at all (so if the model is an auto_ml model that returns a single float for a single item prediction, that's what we return. if it's a sklearn model that returns a list with a single float in it, that's what we return)
# NOTE: it is explicitly OK to call predict multiple times with the same data. If you want to filter out duplicate rows, you may do that with "drop_duplicates=True" at analytics time
def predict(self, model_id, features, row_id=None, shadow_models=None):
return self._predict(features=features, model_id=model_id, row_id=row_id, shadow_models=shadow_models, proba=False)
def predict_proba(self, model_id, features, row_id=None, shadow_models=None):
return self._predict(features=features, model_id=model_id, row_id=row_id, shadow_models=shadow_models, proba=True)
def predict_all(self, data):
pass
def _predict(self, features=None, model_id=None, row_id=None, model_ids=None, shadow_models=None, proba=False):
features = features.copy()
model = self._get_model(model_id=model_id)
if row_id is None:
row_id = features[self.default_row_id_field]
features_to_save = self._get_features_to_save(model_id=model_id)
if features_to_save == 'all':
saving_features = features
else:
saving_features = features[features_to_save]
# FUTURE: input verification here before we get predictions.
self.insert_into_persistent_db(val=saving_features, val_type='live_features', row_id=row_id, model_id=model_id)
if proba == True:
prediction = model.predict_proba(features)
else:
prediction = model.predict(features)
# Mongo doesn't handle np.ndarrays. It prefers lists.
pred_for_saving = prediction
if isinstance(pred_for_saving, np.ndarray):
pred_for_saving = list(pred_for_saving)
clean_pred_for_saving = []
for item in pred_for_saving:
if isinstance(item, np.ndarray):
item = list(item)
clean_pred_for_saving.append(item)
pred_for_saving = clean_pred_for_saving
pred_doc = {
'prediction': pred_for_saving
, 'row_id': row_id
, 'model_id': model_id
}
if isinstance(features, pd.DataFrame):
pred_doc = pd.DataFrame(pred_doc)
self.insert_into_persistent_db(val=pred_doc, val_type='live_predictions', row_id=row_id, model_id=model_id)
return prediction
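# Hedged sketch of a live scoring call via the method above: live_df is any
# DataFrame the registered model can consume, and 'user_id' matches the
# default_row_id_field assumed when the Concordia instance was constructed.
def _score_live_rows(concordia, live_df):
    return concordia.predict(
        model_id='churn_model_v1',
        features=live_df,
        row_id=live_df['user_id'],
    )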
# def remove_model(self, model_id, verbose=True):
# if verbose == True:
# print('Removing model {}'.format(model_id))
# print('Note that this will remove the model from being able to make predictions.')
# print('We will keep historical data associated with this model (features, predictions, labels, etc.) so you can continue to perform analysis on it.')
# # TODO: remove the model from our model_info table.
# # This is the only place we are deleting something from the db, so we might need to create a new helper function (delete_from_db) for it.
# pass
def match_training_and_live(self, df_train, df_live, row_id_field=None):
# The important part here is our live predictions
# So we'll left join the two, keeping all of our live rows
# TODO: leverage the per-model row_id_field we will build out soon
# TODO: some accounting for rows that don't match
cols_to_drop = ['_id', '_concordia_created_at']
for col in cols_to_drop:
try:
del df_train[col]
except:
pass
try:
del df_live[col]
except:
pass
df = pd.merge(df_live, df_train, on='row_id', how='inner', suffixes=('_live', '_train'))
if df.shape[0] == 0 and df_train.shape[0] > 0 and df_live.shape[0] > 0:
print('\nWe have saved data for both training and live environments, but were not able to match them together on shared row_id values. Here is some information about the row_id column to help you debug.')
print('\nTraining row_id.head')
print(df_train.row_id.head())
print('\nLive row_id.head')
print(df_live.row_id.head())
print('\nTraining row_id described:')
print(df_train.row_id.describe())
print('\nLive row_id described:')
print(df_live.row_id.describe())
warnings.warn('While we have saved data for this model_id for both live and training environments, we were not able to match them on the same row_id.')
return df
def compare_one_row_predictions(self, row):
train_pred = row.prediction_train
live_pred = row.prediction_live
count_lists = 0
if isinstance(train_pred, list) or isinstance(train_pred, pd.Series):
count_lists += 1
if isinstance(live_pred, list) or isinstance(live_pred, pd.Series):
count_lists += 1
if count_lists == 1:
print('It appears you are comparing predictions of different types (only one of them is a list). This might be from comparing predictions where one was a probability prediction, and one was not. We have not yet built out that functionality. Please make sure all predictions are consistent types.')
raise(TypeError('Predictions are of different types. Only one of the predictions is a list'))
if count_lists == 2:
return_val = {}
for idx, train_proba_pred in enumerate(train_pred):
live_proba_pred = live_pred[idx]
return_val['class_{}_delta'.format(idx)] = train_proba_pred - live_proba_pred
else:
delta = train_pred - live_pred
return_val = {'delta': delta}
return pd.Series(return_val)
def analyze_prediction_discrepancies(self, model_id, return_summary=True, return_deltas=True, return_matched_rows=False, sort_column=None, min_date=None, date_field=None, verbose=True, ignore_nans=True, ignore_duplicates=True):
# TODO 1: add input checking for min_date must be a datetime if date_field is none
# TODO 2: add logging if we have values for both training and live, but no matches when merging
# 1. Get live data (only after min_date)
live_predictions = self.retrieve_from_persistent_db(val_type='live_predictions', row_id=None, model_id=model_id, min_date=min_date, date_field=date_field)
# 2. Get training_data (only after min_date- we are only supporting the use case of training data being added after live data)
training_predictions = self.retrieve_from_persistent_db(val_type='training_predictions', row_id=None, model_id=model_id, min_date=min_date, date_field=date_field)
live_predictions = pd.DataFrame(live_predictions)
training_predictions = pd.DataFrame(training_predictions)
if ignore_nans == True:
if verbose:
print('Ignoring nans')
live_predictions = live_predictions[pd.notnull(live_predictions.prediction)]
training_predictions = training_predictions[pd.notnull(training_predictions.prediction)]
if ignore_duplicates == True:
if verbose:
print('Ignoring duplicates')
live_predictions.drop_duplicates(subset='row_id', inplace=True)
training_predictions.drop_duplicates(subset='row_id', inplace=True)
if verbose == True:
print('Found {} relevant live predictions'.format(live_predictions.shape[0]))
print('Found a max of {} possibly relevant train predictions'.format(training_predictions.shape[0]))
# 3. match them up (and provide a reconciliation of what rows do not match)
df_live_and_train = self.match_training_and_live(df_live=live_predictions, df_train=training_predictions)
print('Found {} rows that appeared in both our training and live datasets'.format(df_live_and_train.shape[0]))
# All of the above should be done using helper functions
# 4. Go through and analyze all feature discrepancies!
# Ideally, we'll have an "impact_on_predictions" column, though maybe only for our top 10 or top 100 features
deltas = df_live_and_train.apply(self.compare_one_row_predictions, axis=1)
summary = self.summarize_prediction_deltas(df_deltas=deltas)
return_val = self.create_analytics_return_val(summary=summary, deltas=deltas, matched_rows=df_live_and_train, return_summary=return_summary, return_deltas=return_deltas, return_matched_rows=return_matched_rows, verbose=verbose)
return return_val
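# Hedged sketch of auditing live vs. training predictions with the method
# above. With date_field left as None, min_date must be a datetime.datetime,
# matching the type check at the top of the method; the model_id and cutoff
# date are illustrative values.
def _run_prediction_audit(concordia):
    report = concordia.analyze_prediction_discrepancies(
        model_id='churn_model_v1',
        min_date=datetime.datetime(2018, 1, 1),
        return_matched_rows=True,
    )
    return report['summary'], report['matched_rows']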
def create_analytics_return_val(self, summary, deltas, matched_rows, return_summary=True, return_deltas=True, return_matched_rows=False, verbose=True):
return_val = {}
if return_summary == True:
return_val['summary'] = summary
if return_deltas == True:
return_val['deltas'] = deltas
if return_matched_rows == True:
return_val['matched_rows'] = matched_rows
if verbose:
print('\n\n******************')
print('Deltas:')
print('******************\n')
# What we want to do here is have each row be a metric, with two columns
# The metric name, and the metric value
sorted_keys = sorted(summary.keys())
printing_val = []
for key in sorted_keys:
printing_val.append((key, summary[key]))
print(tabulate(printing_val, headers=['Metric', 'Value'], floatfmt='.3f', tablefmt='psql'))
return return_val
def find_missing_columns(self, df):
columns = set(df.columns)
results = {
'train_columns_not_in_live': []
, 'live_columns_not_in_train': []
, 'matched_cols': []
}
for col in df.columns:
if col[-6:] == '_train':
live_col = col[:-6] + '_live'
if live_col not in columns:
results['train_columns_not_in_live'].append(col[:-6])
else:
results['matched_cols'].append(col[:-6])
elif col[-5:] == '_live':
train_col = col[:-5] + '_train'
if train_col not in columns:
results['live_columns_not_in_train'].append(col[:-5])
return results
def summarize_one_delta_col(self, deltas, prefix):
results = {}
percentiles = [5, 25, 50, 75, 95, 99]
results['{}_num_rows_with_deltas'.format(prefix)] = len([x for x in deltas if x != 0])
results['{}_num_rows_with_no_deltas'.format(prefix)] = len([x for x in deltas if x == 0])
results['{}_avg_delta'.format(prefix)] = np.mean(deltas)
results['{}_median_delta'.format(prefix)] = np.median(deltas)
for percentile in percentiles:
results['{}_{}th_percentile_delta'.format(prefix, percentile)] = np.percentile(deltas, percentile)
abs_deltas = np.abs(deltas)
results['{}_avg_abs_delta'.format(prefix)] = np.mean(abs_deltas)
results['{}_median_abs_delta'.format(prefix)] = np.median(abs_deltas)
for percentile in percentiles:
results['{}_{}th_percentile_abs_delta'.format(prefix, percentile)] = np.percentile(abs_deltas, percentile)
return results
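# Worked example (illustrative): for deltas = [0, 0.5, -0.5, 2] and prefix='prediction',
# the dict above would include
#   prediction_num_rows_with_deltas = 3, prediction_num_rows_with_no_deltas = 1,
#   prediction_avg_delta = 0.5, prediction_median_delta = 0.25,
#   prediction_avg_abs_delta = 0.75, prediction_median_abs_delta = 0.5,
# plus 5/25/50/75/95/99th percentile entries for both raw and absolute deltas.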
def summarize_prediction_deltas(self, df_deltas):
if 'delta' in df_deltas.columns:
result = self.summarize_one_delta_col(df_deltas.delta, prefix='prediction')
else:
result = {}
for col in df_deltas.columns:
if col[-6:] == '_delta':
result.update(self.summarize_one_delta_col(df_deltas[col], col[:-6]))
return result
def summarize_feature_deltas(self, df_deltas, feature_importances):
col_results = []
for col in df_deltas.columns:
# TODO: figure out if a column is categorical. if it is, handle deltas differently (probably just count of vals that are different)
col_result = self.summarize_one_delta_col(deltas=df_deltas[col], prefix=col)
col_result['feature'] = col
if feature_importances is not None:
importance = feature_importances.get(col, 0)
col_result['feature_importance'] = importance
col_results.append(col_result)
return col_results
def analyze_feature_discrepancies(self, model_id, return_summary=True, return_deltas=True, return_matched_rows=False, sort_column=None, min_date=None, date_field=None, verbose=True, ignore_duplicates=True, sample_rate=1.0):
# 1. Get live data (only after min_date)
live_features = self.retrieve_from_persistent_db(val_type='live_features', row_id=None, model_id=model_id, min_date=min_date, date_field=date_field)
# 2. Get training_data (only after min_date; we only support the use case of training data being added after live data)
training_features = self.retrieve_from_persistent_db(val_type='training_features', row_id=None, model_id=model_id, min_date=min_date, date_field=date_field)
live_features = pd.DataFrame(live_features)
training_features = | pd.DataFrame(training_features) | pandas.DataFrame |
#!/usr/bin/env python
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.externals import joblib  # model persistence
def get_sepsis_score(data, model):
data_mean = np.array([
84.58144, 97.19395, 36.97723, 123.7505, 82.4001,
63.83056, 18.7265, 32.95766, -0.68992, 24.07548,
0.554839, 7.378934, 41.02187, 92.65419, 260.2234,
23.91545, 102.4837, 7.557531, 105.8279, 1.510699,
1.836177, 136.9323, 2.64666, 2.05145, 3.544238,
4.135528, 2.114059, 8.290099, 30.79409, 10.43083,
41.23119, 11.44641, 287.3857, 196.0139, 62.00947,
0.559269, 0.496571, 0.503429, -56.1251, 26.99499])
data_mean = pd.DataFrame(data_mean.reshape(1, 40))
data = pd.DataFrame(data)
data = data.fillna(method='pad')
data = data.fillna(method='bfill')  # fill remaining gaps from the patient's own records
values = | pd.concat([data_mean, data], axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so the previously defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = | Series(['fooBAD__barBAD', NA, 'foo']) | pandas.Series |
#-*- coding:utf-8 -*-
# &Author AnFany
from bs4 import BeautifulSoup as bs
import urllib.request
html = urllib.request.urlopen("https://en.wikipedia.org/wiki/Iris_flower_data_set#Data_set")
bsObj = bs(html.read(), "html5lib")
# list of field (column) names
ziduan = []
for hh in bsObj.find_all('table', class_='wikitable sortable mw-collapsible mw-collapsed'):
for ii in hh.find_all('th'):
fu = ii.get_text().split()
zi = ('_').join(fu)
exec('%s = []' % zi)
ziduan.append(zi)
fu = 0
for jj in hh.find_all('td'):
ty = jj.get_text().split()
try:
float(ty[0])
exec('%s.append(%.2f)' % (ziduan[fu % 6], float(ty[0])))
except ValueError:
exec('%s.append("%s")' % (ziduan[fu % 6], str(ty[-1])))
fu += 1
# use an ordered dict so columns are output in insertion order
from collections import OrderedDict
datadict=OrderedDict({})
for keyname in ziduan:
datadict[keyname] = eval(keyname)
# write to file
import pandas as pd
df = | pd.DataFrame(datadict) | pandas.DataFrame |
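# A minimal sketch of the "write to file" step announced above (the filename is an
# assumption, not from the original snippet):
# df.to_csv('iris_wiki.csv', index=False, encoding='utf-8')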
import pandas
mydataset = {
'cars': ["BMW", "Volvo", "Ford"],
'passings': [3, 7, 2]
}
myvar = | pandas.DataFrame(mydataset) | pandas.DataFrame |
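# Illustrative usage (not part of the original snippet):
# print(myvar)
#     cars  passings
# 0    BMW         3
# 1  Volvo         7
# 2   Ford         2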
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 11:13:15 2019
@author: jkern
"""
from __future__ import division
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
def hydro(sim_years):
#########################################################################
# The purpose of this script is to use synthetic streamflows at major California
# reservoir sites to simulate daily hydropower production for the PG&E and SCE
# zones of the California electricity market (CAISO), using parameters optimized
# via a differential evolution algorithm.
#########################################################################
# load California storage reservoir (ORCA) sites
df_sites = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name = 'ORCA',header=0)
ORCA_sites = list(df_sites)
# load upper generation amounts for each predicted hydropower dam (PG&E and SCE)
upper_gen = pd.read_excel('CA_hydropower/upper.xlsx',header =0)
# month-day calender
calender = pd.read_excel('CA_hydropower/calender.xlsx',header=0)
# load simulated full natural flows at each California storage reservoir (ORCA site)
df_sim = pd.read_csv('Synthetic_streamflows/synthetic_streamflows_CA.csv',header=0,index_col=0)
df_sim = df_sim.loc[0:(sim_years+3)*365,:]
# load simulated outflows calculated by ORCA
df_ORCA = pd.read_csv('ORCA_output.csv')
outflow_sites = ['SHA_otf','ORO_otf','YRS_otf','FOL_otf','NML_otf','DNP_otf','EXC_otf','MIL_otf','ISB_otf','SUC_otf','KWH_otf','PFT_otf']
for i in range(0,len(df_ORCA)):
for s in outflow_sites:
df_sim.loc[i,s] = df_ORCA.loc[i,s]
sim_years = sim_years+3
#Add month and day columns to the dataframe
Month = []
Day = []
count = 0
for i in range(0,len(df_sim)):
if count < 365:
Month = np.append(Month,calender.loc[count,'Month'])
Day = np.append(Day,calender.loc[count,'Day'])
count = count + 1
else:
count = 0
Month = np.append(Month,calender.loc[count,'Month'])
Day = np.append(Day,calender.loc[count,'Day'])
count = count + 1
df_sim['Month']=Month
df_sim['Day']=Day
# calculate simulated totals
Sim_totals = []
for i in range(0,sim_years):
sample = df_sim.loc[i*365:i*365+365,'ORO_fnf':'ISB_fnf']
total = np.sum(np.sum(sample))
Sim_totals = np.append(Sim_totals,total)
# load historical full natural flows for 2001, 2005, 2010 and 2011
df_hist = pd.read_excel('CA_hydropower/hist_reservoir_inflows.xlsx',header=0)
Hist_totals = []
Hist_years = [2001,2005,2010,2011]
for i in Hist_years:
sample = df_hist[df_hist['year'] == i]
sample = sample.loc[:,'ORO_fnf':'ISB_fnf']
total = np.sum(np.sum(sample))
Hist_totals = np.append(Hist_totals,total)
# find most similar historical year for each simulated year
Rule_list=[]
for i in range(0,sim_years):
Difference=abs(Sim_totals[i]- Hist_totals)
#Select which rule to use
for n in range(0,len(Hist_years)):
if Difference[n]==np.min(Difference):
Rule=n
Rule_list.append(Rule)
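# Equivalent, more compact form of the rule selection above (illustrative; ties are
# broken slightly differently, since np.argmin keeps the first minimum):
# Rule_list = [int(np.argmin(np.abs(t - Hist_totals))) for t in Sim_totals]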
# PGE hydro projects
PGE_names = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name ='PGE',header=0)
PGE_dams = list(PGE_names.loc[:,'Balch 1':])
PGE_Storage=[PGE_dams[3],PGE_dams[7],PGE_dams[8],PGE_dams[9]]
PGE_No_Data_Dams=[PGE_dams[2],PGE_dams[4],PGE_dams[10],PGE_dams[11],PGE_dams[15],PGE_dams[16],PGE_dams[17],PGE_dams[26],PGE_dams[30],PGE_dams[38],PGE_dams[39],PGE_dams[55],PGE_dams[60],PGE_dams[65]]
## SCE hydro projects
SCE_names = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name ='SCE',header=0)
SCE_dams = list(SCE_names.loc[:,'Big_Creek_1 ':])
SCE_No_Data_Dams=[SCE_dams[7],SCE_dams[8],SCE_dams[12]]
#Simulate all the PGE inflow dams
check_unused = []
PGE_name_list = []
SCE_name_list = []
for name in PGE_dams:
est_power = []
for year in range(0,sim_years):
if name in PGE_No_Data_Dams:
pass
elif name in PGE_Storage:
# which operating rule to use?
Rule=Rule_list[year]
File_name='CA_hydropower/A1.0_FNF_Storage_Rule_' + str(name) +'.txt'
Temp_Rule=pd.read_csv(File_name,delimiter=' ',header=None)
peak_flow,starting,ending,refill_1_date,evac_date,peak_end,refill_2_date,storage,power_cap,eff,min_power=Temp_Rule.loc[Rule][:]
flow_weekly = []
k = str(PGE_names.loc[0][name])
I_O=str(PGE_names.loc[1][name])
#Which site to use
if k =='Oroville' and I_O =='Inflows':
site_name=['ORO_fnf']
elif k =='Oroville' and I_O =='Outflows':
site_name=['ORO_otf']
elif k =='Pine Flat' and I_O =='Inflows':
site_name=['PFT_fnf']
elif k =='Pine Flat' and I_O =='Outflows':
site_name=['PFT_otf']
elif k =='Shasta' and I_O =='Inflows':
site_name=['SHA_fnf']
elif k =='Shasta' and I_O =='Outflows':
site_name=['SHA_otf']
elif k =='New Melones' and I_O =='Inflows':
site_name=['NML_fnf']
elif k =='New Melones' and I_O =='Outflows':
site_name=['NML_otf']
elif k =='Pardee' and I_O =='Inflows':
site_name=['PAR_fnf']
elif k =='Pardee' and I_O =='Outflows':
site_name=['PAR_otf']
elif k =='New Exchequer' and I_O =='Inflows':
site_name=['EXC_fnf']
elif k =='New Exchequer' and I_O =='Outflows':
site_name=['EXC_otf']
elif k =='Folsom' and I_O =='Inflows':
site_name=['FOL_fnf']
elif k =='Folsom' and I_O =='Outflows':
site_name=['FOL_otf']
elif k =='<NAME>' and I_O =='Inflows':
site_name=['DNP_fnf']
elif k =='<NAME>' and I_O =='Outflows':
site_name=['DNP_otf']
elif k =='Millerton' and I_O =='Inflows':
site_name=['MIL_fnf']
elif k =='Millerton' and I_O =='Outflows':
site_name=['MIL_otf']
elif k =='Isabella' and I_O =='Inflows':
site_name=['ISB_fnf']
elif k =='Isabella' and I_O =='Outflows':
site_name=['ISB_otf']
elif k =='Yuba' and I_O =='Inflows':
site_name=['YRS_fnf']
elif k =='Yuba' and I_O =='Outflows':
site_name=['YRS_otf']
else:
None
flow_ts = df_sim.loc[:,site_name].values
flow_daily = flow_ts[year*365:year*365+365]
for i in range(0,52):
flow_weekly = np.append(flow_weekly,np.sum(flow_daily[i*7:i*7+7]))
x = np.max(flow_weekly[15:36])
L = list(flow_weekly)
peak_flow = L.index(x)
for week in range(0,52):
# available hydro production based on water availability
avail_power = flow_weekly[week]*eff
# if it's during first refill
if week < refill_1_date:
gen =starting- ((starting-min_power)/refill_1_date)*week
storage = avail_power-gen
# if it maintains the water
elif week >= refill_1_date and week < evac_date:
gen=min_power
storage= storage + (avail_power- gen)
# if it's in evac period 2
elif week >= evac_date and week < peak_end:
gen= min_power+ ((power_cap-min_power)/(peak_end-evac_date)* (week- evac_date))
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
# if it's in evac period 2
elif week >= peak_end and week < refill_2_date:
gen= power_cap
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
elif week >=refill_2_date :
gen = power_cap-((power_cap-ending)/(52-refill_2_date)* (week-refill_2_date))
est_power = np.append(est_power,gen)
else:
upper_now=upper_gen.loc[upper_gen.loc[:,'Name']== name]
upper_now=upper_now.reset_index(drop=True)
upper=upper_now.loc[0]['Max Gen']
Rule=Rule_list[year]
File_name='CA_hydropower/PGE_DE_V1/FNF_' + str(name) +'.txt'
Temp_Rule= | pd.read_csv(File_name,delimiter=' ',header=None) | pandas.read_csv |
import numpy as np
import sys
import pandas as pd
import scipy
from sklearn.cluster import KMeans
import networkx as nx
import copy, random
import matplotlib.pyplot as plt
from SERGIO.SERGIO.sergio import sergio
from sklearn.preprocessing import StandardScaler
# NOTE: this function is called from the main file
def get_DS_data(args):
# load the data: train/valid/test = 5/5/5
train_data, valid_data, test_data = [], [], []
for k in range(args.K_train):
print('train num = ', k)
train_data.append(helper_DS_data(args, k, args.DATA_NAME))
for _ in range(args.K_valid):
k += 1
print('valid num = ', k)
valid_data.append(helper_DS_data(args, k, args.DATA_NAME))
for _ in range(args.K_test):
k += 1
print('test num = ', k)
test_data.append(helper_DS_data(args, k, args.DATA_NAME))
return train_data, valid_data, test_data
# NOTE: this function is called from the main file
def get_DS_data_v2(args):
# load the data: test = 15
ds_data = {}
for i, name in enumerate(['DS1', 'DS2', 'DS3']):
ds_data[i] = []
for k in range(15):# range(args.K_test)
# print('test num = ', k)
ds_data[i].append(helper_DS_data(args, k, name))
return ds_data
def helper_DS_data(args, k, DATA_NAME, u=1):
# get the master regulators
if DATA_NAME == 'DS1':
filepath = 'simulator/SERGIO/data_sets/De-noised_100G_9T_300cPerT_4_DS1/'
dim = 100
elif DATA_NAME == 'DS2':
filepath = 'simulator/SERGIO/data_sets/De-noised_400G_9T_300cPerT_5_DS2/'
dim = 400
elif DATA_NAME == 'DS3':
filepath = 'simulator/SERGIO/data_sets/De-noised_1200G_9T_300cPerT_6_DS3/'
dim = 1200
else:
print('CHECK DATA NAME')
given_edges = pd.read_csv(filepath+'gt_GRN.csv', header=None)
given_edges = np.array(given_edges)
master_regulators = set(given_edges[:, 0])
if k==0:
print(DATA_NAME, 'Num MR = ', len(master_regulators))
# NOTE: something is wrong.
# get true theta
G = get_graph(dim, given_edges)#, 'true_'+DATA_NAME)
edge_connections = nx.adj_matrix(G).todense()
smallest_eigval = np.min(np.linalg.eigvals(edge_connections))
# precision matrix corresponding to edge_connections
theta_connections = edge_connections + np.eye(dim)*(u- smallest_eigval)
# load the data
sim_clean_exp = pd.read_csv(filepath+'simulated_noNoise_'+str(k)+'.csv', index_col=0)
#sim = sergio(number_genes=args.D, number_bins = 9, number_sc = 300, noise_params = 1, decays=0.8, sampling_state=15, noise_type='dpd')
sim_clean_exp = np.array(sim_clean_exp)
X = np.array(sim_clean_exp).transpose()# M x D = 2700 x 100
# get the labels
#y = np.array([np.int(np.float(c/args.POINTS)) for c in range(X.shape[0])])
y = np.array([np.int(np.float(c/args.POINTS_PER_CLASS)) for c in range(X.shape[0])])
# print('set labels check: ', set(y))
return [X, y, theta_connections, list(master_regulators)]
# NOTE: this function is called from the main file
def add_technical_noise(args, data, name=0):
print('Adding technical noise')
# NOTE: Do no call sim.simulate()
if args.DATA_METHOD == 'ds_expts' and name>0:
dim_dict = {'DS1':100, 'DS2':400, 'DS3':1200}
# dim = dim_dict[args.DATA_NAME]
dim = dim_dict['DS'+str(name)]
sim = sergio(number_genes=dim, number_bins = 9, number_sc = 300, noise_params = 1, decays=0.8, sampling_state=15, noise_type='dpd')
else:
sim = sergio(number_genes=args.D, number_bins = args.C, number_sc = args.POINTS_PER_CLASS, noise_params = args.NOISE_PARAMS, decays=args.DECAYS, sampling_state=args.SAMPLING_STATE, noise_type=args.NOISE_TYPE)
noisy_data = []
for i, d in enumerate(data):
X, y, theta, MR = d
X = helper_technical_noise(args, X, sim, name)
noisy_data.append([X, y, theta, MR])
return noisy_data
def helper_technical_noise(args, X, sim, name=0):# name =0 is default and runs for the input args setting: Use for training
#print('clean shape: ', X.shape)
expr = reshaping_sim_data(X.transpose(), args).transpose()
#Add outlier genes (skipping)
# expr_O = sim.outlier_effect(expr, outlier_prob = 0.01, mean = 0.8, scale = 1)
expr_O = expr
#Add Library Size Effect (skipping)
# libFactor, expr_O_L = sim.lib_size_effect(expr_O, mean = 4.6, scale = 0.4)
expr_O_L = expr_O
#Add Dropouts
#binary_ind = sim.dropout_indicator(expr_O_L, shape = 6.5, percentile = 82)
# larger shape -> less dropout; lower percentile -> less dropout
if args.DATA_METHOD == 'ds_expts' and name>0:
shape_dict = {'DS1':6.5, 'DS2':6.5, 'DS3':20}
dropout_shape = shape_dict['DS'+str(name)]
binary_ind = sim.dropout_indicator(expr_O_L, shape = dropout_shape, percentile = 82.0)
else:
binary_ind = sim.dropout_indicator(expr_O_L, shape = args.dropout_shape, percentile = args.dropout_percentile)
print('BINARY IND: higher sum, less dropout ', binary_ind.size, np.sum(binary_ind), ' success rate = ', np.sum(binary_ind)/binary_ind.size)
expr_O_L_D = np.multiply(binary_ind, expr_O_L)
#Convert to UMI count
# count_matrix = sim.convert_to_UMIcounts(expr_O_L_D)
count_matrix = expr_O_L_D
noisy_matrix = np.concatenate(count_matrix, axis = 1)
# print('Noisy mat: ', noisy_matrix.shape)
return noisy_matrix.transpose()
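# Rough sketch of the dropout relationship noted above (parameter values are
# assumptions, not tuned settings): with SERGIO's dropout_indicator, a larger shape
# and a lower percentile both keep more entries, e.g.
#   sim.dropout_indicator(expr_O_L, shape=20, percentile=45)   # milder dropout
#   sim.dropout_indicator(expr_O_L, shape=6.5, percentile=82)  # heavier dropout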
def get_DS_graph_MR(args): # 3 NOTE: please change the paths accordingly
DATA_NAME = args.gen_DS
if DATA_NAME == 'DS1':
filepath = 'simulator/SERGIO/data_sets/De-noised_100G_9T_300cPerT_4_DS1/'
dim = 100
elif DATA_NAME == 'DS2':
filepath = 'simulator/SERGIO/data_sets/De-noised_400G_9T_300cPerT_5_DS2/'
dim = 400
elif DATA_NAME == 'DS3':
filepath = 'simulator/SERGIO/data_sets/De-noised_1200G_9T_300cPerT_6_DS3/'
dim = 1200
else:
print('CHECK DATA NAME')
given_edges = pd.read_csv(filepath+'gt_GRN.csv', header=None)
given_edges = np.array(given_edges)
master_regulators = list(set(given_edges[:, 0]))
G = get_graph(args, given_edges)#, 'true_'+DATA_NAME)
G = get_directed_graph(G)
return G, master_regulators
def helper_GRN_data(args): # 2
# initialize a random DAG
if args.gen_DS in ['DS1', 'DS2', 'DS3']:
# Use the given graph and simulate the data again
G1, master_regulators = get_DS_graph_MR(args)
else:
G1, master_regulators = random_DAG_with_MR(args)
# saving the files, random number helps avoid clash
FILE_NUM = str(np.random.randint(1000))
create_interaction_regs_files(G1, args, RANDOM_NUM=FILE_NUM)
sim_data = get_data_SERGIO_batch(args, RANDOM_NUM=FILE_NUM, num_batch=1)
return sim_data[0] + [master_regulators]
def create_GRN_data(args): # 1
# create GRN data from the SERGIO simulator
train_data, valid_data, test_data = [], [], []
for k in range(args.K_train):
print('train num = ', k)
train_data.append(helper_GRN_data(args))
for k in range(args.K_valid):
print('valid num = ', k)
valid_data.append(helper_GRN_data(args))
for k in range(args.K_test):
print('test num = ', k)
test_data.append(helper_GRN_data(args))
return train_data, valid_data, test_data
def get_directed_graph(Gu):# 5
Gd = nx.DiGraph()
Gd.add_nodes_from(Gu.nodes)
edges = Gu.edges
Gd.add_edges_from(edges)
return Gd
def random_DAG_with_MR(args): # 4
"""Generate a random Directed Acyclic Graph (DAG) with a given number of MR and sparsity."""
prob = args.sparsity
num_MR = int(prob * args.D)
print('num MR = ', num_MR)
master_regulators = np.array([n for n in range(num_MR)])
other_nodes = np.array([num_MR + n for n in range(args.D-num_MR)])
# Initializing a Bipartite graph
G = nx.bipartite.random_graph(num_MR, args.D-num_MR, p=prob, seed=None, directed=False)
# add minimal number of edges to make the graph connected
edges = np.array([[e[0], e[1]] for e in G.edges])
if len(edges) != 0:
unconnected_MR = list(set(master_regulators) - set(edges[:, 0]))
unconnected_ON = list(set(other_nodes) - set(edges[:, 1]))# other nodes
else:
unconnected_MR = list(set(master_regulators))
unconnected_ON = list(set(other_nodes)) #other nodes
# make sure that each MR has out-degree >= 1
new_edges = []
for n in unconnected_MR:
# randomly select an edge from other nodes
if len(unconnected_ON) > 0:
index = np.random.choice(len(unconnected_ON), 1, replace=False)
# print('unconnected index: ', index)
new_edges.append([n, unconnected_ON[index[0]]])
else:
index = np.random.choice(len(other_nodes), 1, replace=False)
# print('other nodes index: ', index)
new_edges.append([n, other_nodes[index[0]]])
# add the new edges
G.add_edges_from(new_edges)
# update arrays
edges = np.array([[e[0], e[1]] for e in G.edges])
unconnected_MR = list(set(master_regulators) - set(edges[:, 0]))
unconnected_ON = list(set(other_nodes) - set(edges[:, 1]))# other nodes
new_edges = []
# make sure that each other node is connected
for n in unconnected_ON:
index = np.random.choice(len(master_regulators), 1, replace=False)
new_edges.append([master_regulators[index[0]], n])
# add the new edges
G.add_edges_from(new_edges)
# checking that each node has at least one connection.
print('Final check: is DAG connected ?', set(np.array([[e[0], e[1]] for e in G.edges]).reshape(-1)) == set(range(args.D)))
# Sanity check:
edges = np.array([[e[0], e[1]] for e in get_directed_graph(G).edges])
if edges[:, 0].all() != np.array(master_regulators).all():
print('master regulators not matching', edges[:, 0], master_regulators)
if len(master_regulators) > 1:
num_connect_tf = int(args.connect_TF_prob * len(master_regulators))
# select random pairs and join edges
index = np.random.choice(len(master_regulators), num_connect_tf, replace=False)
MR_A = set(master_regulators[index])
MR_B = list(set(master_regulators) - MR_A)
new_edges = []
for n in MR_A:
index = np.random.choice(len(MR_B), 1, replace=False)
new_edges.append([n, MR_B[index[0]]])
# add the new edges
G.add_edges_from(new_edges)
# convert G to directed
G = get_directed_graph(G)
print('total edges = ', len(G.edges))
return G, master_regulators
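# Hypothetical usage sketch (the Namespace fields mirror the args attributes read
# above; the values are illustrative assumptions):
# from argparse import Namespace
# toy_args = Namespace(D=20, sparsity=0.2, connect_TF_prob=0.2)
# G, MRs = random_DAG_with_MR(toy_args)   # ~4 master regulators feeding 16 target genes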
def load_saved_data(args):
    # Stub: the original body returned undefined names; fail loudly until a real loader is wired up.
    raise NotImplementedError('load_saved_data: loading previously saved data is not implemented')
def normalizing_data(X):
print('Normalising the input data...')
# scaler = StandardScaler()
# scaler.fit(X)
# scaledX = scaler.transform(X)
scaledX = X - X.mean(axis=0)
scaledX = scaledX/X.std(axis=0)
# NOTE: replacing all nan's by 0, as sometimes in dropout the complete column
# goes to zero
scaledX = convert_nans_to_zeros(scaledX)
return scaledX
def convert_nans_to_zeros(X):
where_are_nans = np.isnan(X)
X[where_are_nans] = 0
return X
def create_interaction_regs_files(G1, args, RANDOM_NUM=''):# 6 : NOTE: change the folder paths
# get master regulators: all nodes with in-degree zero
node_in_degree = list(G1.in_degree(G1.nodes()))
#node_degree = sorted(node_degree, key=lambda tup:tup[1])
print('Master Regulators for regs file have 0 in degree: inferring using topological graph')
master_regulators = np.array([n for n, d in node_in_degree if d==0])
num_MR = len(master_regulators)
# 1. edge list
df_edge = pd.DataFrame(np.array(G1.edges()))
df_edge.to_csv('simulator/SERGIO/data_sets/custom/gt_GRN'+RANDOM_NUM+'.csv', header=None, index=None)
# 2. saving master regulator files
# Prod_cell_rate = ~U(low_exp_range) & ~U(high_exp_range)
low_cell_rate = np.random.rand(num_MR, args.C) * (args.pcr_low_max - args.pcr_low_min) + args.pcr_low_min
high_cell_rate = np.random.rand(num_MR, args.C) * (args.pcr_high_max - args.pcr_high_min) + args.pcr_high_min
mask = np.random.choice([0, 1], (num_MR, args.C))
production_cell_rates = mask * low_cell_rate + (1 - mask) * high_cell_rate
master_reg_data = np.concatenate((master_regulators.reshape(num_MR, 1), production_cell_rates), 1)
df_MR = | pd.DataFrame(master_reg_data) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
from pyfintb.datetools import to_str_date, to_pd_timestamp, gen_pd_datetime_idx, td_count, TODAY
from pyfintb.utils import to_list, list_slice
# Wind API Documentation:
# visit https://www.windquant.com/qntcloud/help
# or type "API" on Wind Financial Terminal
from WindPy import *
wind_start = w.start(waitTime=10) # timeout ~10s
import pandas as pd
import warnings as wn
def custom_formatwarning(msg, *args, **kwargs):
# ignore everything except the message
return str(msg) + '\n'
wn.formatwarning = custom_formatwarning
class Wind():
def __init__(self, data_usage_limit_per_week=5000000):
self._DATA_USAGE_LIMIT_PER_WEEK = data_usage_limit_per_week
self._DATA_USAGE_WARN = self._DATA_USAGE_LIMIT_PER_WEEK * 0.01
self._PIECE_LEN = 1000
self._MAX_MAT_SIZE = 1000 * 1000
self._FUNC_DATA_USAGE_LIMIT = {"wsd": 100000, "wss": 100000, "wsee": 1000,}
self._windapi_err_detail = {
-40520000: {"symbol": "WQERR_BASE", "info": "一般性错误"},
-40520001: {"symbol": "WQERR_UNKNOWN", "info": "未知错误"},
-40520002: {"symbol": "WQERR_INTERNAL_ERROR", "info": "内部错误"},
-40520003: {"symbol": "WQERR_SYSTEM_REASON", "info": "操作系统原因"},
-40520004: {"symbol": "WQERR_LOGON_FAILED", "info": "登录失败"},
-40520005: {"symbol": "WQERR_LOGON_NOAUTH", "info": "无登录权限"},
-40520006: {"symbol": "WQERR_USER_CANCEL", "info": "用户取消"},
-40520007: {"symbol": "WQERR_NO_DATA_AVAILABLE", "info": "没有可用数据"},
-40520008: {"symbol": "WQERR_TIMEOUT", "info": "请求超时"},
-40520009: {"symbol": "WQERR_LOST_WBOX", "info": "Wbox错误"},
-40520010: {"symbol": "WQERR_ITEM_NOT_FOUND", "info": "未找到相关内容"},
-40520011: {"symbol": "WQERR_SERVICE_NOT_FOUND", "info": "未找到相关服务"},
-40520012: {"symbol": "WQERR_ID_NOT_FOUND", "info": "未找到相关ID"},
-40520013: {"symbol": "WQERR_LOGON_CONFLICT", "info": "已在本机使用其他账号登录,故无法使用指定账号登录"},
-40520014: {"symbol": "WQERR_LOGON_NO_WIM", "info": "未登录使用WIM工具,故无法登录"},
-40520015: {"symbol": "WQERR_TOO_MANY_LOGON_FAILURE", "info": "连续登录失败次数过多"},
-40521000: {"symbol": "WQERR_IOERROR_CLASS", "info": "网络数据存取错误"},
-40521001: {"symbol": "WQERR_IO_ERROR", "info": "IO操作错误"},
-40521002: {"symbol": "WQERR_SERVICE_NOT_AVAL", "info": "后台服务器不可用"},
-40521003: {"symbol": "WQERR_CONNECT_FAILED", "info": "网络连接失败"},
-40521004: {"symbol": "WQERR_SEND_FAILED", "info": "请求发送失败"},
-40521005: {"symbol": "WQERR_RECEIVE_FAILED", "info": "数据接收失败"},
-40521006: {"symbol": "WQERR_NETWORK_ERROR", "info": "网络错误"},
-40521007: {"symbol": "WQERR_SERVER_REFUSED", "info": "服务器拒绝请求"},
-40521008: {"symbol": "WQERR_SVR_BAD_RESPONSE", "info": "错误的应答"},
-40521009: {"symbol": "WQERR_DECODE_FAILED", "info": "数据解码失败"},
-40521010: {"symbol": "WQERR_INTERNET_TIMEOUT", "info": "网络超时"},
-40521011: {"symbol": "WQERR_ACCESS_FREQUENTLY", "info": "频繁访问"},
-40521012: {"symbol": "WQERR_SERVER_INTERNAL_ERROR", "info": "服务器内部错误"},
-40522000: {"symbol": "WQERR_INVALID_CLASS", "info": "请求输入错误"},
-40522001: {"symbol": "WQERR_ILLEGAL_SESSION", "info": "无合法会话"},
-40522002: {"symbol": "WQERR_ILLEGAL_SERVICE", "info": "非法数据服务"},
-40522003: {"symbol": "WQERR_ILLEGAL_REQUEST", "info": "非法请求"},
-40522004: {"symbol": "WQERR_WINDCODE_SYNTAX_ERR", "info": "万得代码语法错误"},
-40522005: {"symbol": "WQERR_ILLEGAL_WINDCODE", "info": "不支持的万得代码"},
-40522006: {"symbol": "WQERR_INDICATOR_SYNTAX_ERR", "info": "指标语法错误"},
-40522007: {"symbol": "WQERR_ILLEGAL_INDICATOR", "info": "不支持的指标"},
-40522008: {"symbol": "WQERR_OPTION_SYNTAX_ERR", "info": "指标参数语法错误"},
-40522009: {"symbol": "WQERR_ILLEGAL_OPTION", "info": "不支持的指标参数"},
-40522010: {"symbol": "WQERR_DATE_TIME_SYNTAX_ERR", "info": "日期与时间语法错误"},
-40522011: {"symbol": "WQERR_INVALID_DATE_TIME", "info": "不支持的日期与时间"},
-40522012: {"symbol": "WQERR_ILLEGAL_ARG", "info": "不支持的请求参数"},
-40522013: {"symbol": "WQERR_INDEX_OUT_OF_RANGE", "info": "数组下标越界"},
-40522014: {"symbol": "WQERR_DUPLICATE_WQID", "info": "重复的WQID"},
-40522015: {"symbol": "WQERR_UNSUPPORTED_NOAUTH", "info": "请求无相应权限"},
-40522016: {"symbol": "WQERR_UNSUPPORTED_DATA_TYPE", "info": "不支持的数据类型"},
-40522017: {"symbol": "WQERR_DATA_QUOTA_EXCEED", "info": "数据提取量超限"},
-40522018: {"symbol": "WQERR_ILLEGAL_ARG_COMBINATION", "info": "不支持的请求参数"},
}
def _windapi_err_raise(self, err_code):
if err_code == 0:
pass
else:
info = self._windapi_err_detail[err_code]["info"]
raise Exception("Wind API Error ID {}: {}".format(err_code, info))
# convert Wind data object to pandas DataFrame
def _wdata2dataframe(self, wdata):
self._windapi_err_raise(wdata.ErrorCode)
field = wdata.Fields
code = wdata.Codes
time = wdata.Times
data = wdata.Data
datetime_idx = pd.to_datetime(time)
if len(field) == 1:
col = code
else:
if len(code) == 1:
col = field
else:
col = pd.MultiIndex.from_product([field, code], names=['field', 'code'])
if len(time) == 1:
if len(field) == 1:
result_df = pd.DataFrame(data, index=datetime_idx, columns=col)
else:
result_df = pd.DataFrame(data, index=col, columns=datetime_idx).T
else:
result_df = pd.DataFrame(data, index=col, columns=datetime_idx).T
return result_df
# check whether the code is a Wind sector ID
def is_wind_sectorid(self, wcode):
for i in to_list(wcode):
# rule: length of string is 16 #AND# no dots in the string
if (len(i) == 16) and ("." not in i):
continue
else:
return False
return True
# check whether the code is a Wind EDB (Economic Database) ID
def is_wind_edbid(self, wcode):
for i in to_list(wcode):
# rule: length of string is 8 #AND# no dots in the string #AND# starting with M, S or G
if (len(i) == 8) and ("." not in i) and (i[0].upper() in ["M", "S", "G"]):
continue
else:
return False
return True
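# Illustrative checks (the codes are examples of the documented patterns, not
# verified Wind identifiers):
# w_api = Wind()
# w_api.is_wind_sectorid('a001010100000000')  # True: 16 chars, no dot
# w_api.is_wind_sectorid('600519.SH')         # False: contains a dot
# w_api.is_wind_edbid('M0000612')             # True: 8 chars, no dot, starts with M
# w_api.is_wind_edbid('000300.SH')            # False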
# get components of Wind code
def wind_components(self, wcode, show_name=False, date=TODAY):
code = to_list(wcode)
query = "sectorid=" if self.is_wind_sectorid(code) else "windcode="
wdata_obj = w.wset("sectorconstituent", "date="+to_str_date(date), query+code[0])
self._windapi_err_raise(wdata_obj.ErrorCode)
if wdata_obj.Data == []:
return []
cpn_code_list = wdata_obj.Data[1]
cpn_name_list = wdata_obj.Data[2]
if show_name:
cpn_code_dict = dict(zip(cpn_code_list, cpn_name_list))
return cpn_code_dict
else:
return cpn_code_list
# get the name of Wind code
def wind_name(self, wcode, eng=False):
code = to_list(wcode)
lang = "sec_englishname" if eng else "sec_name"
wdata_obj = w.wss(code, lang)
self._windapi_err_raise(wdata_obj.ErrorCode)
name = wdata_obj.Data[0]
if name[0] is None:
wn.warn("The input code is not a standard Wind code.")
return []
else:
return name
# get time series data
def wind_series(self, wcode, field, start_date, end_date=TODAY, col=None, **kwargs):
if (not isinstance(field, list)) and (field.upper() == "EDB"): # get data from Wind Economic Database
return self.wind_edb(wcode, start_date, end_date, col=col, **kwargs)
code = to_list(wcode)
field = to_list(field)
code_len = len(code)
field_len = len(field)
date_len = td_count(start_date, end_date, days="alldays") # conservative count
one_fetch_size = code_len * date_len
all_fetch_size = field_len * one_fetch_size
if all_fetch_size >= self._DATA_USAGE_LIMIT_PER_WEEK:
wn.warn("Data usage this time exceeds max usage limitation per week.")
return None
if all_fetch_size >= self._DATA_USAGE_WARN:
wn.warn("Data usage this time: almost {0} cells".format(all_fetch_size))
result_df = pd.DataFrame()
if one_fetch_size < self._FUNC_DATA_USAGE_LIMIT["wsd"]: # within the per-call wsd data size limit
if ((code_len > 1) and (field_len > 1)):
for f in field:
wdata_obj = w.wsd(code, f, to_str_date(start_date), to_str_date(end_date), **kwargs)
wdata_df = self._wdata2dataframe(wdata_obj)
result_df = pd.concat([result_df, wdata_df], axis=1)
if col is None:
result_df.columns = pd.MultiIndex.from_product([field, code], names=['field', 'code'])
else:
col = to_list(col)
result_df.columns = pd.MultiIndex.from_product([field, col], names=['field', 'code'])
else:
wdata_obj = w.wsd(code, field, to_str_date(start_date), to_str_date(end_date), **kwargs)
result_df = self._wdata2dataframe(wdata_obj)
if col is None:
result_df.columns = code if field_len == 1 else field
else:
result_df.columns = to_list(col)
else:
date_idx = gen_pd_datetime_idx(start_date, end_date, **kwargs)
for sub_date_range in list_slice(date_idx, self._PIECE_LEN):
sub_start_date = sub_date_range[0]
sub_end_date = sub_date_range[-1]
sub_df = pd.DataFrame()
for sub_code in list_slice(code, self._FUNC_DATA_USAGE_LIMIT["wsd"]//self._PIECE_LEN):
for f in field:
wdata_obj = w.wsd(sub_code, f, to_str_date(sub_start_date), to_str_date(sub_end_date), **kwargs)
wdata_df = self._wdata2dataframe(wdata_obj)
wdata_df.columns = | pd.MultiIndex.from_product([[f], sub_code], names=['field', 'code']) | pandas.MultiIndex.from_product |
import numpy as np
import pandas as pd
from . import util as DataUtil
from . import cols as DataCol
"""
The main data loader.
TODO: population & common special dates
"""
class DataCenter:
def __init__(self):
self.__kabko = None
self.__dates_global = | pd.DataFrame([], columns=DataCol.DATES_GLOBAL) | pandas.DataFrame |
from pathlib import Path
import pandas as pd
import pytest
from data_pipeline_api.file_formats import object_file
import simple_network_sim.network_of_populations.visualisation as vis
from tests.utils import compare_mpl_plots
def test_plotStates_three_rows():
simple = pd.DataFrame([
{"date": "2020-04-12", "node": "hb1", "state": "S", "std": 0.0, "mean": 15.0},
{"date": "2020-04-12", "node": "hb2", "state": "S", "std": 0.0, "mean": 21.0},
{"date": "2020-04-12", "node": "hb3", "state": "S", "std": 0.0, "mean": 20.0},
{"date": "2020-04-12", "node": "hb3", "state": "E", "std": 0.0, "mean": 0.0},
{"date": "2020-04-12", "node": "hb4", "state": "S", "std": 0.0, "mean": 10.0},
{"date": "2020-04-12", "node": "hb5", "state": "S", "std": 0.0, "mean": 10.0},
{"date": "2020-04-12", "node": "hb6", "state": "S", "std": 0.0, "mean": 10.0},
{"date": "2020-04-12", "node": "hb7", "state": "S", "std": 0.0, "mean": 10.0},
{"date": "2020-04-13", "node": "hb1", "state": "S", "std": 0.0, "mean": 10.0},
{"date": "2020-04-13", "node": "hb2", "state": "S", "std": 0.0, "mean": 5.0},
{"date": "2020-04-13", "node": "hb3", "state": "S", "std": 0.0, "mean": 5.0},
{"date": "2020-04-13", "node": "hb3", "state": "E", "std": 0.0, "mean": 15.0},
{"date": "2020-04-13", "node": "hb4", "state": "S", "std": 0.0, "mean": 0.0},
{"date": "2020-04-13", "node": "hb5", "state": "S", "std": 0.0, "mean": 10.0},
{"date": "2020-04-13", "node": "hb6", "state": "S", "std": 0.0, "mean": 10.0},
{"date": "2020-04-13", "node": "hb7", "state": "S", "std": 0.0, "mean": 10.0},
])
compare_mpl_plots(vis.plot_nodes(pd.DataFrame(simple)))
def test_plotStates_two_rows():
simple = pd.DataFrame([
{"date": "2020-04-12", "node": "hb1", "state": "S", "std": 0.0, "mean": 15.0},
{"date": "2020-04-12", "node": "hb2", "state": "S", "std": 0.0, "mean": 21.0},
{"date": "2020-04-12", "node": "hb3", "state": "S", "std": 0.0, "mean": 20.0},
{"date": "2020-04-12", "node": "hb3", "state": "E", "std": 0.0, "mean": 0.0},
{"date": "2020-04-12", "node": "hb4", "state": "S", "std": 0.0, "mean": 10.0},
{"date": "2020-04-13", "node": "hb1", "state": "S", "std": 0.0, "mean": 10.0},
{"date": "2020-04-13", "node": "hb2", "state": "S", "std": 0.0, "mean": 5.0},
{"date": "2020-04-13", "node": "hb3", "state": "S", "std": 0.0, "mean": 5.0},
{"date": "2020-04-13", "node": "hb3", "state": "E", "std": 0.0, "mean": 15.0},
{"date": "2020-04-13", "node": "hb4", "state": "S", "std": 0.0, "mean": 0.0},
])
compare_mpl_plots(vis.plot_nodes(pd.DataFrame(simple)))
def test_plotStates_single_row():
simple = pd.DataFrame([
{"date": "2020-04-12", "node": "hb1", "state": "S", "std": 0.0, "mean": 15.0},
{"date": "2020-04-12", "node": "hb2", "state": "S", "std": 0.0, "mean": 21.0},
{"date": "2020-04-13", "node": "hb1", "state": "S", "std": 0.0, "mean": 10.0},
{"date": "2020-04-13", "node": "hb2", "state": "S", "std": 0.0, "mean": 5.0},
])
compare_mpl_plots(vis.plot_nodes(pd.DataFrame(simple)))
def test_plotStates_empty_node():
simple = pd.DataFrame([{"time": 0, "node": "hb1", "state": "S", "std": 0.0, "mean": 15.0}])
with pytest.raises(ValueError):
vis.plot_nodes(pd.DataFrame(simple), nodes=[])
def test_plotStates_empty_states():
simple = pd.DataFrame([{"time": 0, "node": "hb1", "state": "S", "std": 0.0, "mean": 15.0}])
with pytest.raises(ValueError):
vis.plot_nodes(pd.DataFrame(simple), states=[])
def test_plotStates_empty_missing_column():
simple = pd.DataFrame([{"node": "hb1", "state": "S", "std": 0.0, "mean": 15.0}])
with pytest.raises(ValueError):
vis.plot_nodes(pd.DataFrame(simple), states=[])
def test_read_output(tmpdir):
df = pd.DataFrame([{"a": 10, "b": 20}])
with open(str(tmpdir / Path("simple.h5")), "wb") as fp:
object_file.write_table(fp, "outbreak-timeseries", df)
path = str(tmpdir / Path("access.yaml"))
with open(path, "w") as fp:
fp.write(f"""
config:
data_directory: {str(tmpdir)}
io:
- type: write
call_metadata:
data_product: output/simple_network_sim/outbreak-timeseries
component: outbreak-timeseries
access_metadata:
data_product: output/simple_network_sim/outbreak-timeseries
filename: simple.h5
""")
output = vis.read_output("output/simple_network_sim/outbreak-timeseries", path)
pd.testing.assert_frame_equal(output, df)
def test_read_output_ignore_read(tmpdir):
df = pd.DataFrame([{"a": 10, "b": 20}])
with open(str(tmpdir / Path("simple.h5")), "wb") as fp:
object_file.write_table(fp, "outbreak-timeseries", df)
path = str(tmpdir / Path("access.yaml"))
with open(path, "w") as fp:
fp.write(f"""
config:
data_directory: {str(tmpdir)}
io:
- type: read
call_metadata:
data_product: output/simple_network_sim/outbreak-timeseries
access_metadata:
data_product: output/simple_network_sim/outbreak-timeseries
- type: write
call_metadata:
data_product: output/simple_network_sim/outbreak-timeseries
component: outbreak-timeseries
access_metadata:
data_product: output/simple_network_sim/outbreak-timeseries
filename: simple.h5
""")
output = vis.read_output("output/simple_network_sim/outbreak-timeseries", path)
pd.testing.assert_frame_equal(output, df)
def test_read_output_multiple_writes(tmpdir):
df = | pd.DataFrame([{"a": 10, "b": 20}]) | pandas.DataFrame |
"""Specifices base classes"""
import pandas as pd
import pathlib
from typing import TypedDict
from . utils import NoClonesError, NoConstructsError
class TacoProject():
"""Holds project data"""
def __init__(self, project_name:str):
self._project_name = project_name
self.constructs = list()
@property
def project_name(self) -> str:
"""Contains the name of the project."""
return self._project_name
def generate_construct_template(self, filename:str=None, properties:list=None):
""" Generates an Excel file to be used as a template for construct data input.
Args:
filename (str): Filename for the input template. If None, project name + '_constructs.xlsx' will be used.
properties (list): Properties associated with constructs. If None, ['property_1', 'property_2', 'property_3'] will be used.
Raises:
FileExistsError: If the file already exists.
"""
if properties == None:
template = pd.DataFrame(columns=['identifier', 'property_1', 'property_2', 'property_3'])
else:
template = pd.DataFrame(columns=['identifier'] + properties)
if filename == None:
_filename = pathlib.Path(f'{self.project_name}_constructs.xlsx')
else:
_filename = pathlib.Path(filename)
if _filename.exists():
raise FileExistsError('File already exists. Please delete the old template or choose a different file name.')
else:
template.to_excel(_filename, index=False)
def read_construct_input(self, filename:str=None):
""" Reads an Excel file containing a list of constructs.
Args:
filename (str): Filename for the input. If None, project name + '_constructs.xlsx' will be used.
"""
if filename == None:
_filename = f'{self.project_name}_constructs.xlsx'
else:
_filename = filename
data = pd.read_excel(_filename, index_col=0).to_dict('index')
unique_counter=0
duplicate_counter=0
existing_ids = {x['identifier'] for x in self.constructs}
for identifier, properties in data.items():
if identifier in existing_ids:
duplicate_counter+=1
else:
unique_counter += 1
new_construct: Construct = {'identifier': identifier, 'properties': properties, 'clones': []}
self.constructs.append(new_construct)
if unique_counter:
print(f'{unique_counter} constructs added to the project. {duplicate_counter} duplicates were skipped.')
else:
raise NoConstructsError('No new constructs found in file')
def generate_transformation_template(self, filename:str=None):
""" Generates an Excel file to be used as a template for data input from a transformation round.
Args:
filename (str): Filename for the input template. If None, project name + '_transformation_results.xlsx' will be used.
Raises:
FileExistsError: If the file already exists.
"""
if filename == None:
_filename = pathlib.Path(f'{self.project_name}_transformation_results.xlsx')
else:
_filename = pathlib.Path(filename)
if _filename.exists():
raise FileExistsError('File already exists. Please delete the old template or choose a different file name.')
template = pd.DataFrame(columns=['identifier', 'agar_plate_number', 'agar_plate_position', 'number_of_clones'])
template['identifier'] = [construct['identifier'] for construct in self.constructs]
template.to_excel(_filename, index=False)
def read_transformation_input(self, filename:str=None):
""" Reads an Excel file containing a list of transformed constructs.
Args:
filename (str): Filename for the input. If None, project name + '_transformation_results.xlsx' will be used.
"""
if filename == None:
_filename = f'{self.project_name}_transformation_results.xlsx'
else:
_filename = filename
data = pd.read_excel(_filename)
clone_counter = 0
for _, row in data.iterrows():
for construct in self.constructs:
if row['identifier'] == construct['identifier']:
break
else:
raise ValueError(f'Could not find {row["identifier"]} in list of constructs.')
clones = []
for i in range(1, row['number_of_clones'] + 1):
new_clone: Clone = {
'identifier': f"{row['identifier']}_{i}",
'agar_plate_number': row['agar_plate_number'],
'agar_plate_position': row['agar_plate_position'],
'pcr_result': None,
'seq_result': None,
'storage_plate_number': None,
'storage_plate_position': None,
}
clones.append(new_clone)
clone_counter += 1
construct['clones'] = clones
print(f'{clone_counter} clones were added to the project.')
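# Hedged end-to-end sketch of the workflow implied by the methods above (default
# file names as documented; the project name is an assumption):
# project = TacoProject('demo')
# project.generate_construct_template()       # then fill in identifiers/properties
# project.read_construct_input()              # loads 'demo_constructs.xlsx'
# project.generate_transformation_template()  # record agar plates and clone counts
# project.read_transformation_input()         # creates clone entries per construct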
def generate_pcr_template(self, filename:str=None, max_clones:int=None, use_mtp=False):
""" Generates an Excel file to be used as a template for PCR data input.
Args:
max_clones (int): Maximum number of clones to test. If None, all clones of each construct are chosen.
filename (str): Filename for the input template. If None, project name + '_pcr_results.xlsx' will be used.
use_mtp (boolean): If true, template will have additional columns for 96 well PCR plates and positions
Raises:
FileExistsError: If the file already exists.
            NoClonesError: If none of the constructs has any clones.
"""
        if filename is None:
_filename = pathlib.Path(f'{self.project_name}_pcr_results.xlsx')
else:
_filename = pathlib.Path(filename)
if _filename.exists():
raise FileExistsError('File already exists. Please delete the old template or choose a different file name.')
pcr_counter = 1
pcr_plate_counter = 1
pcr_position_counter = 1
pcr_position_mapping = dict(zip(range(1,97), [f'{l}{n}' for l in 'ABCDEFGH' for n in range(1,13)]))
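        # pcr_position_mapping fills a 96-well plate row-wise:
        # 1 -> 'A1', 12 -> 'A12', 13 -> 'B1', ..., 96 -> 'H12'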
all_clone_counter = 0
collection = []
for construct in self.constructs:
clone_counter = 0
for clone in construct['clones']:
new_entry = {
'construct_identifier': construct['identifier'],
'clone_identifier': clone['identifier'],
'agar_plate_number': clone['agar_plate_number'],
'agar_plate_position': clone['agar_plate_position'],
'pcr_identifier': pcr_counter,
}
if use_mtp:
new_entry.update({
'pcr_plate': pcr_plate_counter,
'pcr_plate_position': pcr_position_mapping[pcr_position_counter],
})
new_entry.update({'pcr_result': None})
collection.append(new_entry)
pcr_counter += 1
clone_counter += 1
all_clone_counter += 1
if pcr_position_counter == 96:
pcr_position_counter = 1
pcr_plate_counter += 1
else:
pcr_position_counter += 1
if max_clones and (clone_counter >= max_clones):
break
if all_clone_counter == 0:
raise NoClonesError('There are no clones in your project.')
template = pd.DataFrame(collection)
template.to_excel(_filename)
def read_pcr_input(self, filename:str=None):
""" Reads an Excel file containing results from colony PCR.
Args:
filename (str): Filename for the input. If None, project name + '_pcr_results.xlsx' will be used.
"""
        if filename is None:
_filename = f'{self.project_name}_pcr_results.xlsx'
else:
_filename = filename
data = pd.read_excel(_filename)
counter = 0
for _, row in data.iterrows():
for construct in self.constructs:
for clone in construct['clones']:
if row['clone_identifier'] == clone['identifier']:
break
else:
clone = None
                if clone is not None:
break
else:
raise ValueError(f'Clone {row["clone_identifier"]} could not be found.')
if row['pcr_result'] == 'y' or row['pcr_result'] == True:
clone['pcr_result'] = 'success'
counter += 1
elif row['pcr_result'] == 'n' or row['pcr_result'] == False:
clone['pcr_result'] = 'fail'
counter += 1
print(f'PCR data for {counter} clones added')
def generate_seq_template(self, only_with_positive_pcr:bool=True, max_clones:int=None, filename:str=None):
""" Generates an Excel file to be used as a template for SEQ data input.
Args:
only_with_positive_pcr (bool): If True, only clones with a positive PCR results are considered.
            max_clones (int): Maximum number of clones to test. If None, all clones of each construct are potentially chosen.
filename (str): Filename for the input template. If None, project name + '_seq_results.xlsx' will be used.
Raises:
FileExistsError: If the file already exists.
            NoClonesError: If none of the constructs has any clones.
"""
        if filename is None:
_filename = pathlib.Path(f'{self.project_name}_seq_results.xlsx')
else:
_filename = pathlib.Path(filename)
if _filename.exists():
raise FileExistsError('File already exists. Please delete the old template or choose a different file name.')
seq_counter = 1
all_clone_counter = 0
collection = []
for construct in self.constructs:
clone_counter = 0
for clone in construct['clones']:
if only_with_positive_pcr:
                    if clone['pcr_result'] == 'fail' or clone['pcr_result'] is None:
continue
collection.append({
'clone_identifier': clone['identifier'],
'agar_plate_number': clone['agar_plate_number'],
'agar_plate_position': clone['agar_plate_position'],
'seq_identifier': seq_counter,
'seq_result': None,
})
seq_counter += 1
clone_counter += 1
all_clone_counter += 1
if max_clones and (clone_counter >= max_clones):
break
if all_clone_counter == 0:
raise NoClonesError('There are no clones in your project.')
template = pd.DataFrame(collection).set_index('clone_identifier')
template.to_excel(_filename)
def read_seq_input(self, filename:str=None):
""" Reads an Excel file containing a list of constructs.
Args:
filename (str): Filename for the input. If None, project name + '_seq_results.xlsx' will be used.
"""
        if filename is None:
_filename = f'{self.project_name}_seq_results.xlsx'
else:
_filename = filename
data = | pd.read_excel(_filename) | pandas.read_excel |
#!/usr/bin/env python3
# this requires terminal states from CellRank's GPCCA (see the time benchmarks)
import scanpy as sc
import palantir
import numpy as np
import pandas as pd
import pickle
import os
import traceback
from anndata import AnnData
from typing import Optional, List
from math import ceil
def _clean_orig_names(series):
return np.array(list(map(np.array, series.str.split(":"))))[:, 1]
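# Example of the cleaning above (cell names assumed to look like "prefix:barcode"):
# an index entry such as "sample_1:AAACCTGAGC" is reduced to "AAACCTGAGC".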
def _add_annotations(adata: AnnData, annot: pd.DataFrame) -> None:
adata.obs['genes_cleaned'] = _clean_orig_names(adata.obs.index)
assert len(set(adata.obs['genes_cleaned'])) == len(adata)
annot['genes_cleaned'] = np.array(list(map(np.array, annot.index.str.split("_"))))[:, 2]
annot['genes_cleaned'] = annot['genes_cleaned'].str.replace("-", "x-")
tmp = adata.obs.merge(annot, how='left', on='genes_cleaned')
tmp.drop_duplicates('genes_cleaned', inplace=True)
tmp.set_index('genes_cleaned', drop=True, inplace=True)
adata.obs = tmp
adata.obs['Reprogramming Day'] = adata.obs['Reprogramming Day'].astype('category')
def _select_root_cell(adata: AnnData) -> str:
obs = adata.obs['Reprogramming Day']
min_val = np.nanmin(obs.cat.categories)
return obs[obs == min_val].index[0]
def _load_cellrank_final_states(adata: AnnData, data) -> Optional[list]:
try:
index = _clean_orig_names(data['main_states'].index)
valid_ixs = np.isin(index, adata.obs.index)
x = data['lin_probs'][valid_ixs, :]
x = pd.DataFrame(x, index=index[valid_ixs])
if len(index) < 3:
return None
ixs = []
for lin in range(x.shape[1]):
y = x[~np.isin(x.index, ixs)]
assert len(y) + len(ixs) == x.shape[0], "Sanity check failed"
ix = np.argmax(y.values[:, lin])
ixs.append(y.index[ix])
return ixs
except Exception as e:
print(f"Unexpected error: `{e}`.")
raise e
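# Note: the selection loop above proceeds greedily per lineage (column of
# CellRank's lineage-probability matrix): it picks the cell with the highest
# probability that has not already been claimed by a previous lineage, and
# those cells are later passed to Palantir as terminal states.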
def _palantir_preprocess(adata: AnnData):
sc.pp.filter_genes(adata, min_cells=10)
sc.pp.normalize_total(adata)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata, flavor='cell_ranger', n_top_genes=1500)
print("Running PCA")
n_comps = 300
sc.pp.pca(adata, use_highly_variable=True, n_comps=n_comps)
print("Diff maps")
dm_res = palantir.utils.run_diffusion_maps(pd.DataFrame(adata.obsm['X_pca'][:, :n_comps],
index=adata.obs_names))
print("MS space")
ms_data = palantir.utils.determine_multiscale_space(dm_res)
return ms_data
def _benchmark_palantir(bdata: AnnData, size: int, col: int, annot: pd.DataFrame, fs_data: pd.DataFrame,
n_jobs: int = 32) -> Optional[List[float]]:
from utils import benchmark
run_palantir = benchmark(palantir.core.run_palantir)
res = None
try:
print(f"Subsetting data to `{size}`, split `{col}`.")
_add_annotations(bdata, annot)
assert bdata.n_obs == size
root_cell = _select_root_cell(bdata)
final_states = _load_cellrank_final_states(bdata, fs_data)
if final_states is None:
print("No final states found, skipping")
return None
elif root_cell in final_states:
print("Root cell is in final states, skipping")
return None
print("Preprocessing")
ms_data = _palantir_preprocess(bdata)
print(f"Running with CellRank terminal states `root_cell={root_cell}` and "
f"`final_states={final_states}`")
res, _ = run_palantir(ms_data,
root_cell,
terminal_states=final_states,
knn=30,
num_waypoints=int(ceil(size * 0.15)),
n_jobs=n_jobs,
scale_components=False,
use_early_cell_as_start=True)
except Exception as e:
print(f"Unable to run `Palantir` with size `{size}` on split `{col}`. Reason: `{e}`.")
print(traceback.format_exc())
return res
def benchmark_palantir(adata, size: int, col: int, n_jobs: int = 32) -> None:
from utils import PROFILER_ROOT, PROFILER_ROOT_1_CORE, DATA_DIR
path = PROFILER_ROOT_1_CORE if n_jobs == 1 else PROFILER_ROOT
path = path / "palantir" / f"{size}_{col}.pickle"
    if not os.path.isdir(path.parent):
os.makedirs(path.parent, exist_ok=True)
annot = | pd.read_csv(DATA_DIR / "morris_data" / "annotations" / "supp_table_4.csv", index_col=0, header=2) | pandas.read_csv |
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyfora.pandas_util
import pyfora.algorithms
import pyfora.algorithms.LinearRegression as LinearRegression
import pyfora.pure_modules.pure_pandas as PurePandas
import numpy
import pandas
import pandas.util.testing
import random
class InMemoryPandasTestCases(object):
def checkFramesEqual(self, df1, df2):
pandas.util.testing.assert_frame_equal(df1, df2)
return True
def checkSeriesEqual(self, series1, series2):
pandas.util.testing.assert_series_equal(series1, series2)
return True
def test_pandas_series_basic(self):
s = pandas.Series(range(10))
def f():
return s
self.equivalentEvaluationTest(f)
def test_repeated_dataframe_ctor(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
def f():
return pandas.DataFrame(df)
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
def test_repeated_series_ctor(self):
s = pandas.Series([1,2,3])
def f():
return pandas.Series(s)
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkSeriesEqual
)
def test_pandas_dataframes_basic(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
def f():
return df
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
def test_pandas_series_indexing_1(self):
s = pandas.Series(4)
def f(ix):
return s.iloc[ix]
for ix in range(-len(s), len(s)):
self.equivalentEvaluationTest(
f,
ix,
comparisonFunction=lambda x, y: x == y
)
def test_pandas_dataframe_indexing_1(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
def f(ix, jx):
return df.iloc[ix, jx]
for ix in range(-df.shape[0], df.shape[0]):
for jx in range(-df.shape[1], df.shape[1]):
self.equivalentEvaluationTest(
f, ix, jx,
comparisonFunction=lambda x, y: int(x) == int(y)
)
def test_pandas_dataframe_indexing_2(self):
df = pandas.DataFrame({'A': [1,2], 'B': [5,6]})
def f(ix1, ix2, jx):
return df.iloc[ix1:ix2, jx]
ixes = range(-df.shape[0], df.shape[1]) + [None]
jxes = range(-df.shape[1], df.shape[1])
for ix1 in ixes:
for ix2 in ixes:
for jx in jxes:
self.equivalentEvaluationTest(
f, ix1, ix2, jx,
comparisonFunction=lambda x, y: list(x) == list(y)
)
def test_pandas_dataframe_indexing_3(self):
# due to some hashing stuff, this test will fail if
# key 'D' is replaced by 'C'. Ehh ...
df = pandas.DataFrame({'A': range(5), 'B': range(5,10), 'D': range(10,15)})
def f():
return df.iloc[:,:-1]
def g():
return df.iloc[:,-1:]
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
self.equivalentEvaluationTest(
g,
comparisonFunction=self.checkFramesEqual
)
def test_pandas_dataframe_indexing_4(self):
df = pandas.DataFrame({'A': range(5), 'B': range(5,10), 'D': range(10,15)})
def f():
return df.iloc[:,:]
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
def test_pandas_dataframe_indexing_5(self):
df = pandas.DataFrame({'A': range(5), 'B': range(5,10), 'D': range(10,15)})
def f():
return df.iloc[:,]
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
def test_pandas_dataframe_indexing_6(self):
df = pandas.DataFrame({'A': range(5), 'B': range(5,10), 'D': range(10,15)})
def f(jx):
return df.iloc[:jx]
for jx in xrange(2, 5, 2):
self.equivalentEvaluationTest(
f, jx,
comparisonFunction=self.checkFramesEqual
)
def test_pandas_shape(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
self.equivalentEvaluationTest(lambda: df.shape)
def test_pandas_dataframe_ctor_1(self):
items = [('A', [1,2,3]), ('B', [4,5,6])]
self.equivalentEvaluationTest(
lambda: pandas.DataFrame(dict(items)),
comparisonFunction=self.checkFramesEqual
)
def test_pandas_dataframe_ctor_2(self):
# NOTE: this form breaks the pandas API
col1 = [1,2,3]
col2 = [4,5,6]
data = [col1, col2]
res = self.evaluateWithExecutor(
lambda: pandas.DataFrame(data)
)
self.checkFramesEqual(
res,
pandas.DataFrame({
'C0': col1,
'C1': col2
})
)
def test_pandas_dataframe_class(self):
self.equivalentEvaluationTest(
lambda: pandas.DataFrame,
comparisonFunction=lambda x, y: x == y
)
def test_pandas_read_csv_1(self):
        # there's some weirdness with whitespace that we have to deal
# with, on the fora side. For example, after indenting all the
# lines of s here, the read csv will miss the first line
# o_O
s = """
A,B,C
1,2,3
4,5,6
7,8,9
10,11,12
"""
res = self.evaluateWithExecutor(
lambda: pyfora.pandas_util.read_csv_from_string(s)
)
self.checkFramesEqual(
res,
pandas.DataFrame(
{
'A': [1,4,7,10],
'B': [2,5,8,11],
'C': [3,6,9,12]
},
dtype=float
)
)
def test_pandas_read_csv_2(self):
        # there's some weirdness with whitespace that we have to deal
# with, on the fora side. For example, after indenting all the
# lines of s here, the read csv will miss the first line
# o_O
s = """
A,B,C
1,2,3
4,notAFloat,6
7,8,9
10,11,12
"""
def f():
try:
return pyfora.pandas_util.read_csv_from_string(s)
except Exception as e:
return e
res = self.evaluateWithExecutor(f)
self.assertIsInstance(res, Exception)
def test_pandas_read_csv_from_s3(self):
s = """
A,B,C
1,2,3
4,5,6
7,8,9
10,11,12
"""
with self.create_executor() as executor:
s3 = self.getS3Interface(executor)
key = "test_pandas_read_csv_from_s3_key"
s3().setKeyValue("bucketname", key, s)
remoteCsv = executor.importS3Dataset("bucketname", key).result()
with executor.remotely.downloadAll():
df = pyfora.pandas_util.read_csv_from_string(remoteCsv)
self.checkFramesEqual(
df,
pandas.DataFrame(
{
'A': [1,4,7,10],
'B': [2,5,8,11],
'C': [3,6,9,12]
},
dtype=float
)
)
def pyfora_linear_regression_test(self):
random.seed(42)
nRows = 100
x_col_1 = []
x_col_2 = []
y_col = []
for _ in range(nRows):
x1 = random.uniform(-10, 10)
x2 = random.uniform(-10, 10)
noise = random.uniform(-1, 1)
y = x1 * 5 + x2 * 2 - 8 + noise
x_col_1.append(x1)
x_col_2.append(x2)
y_col.append(y)
def computeCoefficients():
predictors = PurePandas.PurePythonDataFrame([x_col_1, x_col_2], ["x1", "x2"])
responses = PurePandas.PurePythonDataFrame([y_col], ["y"])
return LinearRegression.linearRegression(predictors, responses)
res_python = computeCoefficients()
res_pyfora = self.evaluateWithExecutor(computeCoefficients)
self.assertArraysAreAlmostEqual(res_python, res_pyfora)
df_x = pandas.DataFrame({
'x1': x_col_1,
'x2': x_col_2
})
df_y = pandas.DataFrame({
'y': y_col
})
res_pandas = LinearRegression.linearRegression(df_x, df_y)
self.assertArraysAreAlmostEqual(res_python, res_pandas)
# verified using sklearn.linear_model.LinearRegression, on nRows = 100
res_scikit = numpy.array([[4.96925412, 2.00279298, -7.98208391]])
self.assertArraysAreAlmostEqual(res_python, res_scikit)
def test_pyfora_linear_regression_1(self):
self.pyfora_linear_regression_test()
def test_pyfora_linear_regression_with_splitting(self):
# note: the right way to do this is to expose _splitLimit
# as an argument to LinearRegression.linearRegression, but a
# lack of named arguments in pyfora means that the code
# would be slightly more verbose than it should need be.
oldSplitLimit = LinearRegression._splitLimit
try:
LinearRegression._splitLimit = 10
self.pyfora_linear_regression_test()
finally:
LinearRegression._splitLimit = oldSplitLimit
def test_series_sort_values(self):
s = pandas.Series([5,5,2,2,1,2,3,4,2,3,1,5])
def f():
return list(s.sort_values().values)
self.equivalentEvaluationTest(
f,
comparisonFunction=lambda x, y: all(map(lambda v:v[0]==v[1], zip(x, y)))
)
def test_series_unique(self):
s = pandas.Series([5,5,2,2,1,2,3,4,2,3,1,5])
def f():
return sorted(list(s.unique()))
self.equivalentEvaluationTest(
f,
comparisonFunction=lambda x, y: all(map(lambda v:v[0]==v[1], zip(x, y)))
)
def test_dataframe_pyfora_addColumn(self):
d = {'A': [1,2,3,4], 'B': [5,6,7,8]}
df = pandas.DataFrame(d)
c = range(8, 12)
def f():
return df.pyfora_addColumn('C', c)
newDict = d.copy()
newDict['C'] = c
self.checkFramesEqual(
self.evaluateWithExecutor(f),
pandas.DataFrame(newDict)
)
def test_series_isinstance(self):
s = pandas.Series([1,2,3,4])
def f():
return isinstance(s, list)
self.equivalentEvaluationTest(f)
def test_dataframe_as_matrix(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
def f():
return df.as_matrix()
self.equivalentEvaluationTest(f)
def test_series_as_matrix(self):
s = pandas.Series([1,2,3])
def f():
return s.as_matrix()
self.equivalentEvaluationTest(f)
def test_DataFrameRow_1(self):
df = | pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]}) | pandas.DataFrame |
import pandas as pd
import fbprophet as Prophet
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
# to plot within notebook
import matplotlib.pyplot as plt
from stock_analysis.iam_util.iam_utils import calculate_impact
# company_name = 'kelani_valley'
# file = '/home/randilu/fyp_integration/Impact-Analysis-Module/data/external/stock-data-companies/' + company_name + '.csv'
def extract_changepoints_from_prophet(company_name, stock_csv_file):
plt.rcParams['figure.figsize'] = (20, 10)
# plt.style.use('fivethirtyeight')
plt.style.use('ggplot')
# reading from csv
file = stock_csv_file
    stock_df = pd.read_csv(file, sep=',', encoding='utf-8', index_col='date', parse_dates=True)
calculate_impact(stock_df, 4)
print(stock_df.head())
df = stock_df.reset_index()
print(df.head())
df = df.rename(columns={'date': 'ds', 'close': 'y'})
print(df.head())
df.set_index('ds').y.plot()
plt.show()
    # log-transform the close prices to reduce non-stationarity before fitting
df['y_orig'] = df['y']
df['y'] = np.log(df['y'])
#
# applying prophet model
#
model = Prophet.Prophet(changepoint_range=1, changepoint_prior_scale=0.05)
model.fit(df)
# Create future dataframe
future = model.make_future_dataframe(periods=90)
print(future.tail())
# Forecast for future dataframe
forecast = model.predict(future)
print(forecast.tail())
print('Forecast: \n', forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail())
forecast['yhat_scaled'] = np.exp(forecast['yhat'])
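    # np.exp inverts the np.log transform applied to df['y'] above, so
    # 'yhat_scaled' is the forecast expressed on the original price scale.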
model.plot(forecast)
model.plot_components(forecast)
plt.show()
viz_df = df.join(forecast[['yhat_scaled', 'yhat', 'yhat_lower', 'yhat_upper']], how='outer')
viz_df[['y_orig', 'yhat_scaled']].plot()
plt.show()
#
# change point detection
#
changepoints = model.changepoints
print(changepoints)
cp_df = pd.DataFrame({'date': changepoints})
| pd.to_datetime(cp_df['date']) | pandas.to_datetime |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data['Rating'].hist()
data = data[data['Rating']<=5]
data['Rating'].hist()
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = total_null/data.isnull().count()
missing_data = pd.concat([total_null,percent_null],axis=1,keys=['Total','Percent'])
print(missing_data)
data.dropna(inplace=True)
total_null_1 = data.isnull().sum()
percent_null_1 = total_null_1/data.isnull().count()
missing_data_1 = | pd.concat([total_null_1,percent_null_1],axis=1,keys=['Total','Percent']) | pandas.concat |
import pandas as pd
import json
import urllib
import requests
from bs4 import BeautifulSoup
def get_stock_data(data_type, name):
""" This function retireves stock data from www.wallstreet-online.de and stores the result in a dataframe
Arguments:
data_type (string): type of data "stock" or "index"
name (string): name of data, e.g. "bitcoin-group", "volkswagen", "dowjones"
Return:
Pandas dataframe: Columns "Day_cts", "Dates", "Start", "Max", "Min", "End", "volume" and "t_in_sec". Will return empty dataframe in case of error
"""
# Call webpage with chart data
if data_type == "stock":
quote_page = "https://www.wallstreet-online.de/aktien/"+name+"-aktie/chart#t:max||s:lines||a:abs||v:day||ads:null"
else:
quote_page = "https://www.wallstreet-online.de/indizes/"+name+"/chart#t:max||s:lines||a:abs||v:day||ads:null"
# Load page in variable and analyze
try:
        page = urllib.request.urlopen(quote_page)
except urllib.error.URLError:
# Something went wrong
return pd.DataFrame()
soup = BeautifulSoup(page, "html.parser")
inst_id = soup.find("input", attrs={"name": "inst_id"})["value"]
market_id = soup.find("input", attrs={"class": "marketSelect"})["value"]
# Get JSON file with chart data
url = "https://www.wallstreet-online.de/_rpc/json/instrument/chartdata/loadRawData?q%5BinstId%5D="+inst_id+"&q%5BmarketId%5D="+market_id+"&q%5Bmode%5D=hist"
resp = requests.get(url=url)
data = json.loads(resp.text)
# Store the stock data in Pandas Dataframe
end = data["markets"][market_id]["lastUpdate"][0:10]
day_cts = len(data["data"])
dates = pd.date_range(end=end, periods=day_cts, freq="B")
# Adjust for holidays
if data["data"][-1][-1] == None:
end = dates[-1]+1
dates = | pd.date_range(end=end, periods=day_cts, freq="B") | pandas.date_range |
import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
###########################################################################
##################### Related to adding metadata ##########################
###########################################################################
class TestMetadata(object):
# test add_metadata - one after the other with dupe cols
# yes overwrite
def test_add_metadata_4(self):
sg = swan.SwanGraph()
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
sg.add_metadata(meta, overwrite=True)
assert {'3','4'} == set(sg.adata.obs.cluster.tolist())
# test add_metadata - one after the other with dupe cols
# don'e overwrite
def test_add_metadata_3(self):
sg = swan.SwanGraph()
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
sg.add_metadata(meta, overwrite=False)
assert {'2', '1'} == set(sg.adata.obs.cluster.tolist())
# test add_metadata - one after the other
def test_add_metadata_2(self):
sg = swan.SwanGraph()
# just gencode vM21 chr 11 and tcf3
gtf = 'files/chr11_and_Tcf3.gtf'
sg.add_annotation(gtf, verbose=True)
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_2.tsv'
sg.add_metadata(meta)
test = sg.adata.obs
data = [['D12', '1', 'K562', 'G0'],
['PB65_B017', '2', 'GM12878', 'M'],
['PB65_B018', '2', 'GM12878', 'S']]
cols = ['dataset', 'cluster', 'sample', 'cell_state']
ctrl = pd.DataFrame(data=data, columns=cols)
ctrl.index = ctrl.dataset
ctrl = ctrl[test.columns]
ctrl.sort_index(inplace=True)
test.sort_index(inplace=True)
print('test')
print(test)
print('control')
print(ctrl)
assert test.equals(ctrl)
# test add_metadata - vanilla
def test_add_metadata(self):
sg = swan.SwanGraph()
# just gencode vM21 chr 11 and tcf3
# gtf = 'files/chr11_and_Tcf3.gtf'
# sg.add_annotation(gtf, verbose=True)
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
# print(sg.t_df)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
test = sg.adata.obs
data = [['D12', '1', 'K562'],
['PB65_B017', '2', 'GM12878'],
['PB65_B018', '2', 'GM12878']]
cols = ['dataset', 'cluster', 'sample']
ctrl = pd.DataFrame(data=data, columns=cols)
ctrl.index = ctrl.dataset
ctrl = ctrl[test.columns]
ctrl.sort_index(inplace=True)
test.sort_index(inplace=True)
print('test')
print(test)
print('control')
print(ctrl)
assert test.equals(ctrl)
###########################################################################
############### Related to high-level dataset addition ####################
###########################################################################
class TestDataset(object):
# TODO
# add_dataset, add_transcriptome, add_annotation
# tests add_transcriptome - added after adding an annotation
def test_add_transcriptome_2(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full_annotation.gtf')
sg.add_transcriptome('files/test_full.gtf')
# t_df
sg.t_df = sg.t_df[['tid', 'annotation']]
data = [['test1', True],
['test2', True],
['test3', False],
['test4', True],
['test5', True],
['test6', True]]
cols = ['tid', 'annotation']
ctrl_t_df = pd.DataFrame(data=data, columns=cols)
ctrl_t_df = swan.create_dupe_index(ctrl_t_df, 'tid')
ctrl_t_df = swan.set_dupe_index(ctrl_t_df, 'tid')
# first order to make them comparable
# sort all values by their IDs
sg.t_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_t_df = ctrl_t_df[sg.t_df.columns]
print('test')
print(sg.t_df)
print('control')
print(ctrl_t_df)
assert (sg.t_df == ctrl_t_df).all(axis=0).all()
# loc_df - new location at chr2, 65
print('test')
print(sg.loc_df)
ind = (sg.loc_df.chrom=='chr2')&(sg.loc_df.coord==65)
temp = sg.loc_df.loc[ind, 'annotation'].to_frame()
for i, entry in temp.iterrows():
assert entry.annotation == False
temp = sg.loc_df.loc[~ind]
for i, entry in temp.iterrows():
assert entry.annotation == True
# tests add_transcriptome - vanilla
def test_add_transcriptome_1(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
# tests add_annotation - transcriptome already in SG
def test_add_annotation_2(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.add_annotation('files/test_full_annotation.gtf')
# t_df
sg.t_df = sg.t_df[['tid', 'annotation']]
data = [['test1', True],
['test2', True],
['test3', False],
['test4', True],
['test5', True],
['test6', True]]
cols = ['tid', 'annotation']
ctrl_t_df = pd.DataFrame(data=data, columns=cols)
ctrl_t_df = swan.create_dupe_index(ctrl_t_df, 'tid')
ctrl_t_df = swan.set_dupe_index(ctrl_t_df, 'tid')
# first order to make them comparable
# sort all values by their IDs
sg.t_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_t_df = ctrl_t_df[sg.t_df.columns]
print('test')
print(sg.t_df)
print('control')
print(ctrl_t_df)
assert (sg.t_df == ctrl_t_df).all(axis=0).all()
# loc_df - new location at chr2, 65
print('test')
print(sg.loc_df)
ind = (sg.loc_df.chrom=='chr2')&(sg.loc_df.coord==65)
temp = sg.loc_df.loc[ind, 'annotation'].to_frame()
for i, entry in temp.iterrows():
assert entry.annotation == False
temp = sg.loc_df.loc[~ind]
for i, entry in temp.iterrows():
assert entry.annotation == True
# tests add_annotation - vanilla
def test_add_annotation_1(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full_annotation.gtf')
# # loc_df
# data = [['chr1', 1, 0, True],
# ['chr1', 20, 1, True],
# ['chr1', 25, 2, True],
# ['chr1', 30, 3, True],
# ['chr1', 35, 4, True],
# ['chr1', 40, 5, True],
# ['chr2', 45, 6, True],
# ['chr2', 50, 7, True],
# ['chr2', 60, 8, True],
# ['chr2', 75, 10, True],
# ['chr2', 80, 11, True],
# ['chr2', 100, 12, True],
# ['chr2', 110, 13, True]]
# cols = ['chrom', 'coord', 'vertex_id', 'annotation']
# ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
# ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
# ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
#
# print('test')
# print(sg.loc_df)
# print('ctrl')
# print(ctrl_loc_df)
#
# print(sg.edge_df)
# assert 1 == 0
# # edge_df
# data = [[0, 1, '+', 'exon', 0, True],
# [1, 2],
# [2, 3],
# [3, 4],
# [4, 5],
# [5, 6],
# [6, 7],
#
#
# ]
# cols = ['v1', 'v2', 'strand', 'edge_type', 'annotation']
#
# # t_df
# data = [['test1', 'test1_tname', 'test1_gid', 'test1_gname', [0,1,2,3,4]], [0,1,2,3,4,5], True],
# ['test2', 'test2_tname', 'test2_gid', 'test2_gname', [5,6,7,8,9], [12,11,10,8,7,6], True],
# ['test4', 'test4_tname', 'test4_gid', 'test4_gname', [10], [6,7], True],
# ['test5', 'test5_tname', 'test2_gid', 'test2_gname', [5,11,12], [12,11,8,7], True],
# ['test6', 'test6_tname', 'test2_gid', 'test2_gname', [,6,7,8,9], [13,11,10,8,7,6], True]]
# cols = ['tid', 'tname', 'gid', 'gname', 'path', 'loc_path', 'annotation']
#
assert sg.annotation == True
assert 'annotation' in sg.t_df.columns
assert 'annotation' in sg.edge_df.columns
assert 'annotation' in sg.loc_df.columns
for ind, entry in sg.t_df.iterrows():
assert entry.annotation == True
assert entry.novelty == 'Known'
for ind, entry in sg.edge_df.iterrows():
assert entry.annotation == True
for ind, entry in sg.loc_df.iterrows():
assert entry.annotation == True
# tests:, label_annotated
# label annotated transcripts
def test_label_annotated(self):
sg = swan.SwanGraph()
data = [[0, [0,1]],
[1, [2,3]],
[2, [4,5]]]
sg.t_df = pd.DataFrame(data=data, columns=['tid', 'path'])
data = [[0,0,1], [1,1,2], [2,2,3], [3,3,4],
[4,4,5], [5,5,6]]
sg.edge_df = pd.DataFrame(data=data, columns=['edge_id', 'v1', 'v2'])
data = [0,1,2,3,4,5,6]
sg.loc_df = pd.DataFrame(data=data, columns=['vertex_id'])
tids = [0,1]
sg.label_annotated(tids)
ctrl_tids = [0,1]
tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
assert set(ctrl_tids) == set(tids)
ctrl_edges = [0,1,2,3]
edges = sg.edge_df.loc[sg.edge_df.annotation == True, 'edge_id'].tolist()
assert set(ctrl_edges) == set(edges)
ctrl_locs = [0,1,2,3,4]
locs = sg.loc_df.loc[sg.loc_df.annotation == True, 'vertex_id'].tolist()
assert set(ctrl_locs) == set(locs)
# add to empty sg, don't add isms
def test_add_transcriptome(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel_talon.gtf', include_isms=False)
print(sg.t_df)
assert "ISM" not in sg.t_df.novelty.unique()
# assert 1 == 0
# tests if correct error is thrown when adding annotation to
# sg that already has one
def test_add_annotation_already(self):
sg = swan.SwanGraph()
sg.annotation = True
with pytest.raises(Exception) as e:
sg.add_annotation('files/Canx.gtf')
assert 'Annotation already' in str(e.value)
# add annotation to empty sg
def test_add_annotation_empty_sg(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full.gtf')
# check annotation columns
assert all(sg.t_df.annotation.tolist())
assert all(sg.edge_df.annotation.tolist())
assert all(sg.loc_df.annotation.tolist())
# check novelty column in t_df
assert len(sg.t_df.loc[sg.t_df.novelty=='Known']) == len(sg.t_df.index)
# check annotation flag
assert sg.annotation == True
# add annotation to sg with data already in it
def test_add_annotation_sg_data(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel.gtf')
sg.add_annotation('files/test_known.gtf')
# t_df
annot_tids = ['test1', 'test2', 'test4']
assert all(sg.t_df.loc[annot_tids, 'annotation'])
ctrl_novel_tids = ['test3', 'test5']
novel_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
assert len(set(ctrl_novel_tids)-set(novel_tids)) == 0
assert len(ctrl_novel_tids) == len(novel_tids)
# make sure the novelty assignment worked
annot_tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Known', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
annot_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Undefined', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
# loc_df
ctrl_novel_locs = [('chr2', 65)]
temp = sg.loc_df[sg.loc_df.annotation == False]
chroms = temp.chrom.tolist()
coords = temp.coord.tolist()
novel_locs = [(chrom, coord) for chrom, coord in zip(chroms, coords)]
print('control')
print(ctrl_novel_locs)
print('test')
print(novel_locs)
assert len(set(ctrl_novel_locs)-set(novel_locs)) == 0
assert len(novel_locs) == len(ctrl_novel_locs)
# edge_df
edge_df = sg.add_edge_coords()
edge_df = edge_df.loc[edge_df.annotation == False]
ctrl_novel_edges = [('chr2', 75, 65, '-', 'exon'),
('chr2', 65, 50, '-', 'intron'),
('chr2', 80, 60, '-', 'intron'),
('chr2', 60, 50, '-', 'exon')]
chroms = edge_df.chrom.tolist()
v1s = edge_df.v1_coord.tolist()
v2s = edge_df.v2_coord.tolist()
strands = edge_df.strand.tolist()
etypes = edge_df.edge_type.tolist()
novel_edges = [(chrom,v1,v2,strand,etype) for chrom,v1,v2,strand,etype \
in zip(chroms,v1s,v2s,strands,etypes)]
print('control')
print(ctrl_novel_edges)
print('test')
print(novel_edges)
assert len(set(ctrl_novel_edges)-set(novel_edges)) == 0
assert len(ctrl_novel_edges) == len(novel_edges)
# add annotation to sg with data where data contains dupe transcript
def test_add_annotation_sg_data_dupe_tid(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel_1.gtf')
sg.add_annotation('files/test_known.gtf')
# check with coord/chr bc of reindexing fuckery not being
        # reimplemented yet
# t_df
annot_tids = ['test1', 'test2', 'test4']
assert all(sg.t_df.loc[annot_tids, 'annotation'])
ctrl_novel_tids = ['test3', 'test5']
novel_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
assert len(set(ctrl_novel_tids)-set(novel_tids)) == 0
assert len(ctrl_novel_tids) == len(novel_tids)
# make sure the novelty assignment worked
annot_tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Known', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
annot_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Undefined', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
# loc_df
ctrl_novel_locs = [('chr2', 65)]
temp = sg.loc_df[sg.loc_df.annotation == False]
chroms = temp.chrom.tolist()
coords = temp.coord.tolist()
novel_locs = [(chrom, coord) for chrom, coord in zip(chroms, coords)]
print('control')
print(ctrl_novel_locs)
print('test')
print(novel_locs)
assert len(set(ctrl_novel_locs)-set(novel_locs)) == 0
assert len(novel_locs) == len(ctrl_novel_locs)
# edge_df
edge_df = sg.add_edge_coords()
edge_df = edge_df.loc[edge_df.annotation == False]
ctrl_novel_edges = [('chr2', 75, 65, '-', 'exon'),
('chr2', 65, 50, '-', 'intron'),
('chr2', 80, 60, '-', 'intron'),
('chr2', 60, 50, '-', 'exon')]
chroms = edge_df.chrom.tolist()
v1s = edge_df.v1_coord.tolist()
v2s = edge_df.v2_coord.tolist()
strands = edge_df.strand.tolist()
etypes = edge_df.edge_type.tolist()
novel_edges = [(chrom,v1,v2,strand,etype) for chrom,v1,v2,strand,etype \
in zip(chroms,v1s,v2s,strands,etypes)]
print('control')
print(ctrl_novel_edges)
print('test')
print(novel_edges)
assert len(set(ctrl_novel_edges)-set(novel_edges)) == 0
assert len(ctrl_novel_edges) == len(novel_edges)
###########################################################################
###################### Related to file parsing ############################
###########################################################################
class TestFiles(object):
# tests GTF parsing
def test_parse_gtf(self):
gtf_file = 'files/Canx.gtf'
t_df, exon_df, from_talon = swan.parse_gtf(gtf_file, True, False)
t_df.index.name = 'tid_index'
t_df = t_df.sort_values(by='tid_index')
ctrl_t_df = pd.read_csv('files/Canx_transcript.tsv',sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df = ctrl_t_df.sort_values(by='tid_index')
ctrl_exons = ctrl_t_df.exons.tolist()
ctrl_exons = [exons.split(',') for exons in ctrl_exons]
ctrl_t_df['exons'] = ctrl_exons
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
# tests TALON DB parsing - no pass_list
def test_parse_db_1(self):
db_file = 'files/test_full.db'
pass_list = 'files/test_full_pass_list.csv'
t_df, edge_df = swan.parse_db(db_file, None, False, True, False)
ctrl_t_df, ctrl_e_df = get_test_transcript_exon_dicts()
for key, item in ctrl_t_df.items():
item['exons'] = swan.reorder_exons(item['exons'])
ctrl_t_df = pd.DataFrame(ctrl_t_df).transpose()
ctrl_e_df = pd.DataFrame(ctrl_e_df).transpose()
# sort all values by their IDs
edge_df.sort_index(inplace=True)
t_df.sort_index(inplace=True)
ctrl_e_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_e_df = ctrl_e_df[edge_df.columns]
ctrl_t_df = ctrl_t_df[t_df.columns]
assert 'novelty' in t_df.columns
print('test')
print(edge_df)
print('control')
print(ctrl_e_df)
print(edge_df == ctrl_e_df)
assert (edge_df == ctrl_e_df).all(axis=0).all()
print('test')
print(t_df)
print(t_df.exons)
print('control')
print(ctrl_t_df)
print(ctrl_t_df.exons)
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
# tests TALON DB parsing - yes pass_list
def test_parse_db_2(self):
db_file = 'files/test_full.db'
pass_list = 'files/test_full_pass_list.csv'
t_df, edge_df = swan.parse_db(db_file, pass_list, False, True, False)
ctrl_t_df, ctrl_e_df = get_test_transcript_exon_dicts()
for key, item in ctrl_t_df.items():
item['exons'] = swan.reorder_exons(item['exons'])
# delete entries that weren't on pass list
del ctrl_e_df['chr2_45_50_+_exon']
del ctrl_t_df['test4']
ctrl_t_df = pd.DataFrame(ctrl_t_df).transpose()
ctrl_e_df = pd.DataFrame(ctrl_e_df).transpose()
# sort all values by their IDs
edge_df.sort_index(inplace=True)
t_df.sort_index(inplace=True)
ctrl_e_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_e_df = ctrl_e_df[edge_df.columns]
ctrl_t_df = ctrl_t_df[t_df.columns]
assert 'novelty' in t_df.columns
print('test')
print(edge_df)
print('control')
print(ctrl_e_df)
print(edge_df == ctrl_e_df)
assert (edge_df == ctrl_e_df).all(axis=0).all()
print('test')
print(t_df)
print(t_df.exons)
print('control')
print(ctrl_t_df)
print(ctrl_t_df.exons)
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
###########################################################################
####################### Related to DF creation ############################
###########################################################################
class TestCreateDFs(object):
# add_edge_coords, get_current_locs, get_current_edges,
# create_loc_dict, create_transcript_edge_dict create_dfs,
# tests add_edge_coords
def test_add_edge_coords(self):
sg = swan.SwanGraph()
sg = add_transcriptome_no_reorder_gtf(sg, 'files/test_full.gtf')
# sg.add_transcriptome('files/test_full.gtf')
cols = ['edge_id', 'v1', 'v2', 'strand', 'edge_type',
'v1_coord', 'v2_coord']
# print(sg.edge_df.head())
edge_df = sg.add_edge_coords()
print(edge_df.head())
edge_df = edge_df[cols]
ctrl_edge_df = pd.read_csv('files/test_add_edge_coords_result.tsv', sep='\t')
ctrl_edge_df = ctrl_edge_df[cols]
# first order to make them comparable
# sort all values by their IDs
edge_df.sort_values(by='edge_id', inplace=True)
ctrl_edge_df.sort_values(by='edge_id', inplace=True)
# and order columns the same way
ctrl_edge_df = ctrl_edge_df[edge_df.columns]
print('test')
print(edge_df)
print('control')
print(ctrl_edge_df)
assert (edge_df == ctrl_edge_df).all(axis=0).all()
# tests get_current_locs with an empty swangraph
def test_get_current_locs_empty_sg(self):
sg = swan.SwanGraph()
locs, n = sg.get_current_locs()
assert locs == {}
assert n == -1
# tests get_current_locs with a swangraph with data
def test_get_current_locs_sg_data(self):
sg = swan.SwanGraph()
cols = ['vertex_id', 'chrom', 'coord']
data = [[0, 1, 2], [1, 1, 3], [2, 3, 50]]
sg.loc_df = pd.DataFrame(data=data, columns=cols)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
locs, n = sg.get_current_locs()
ctrl_locs = {(1,2):0, (1,3):1, (3,50):2}
assert locs == ctrl_locs
assert n == 2
# tests get_current_edges with an empty swangraph
def test_get_current_edges_empty_sg(self):
sg = swan.SwanGraph()
edges, n = sg.get_current_edges()
assert(edges == {})
assert(n == -1)
# tests get_current_edges in a sg with data
def test_get_current_edges_sg_data(self):
sg = swan.SwanGraph()
cols = ['vertex_id', 'chrom', 'coord']
data = [[0, 1, 2], [1, 1, 3], [2, 1, 50]]
sg.loc_df = pd.DataFrame(data=data, columns=cols)
cols = ['edge_id', 'v1', 'v2', 'strand', 'edge_type']
data = [[0, 0, 1, '+', 'exon'],
[1, 1, 2, '+', 'intron']]
sg.edge_df = pd.DataFrame(data=data, columns=cols)
cols = ['tid']
data = [0]
sg.t_df = | pd.DataFrame(data=data, columns=cols) | pandas.DataFrame |
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import pickle as pkl
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import torch.nn.functional as F
from torch.autograd import Variable
from model import AttentionLSTMClassifier
from torch.utils.data import Dataset, DataLoader
from early_stop import EarlyStop
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import *
import itertools
import pickle
import copy
from tqdm import tqdm
from sklearn.utils.class_weight import compute_class_weight
NUM_CLASS = 11
def load_data(f_name, label_cols):
data = pd.read_csv(f_name, delimiter='\t')
# test_data = pd.read_csv('data/test.csv')
# label = pd.read_csv('data/test.csv')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
y_train = data[label_cols]
stop_words = set(stopwords.words('english'))
X_train = []
for t in data['Tweet'].fillna("fillna").values:
t = t.lower()
word_tokens = word_tokenize(t)
# filtered_sentence = [w for w in word_tokens if not w in stop_words]
X_train.append(' '.join(word_tokens))
return X_train, y_train, data['ID'], data['Tweet']
class DataSet(Dataset):
def __init__(self, __X, __y, __pad_len, __word2id, __num_labels, max_size=None, use_unk=True):
self.pad_len = __pad_len
self.word2id = __word2id
self.pad_int = __word2id['<pad>']
if max_size is not None:
self.source = self.source[:max_size]
self.target = self.target[:max_size]
self.tag = self.tag[:max_size]
self.data = []
self.label = []
self.num_label = __num_labels
self.seq_len = []
self.only_single = True
self.use_unk = use_unk
self.read_data(__X, __y) # process data
assert len(self.seq_len) == len(self.data) == len(self.label)
def read_data(self, __X, __y):
assert len(__X) == len(__y)
num_empty_lines = 0
for X, y in zip(__X, __y.as_matrix()):
tokens = X.split()
if self.use_unk:
tmp = [self.word2id[x] if x in self.word2id else self.word2id['<unk>'] for x in tokens]
else:
tmp = [self.word2id[x] for x in tokens if x in self.word2id]
if len(tmp) == 0:
tmp = [self.word2id['<empty>']]
num_empty_lines += 1
# continue
self.seq_len.append(len(tmp) if len(tmp) < self.pad_len else self.pad_len)
if len(tmp) > self.pad_len:
tmp = tmp[: self.pad_len]
self.data.append(tmp + [self.pad_int] * (self.pad_len - len(tmp)))
# a_label = [0] * self.num_label
# if int(y) == 1:
# a_label = [0, 1]
# else:
# a_label = [1, 0]
self.label.append(y)
print(num_empty_lines, 'empty lines found')
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return torch.LongTensor(self.data[idx]), torch.LongTensor([self.seq_len[idx]]), torch.FloatTensor(self.label[idx])
class TestDataSet(Dataset):
def __init__(self, __X, __pad_len, __word2id, __num_labels, max_size=None, use_unk=True):
self.pad_len = __pad_len
self.word2id = __word2id
self.pad_int = __word2id['<pad>']
if max_size is not None:
self.source = self.source[:max_size]
self.target = self.target[:max_size]
self.tag = self.tag[:max_size]
self.data = []
self.num_label = __num_labels
self.seq_len = []
self.only_single = True
self.use_unk = use_unk
self.read_data(__X) # process data
assert len(self.seq_len) == len(self.data)
def read_data(self, __X):
num_empty_lines = 0
for X in __X:
tokens = X.split()
if self.use_unk:
tmp = [self.word2id[x] if x in self.word2id else self.word2id['<unk>'] for x in tokens]
else:
tmp = [self.word2id[x] for x in tokens if x in self.word2id]
if len(tmp) == 0:
tmp = [self.word2id['<empty>']]
num_empty_lines += 1
# continue
self.seq_len.append(len(tmp) if len(tmp) < self.pad_len else self.pad_len)
if len(tmp) > self.pad_len:
tmp = tmp[: self.pad_len]
self.data.append(tmp + [self.pad_int] * (self.pad_len - len(tmp)))
print(num_empty_lines, 'empty lines found')
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return torch.LongTensor(self.data[idx]), torch.LongTensor([self.seq_len[idx]])
def build_vocab(X_train, vocab_size):
word_count = {}
word2id = {}
id2word = {}
for line in X_train:
tokens = line.split()
for word in tokens:
if word in word_count:
word_count[word] += 1
else:
word_count[word] = 1
word_list = [x for x, _ in sorted(word_count.items(), key=lambda v: v[1], reverse=True)]
if len(word_count) < vocab_size:
raise Exception('Vocab less than requested!!!')
# add <pad> first
word2id['<pad>'] = 0
id2word[0] = '<pad>'
word2id['<unk>'] = 1
id2word[1] = '<unk>'
word2id['<empty>'] = 2
id2word[2] = '<empty>'
n = len(word2id)
word_list = word_list[:vocab_size - n]
for word in word_list:
word2id[word] = n
id2word[n] = word
n += 1
return word2id, id2word
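# Note: ids 0-2 are reserved ('<pad>' = 0, '<unk>' = 1, '<empty>' = 2); the
# remaining vocab_size - 3 slots are filled with the most frequent corpus words,
# and an exception is raised if the corpus has fewer unique words than requested.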
def sort_batch(batch, ys, lengths):
seq_lengths, perm_idx = lengths.sort(0, descending=True)
seq_tensor = batch[perm_idx]
targ_tensor = ys[perm_idx]
return seq_tensor, targ_tensor, seq_lengths
def sort_batch_test(batch, lengths):
seq_lengths, perm_idx = lengths.sort(0, descending=True)
seq_tensor = batch[perm_idx]
rever_sort = np.zeros(len(seq_lengths))
for i, l in enumerate(perm_idx):
rever_sort[l] = i
return seq_tensor, seq_lengths, rever_sort.astype(int)
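# Both helpers sort a batch by sequence length (descending), presumably because
# the classifier packs padded sequences; rever_sort records where each original
# row ended up so predictions can be restored to input order, e.g. lengths
# [3, 7, 5] give perm_idx [1, 2, 0] and rever_sort [2, 0, 1].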
def one_fold(X_train, y_train, X_dev, y_dev):
num_labels = NUM_CLASS
vocab_size = 10000
pad_len = 50
batch_size = 24
embedding_dim = 200
hidden_dim = 400
__use_unk = False
word2id, id2word = build_vocab(X_train, vocab_size)
train_data = DataSet(X_train, y_train, pad_len, word2id, num_labels, use_unk=__use_unk)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
dev_data = DataSet(X_dev, y_dev, pad_len, word2id, num_labels, use_unk=__use_unk)
dev_loader = DataLoader(dev_data, batch_size=batch_size, shuffle=False)
model = AttentionLSTMClassifier(embedding_dim, hidden_dim, vocab_size, word2id,
num_labels, batch_size, use_att=True, soft_last=False)
model.load_glove_embedding(id2word)
model.cuda()
es = EarlyStop(2)
optimizer = optim.Adam(model.parameters(), lr=1e-5)
loss_criterion = nn.MSELoss() #
old_model = None
for epoch in range(100):
print('Epoch:', epoch, '===================================')
train_loss = 0
model.train()
for i, (data, seq_len, label) in enumerate(train_loader):
data, label, seq_len = sort_batch(data, label, seq_len.view(-1))
y_pred = model(Variable(data).cuda(), seq_len)
#roc_reward = roc_auc_score(label.numpy().argmax(axis=1), y_pred.data.cpu().numpy()[:, 1])
optimizer.zero_grad()
loss = loss_criterion(y_pred, Variable(label).cuda()) #* Variable(torch.FloatTensor([roc_reward])).cuda()
loss.backward()
optimizer.step()
train_loss += loss.data[0] * batch_size
pred_list = []
gold_list = []
test_loss = 0
model.eval()
for _, (_data, _seq_len, _label) in enumerate(dev_loader):
data, label, seq_len = sort_batch(_data, _label, _seq_len.view(-1))
y_pred = model(Variable(data, volatile=True).cuda(), seq_len)
loss = loss_criterion(y_pred, Variable(label).cuda()) #* Variable(torch.FloatTensor([roc_reward])).cuda()
test_loss += loss.data[0] * batch_size
y_pred = y_pred.data.cpu().numpy()
pred_list.append(y_pred) # x[np.where( x > 3.0 )]
gold_list.append(label.numpy())
# pred_list_2 = np.concatenate(pred_list, axis=0)[:, 1]
pred_list = np.concatenate(pred_list, axis=0)
gold_list = np.concatenate(gold_list, axis=0)
# roc = roc_auc_score(gold_list, pred_list_2)
# print('roc:', roc)
# a = accuracy_score(gold_list, pred_list)
# p = precision_score(gold_list, pred_list, average='binary')
# r = recall_score(gold_list, pred_list, average='binary')
# f1 = f1_score(gold_list, pred_list, average='binary')
# print('accuracy:', a, 'precision_score:', p, 'recall:', r, 'f1:', f1)
print("Train Loss: ", train_loss/len(train_data),
" Evaluation: ", test_loss/len(dev_data))
es.new_loss(test_loss)
if old_model is not None:
del old_model, old_pred_list
old_model = copy.deepcopy(model)
old_pred_list = copy.deepcopy(pred_list)
else:
old_model = copy.deepcopy(model)
old_pred_list = copy.deepcopy(pred_list)
if es.if_stop():
print('Start over fitting')
del model
model = old_model
pred_list = old_pred_list
torch.save(
model.state_dict(),
open(os.path.join(
'checkpoint',
'cbet.model'), 'wb'
)
)
with open('checkpoint/some_data.pkl', 'wb') as f:
pickle.dump([word2id, id2word], f)
break
return gold_list, pred_list, model, pad_len, word2id, num_labels
def make_test(X_test, model, pad_len, word2id, num_labels):
batch_size = 32
test_data = TestDataSet(X_test, pad_len, word2id, num_labels, use_unk=False)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)
pred_list = []
model.eval()
for i, (data, seq_len) in tqdm(enumerate(test_loader), total=len(test_data.data)/batch_size):
data, seq_len, rever_sort = sort_batch_test(data, seq_len.view(-1))
y_pred = model(Variable(data, volatile=True).cuda(), seq_len)
pred_list.append(y_pred.data.cpu().numpy()[rever_sort])
return np.concatenate(pred_list, axis=0)
def accuracy(gold_list, pred_list):
n = len(gold_list)
score = 0
for gold, pred in zip(gold_list, pred_list):
intersect = np.sum(np.dot(gold, pred))
union = np.sum(gold) + np.sum(pred) - intersect
score += intersect/union
score /= n
return score
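# accuracy() is the Jaccard-based multi-label accuracy (as used for
# SemEval-2018 Task 1 E-c): per tweet |gold AND pred| / |gold OR pred|,
# averaged over tweets. Worked example: gold [1, 0, 1], pred [1, 1, 0]
# -> intersection 1, union 3, per-tweet score 1/3.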
if __name__ == '__main__':
# label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
label_cols = ['anger', 'anticipation', 'disgust', 'fear', 'joy',
'love', 'optimism', 'pessimism', 'sadness', 'surprise', 'trust']
f_name_train = 'data/semeval2018/2018-E-c-En-train.txt'
X_train, y_train, _, _ = load_data(f_name_train, label_cols)
f_name_dev = 'data/semeval2018/2018-E-c-En-dev.txt'
X_dev, y_dev, _, _ = load_data(f_name_dev, label_cols)
gold_list, pred_list, model, pad_len, word2id, num_labels = one_fold(X_train, y_train, X_dev, y_dev)
thres_dict = {}
for threshold in [0.025 * x for x in range(4, 20)]:
print('Threshold:', threshold)
tmp_pred_list = np.asarray([1 & (v > threshold) for v in pred_list])
p = precision_score(gold_list, tmp_pred_list, average='macro')
r = recall_score(gold_list, tmp_pred_list, average='macro')
f1 = f1_score(gold_list, tmp_pred_list, average='macro')
f1_micro = f1_score(gold_list, tmp_pred_list, average='micro')
a = accuracy(gold_list, tmp_pred_list)
thres_dict[threshold] = f1
print('macro F1', f1,
'micro F1', f1_micro,
'accuracy', a)
f_name_test = 'data/semeval2018/2018-E-c-En-test.txt'
X_test, _, ID, tweet = load_data(f_name_test, label_cols)
pred_test = make_test(X_test, model, pad_len, word2id, num_labels)
label_cols_test = [col_name+'_val' for col_name in label_cols]
import operator
t = max(thres_dict.items(), key=operator.itemgetter(1))[0]
print('best threshold is ', t)
final_test = np.asarray([1 & (v > t) for v in pred_test])
test_df = | pd.DataFrame() | pandas.DataFrame |
import logging
import os
import random
import string
import pandas as pd
import pytest
import great_expectations as ge
from great_expectations.core.batch import Batch, BatchRequest
from great_expectations.core.util import get_or_create_spark_application
from great_expectations.data_context.util import file_relative_path
from great_expectations.execution_engine import SqlAlchemyExecutionEngine
from great_expectations.execution_engine.sqlalchemy_batch_data import (
SqlAlchemyBatchData,
)
from great_expectations.profile.base import (
OrderedProfilerCardinality,
profiler_semantic_types,
)
from great_expectations.profile.user_configurable_profiler import (
UserConfigurableProfiler,
)
from great_expectations.self_check.util import (
connection_manager,
get_sql_dialect_floating_point_infinity_value,
)
from great_expectations.util import is_library_loadable
from great_expectations.validator.validator import Validator
from tests.profile.conftest import get_set_of_columns_and_expectations_from_suite
logger = logging.getLogger(__name__)
try:
    import sqlalchemy
import sqlalchemy.dialects.postgresql as postgresqltypes
POSTGRESQL_TYPES = {
"TEXT": postgresqltypes.TEXT,
"CHAR": postgresqltypes.CHAR,
"INTEGER": postgresqltypes.INTEGER,
"SMALLINT": postgresqltypes.SMALLINT,
"BIGINT": postgresqltypes.BIGINT,
"TIMESTAMP": postgresqltypes.TIMESTAMP,
"DATE": postgresqltypes.DATE,
"DOUBLE_PRECISION": postgresqltypes.DOUBLE_PRECISION,
"BOOLEAN": postgresqltypes.BOOLEAN,
"NUMERIC": postgresqltypes.NUMERIC,
}
except ImportError:
sqlalchemy = None
postgresqltypes = None
POSTGRESQL_TYPES = {}
def get_pandas_runtime_validator(context, df):
batch_request = BatchRequest(
datasource_name="my_pandas_runtime_datasource",
data_connector_name="my_data_connector",
batch_data=df,
data_asset_name="IN_MEMORY_DATA_ASSET",
partition_request={
"batch_identifiers": {
"an_example_key": "a",
"another_example_key": "b",
}
},
)
expectation_suite = context.create_expectation_suite(
"my_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite=expectation_suite
)
return validator
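# Hedged usage sketch (the `context` fixture and column name are assumed, not
# taken from this file):
#   validator = get_pandas_runtime_validator(context, pd.DataFrame({"a": [1, 2, 3]}))
# returns a Validator bound to an in-memory pandas batch and a fresh "my_suite".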
def get_spark_runtime_validator(context, df):
spark = get_or_create_spark_application(
spark_config={
"spark.sql.catalogImplementation": "hive",
"spark.executor.memory": "450m",
# "spark.driver.allowMultipleContexts": "true", # This directive does not appear to have any effect.
}
)
df = spark.createDataFrame(df)
batch_request = BatchRequest(
datasource_name="my_spark_datasource",
data_connector_name="my_data_connector",
batch_data=df,
data_asset_name="IN_MEMORY_DATA_ASSET",
partition_request={
"batch_identifiers": {
"an_example_key": "a",
"another_example_key": "b",
}
},
)
expectation_suite = context.create_expectation_suite(
"my_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite=expectation_suite
)
return validator
def get_sqlalchemy_runtime_validator_postgresql(
df, schemas=None, caching=True, table_name=None
):
sa_engine_name = "postgresql"
db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost")
try:
engine = connection_manager.get_engine(
f"postgresql://postgres@{db_hostname}/test_ci"
)
except sqlalchemy.exc.OperationalError:
return None
sql_dtypes = {}
if (
schemas
and sa_engine_name in schemas
and isinstance(engine.dialect, postgresqltypes.dialect)
):
schema = schemas[sa_engine_name]
sql_dtypes = {col: POSTGRESQL_TYPES[dtype] for (col, dtype) in schema.items()}
for col in schema:
type_ = schema[col]
if type_ in ["INTEGER", "SMALLINT", "BIGINT"]:
df[col] = | pd.to_numeric(df[col], downcast="signed") | pandas.to_numeric |
import numpy as np
import pandas as pd
url = 'Reordered Linescan_nro 31 label JORDAN_234_P1_201901271901_MGA94_55.csv'
dfdos = pd.read_csv(url)
url = 'Reordered Linescan_nro 38 label WALHALLA_295_P1_201902011156_MGA94_55.csv'
dftres = pd.read_csv(url)
url = 'Reordered Linescan_nro 39 label JORDAN_310_P1_201902012046_MGA94_55.csv'
dfcuatro= pd.read_csv(url)
url = 'Reordered Linescan_nro 41 label WALHALLA_339_P1_201902030520_MGA94_55.csv'
dfcinco= | pd.read_csv(url) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # Benchmark Results
# This notebook visualizes the output from the different models on different classification problems
# In[1]:
import collections
import glob
import json
import os
import numpy as np
import pandas as pd
from plotnine import *
from saged.utils import split_sample_names, create_dataset_stat_df, get_dataset_stats, parse_map_file
# ## Set Up Functions and Get Metadata
# In[3]:
def return_unlabeled():
# For use in a defaultdict
return 'unlabeled'
# In[4]:
data_dir = '../../data/'
map_file = os.path.join(data_dir, 'sample_classifications.pkl')
sample_to_label = parse_map_file(map_file)
sample_to_label = collections.defaultdict(return_unlabeled, sample_to_label)
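# Because sample_to_label is a defaultdict, any accession missing from the map
# falls back to 'unlabeled', e.g. (hypothetical accession)
# sample_to_label['SRR0000000'] == 'unlabeled'.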
# In[ ]:
metadata_path = os.path.join(data_dir, 'aggregated_metadata.json')
metadata = None
with open(metadata_path) as json_file:
metadata = json.load(json_file)
sample_metadata = metadata['samples']
# In[ ]:
experiments = metadata['experiments']
sample_to_study = {}
for study in experiments:
for accession in experiments[study]['sample_accession_codes']:
sample_to_study[accession] = study
# ## Sepsis classification
# In[8]:
in_files = glob.glob('../../results/single_label.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
    model_info = os.path.splitext(path)[0].split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics
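# The filename parsing above recurs throughout this notebook. Note that str.strip('.tsv') removes
# any of the characters '.', 't', 's', 'v' from both ends of the path rather than a literal '.tsv'
# suffix; it happens to work for these result paths. A small helper like the sketch below (an
# illustration, not part of the original analysis) makes the intent explicit:
def parse_model_info(path, label='sepsis.'):
    """Return (unsupervised, supervised) parsed from a results filename."""
    stem = os.path.basename(path)
    if stem.endswith('.tsv'):
        stem = stem[:-len('.tsv')]
    parts = stem.split(label)[-1].split('.')
    if len(parts) == 4:
        return parts[0], parts[1]
    return 'untransformed', parts[0]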
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# In[11]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=3)
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# ## All labels
# In[12]:
in_files = glob.glob('../../results/all_labels.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[13]:
metrics = None
for path in in_files:
if metrics is None:
metrics = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
metrics['unsupervised'] = unsupervised_model
metrics['supervised'] = supervised_model
else:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
metrics = pd.concat([metrics, new_df])
metrics
# In[14]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# In[15]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=2)
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# # Subsets of healthy labels
# In[16]:
in_files = glob.glob('../../results/subset_label.sepsis*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[17]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[18]:
print(sepsis_metrics[sepsis_metrics['healthy_used'] == 1])
# In[19]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[20]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[21]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Same analysis, but with tb instead of sepsis
# In[22]:
in_files = glob.glob('../../results/subset_label.tb*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[23]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[24]:
print(tuberculosis_metrics[tuberculosis_metrics['healthy_used'] == 1])
# In[25]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[26]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[27]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Supervised Results Only
# The results above show that unsupervised learning mostly hurts performance rather than helping.
# The visualizations below compare each model based only on its supervised results.
# In[28]:
supervised_sepsis = sepsis_metrics[sepsis_metrics['unsupervised'] == 'untransformed']
# In[29]:
plot = ggplot(supervised_sepsis, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[30]:
supervised_tb = tuberculosis_metrics[tuberculosis_metrics['unsupervised'] == 'untransformed']
# In[31]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[32]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Batch Effect Correction
# In[33]:
in_files = glob.glob('../../results/subset_label.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[34]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
print(model_info)
model_info = model_info.split('.')
print(model_info)
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[35]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[36]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## TB Batch effect corrected
# In[37]:
in_files = glob.glob('../../results/subset_label.tb*be_corrected.tsv')
print(in_files[:5])
# In[38]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[39]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[40]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Better Metrics, Same Label Distribution in Train and Val sets
# In[11]:
in_files = glob.glob('../../results/keep_ratios.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[12]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[13]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[14]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[15]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[16]:
sepsis_stat_df = create_dataset_stat_df(sepsis_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'sepsis')
sepsis_stat_df.tail(5)
# In[17]:
ggplot(sepsis_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[18]:
plot = ggplot(sepsis_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Effect of All Sepsis Data')
plot
# ## Same Distribution Tuberculosis
# In[19]:
in_files = glob.glob('../../results/keep_ratios.tb*be_corrected.tsv')
print(in_files[:5])
# In[20]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics = tb_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
tb_metrics['healthy_used'] = tb_metrics['healthy_used'].round(1)
tb_metrics
# In[21]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[22]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[23]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[24]:
tb_stat_df = create_dataset_stat_df(tb_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'tb')
tb_stat_df.tail(5)
# In[55]:
ggplot(tb_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[25]:
plot = ggplot(tb_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot
# ## Results from Small Datasets
# In[57]:
in_files = glob.glob('../../results/small_subsets.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[58]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[59]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[60]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[61]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## Small Training Set TB
# In[62]:
in_files = glob.glob('../../results/small_subsets.tb*be_corrected.tsv')
print(in_files[:5])
# In[63]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[64]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size Effects (equal label counts)')
print(plot)
# In[65]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size vs Models (equal label counts)')
print(plot)
# In[66]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth(method='loess')
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('TB (lack of a) Crossover Point')
plot
# ## Small training sets without be correction
# In[67]:
in_files = glob.glob('../../results/small_subsets.sepsis*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[68]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[69]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[70]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[71]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## TB Not Batch Effect Corrected
# In[72]:
in_files = glob.glob('../../results/small_subsets.tb*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[73]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[74]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('tb Dataset Size Effects (equal label counts)')
print(plot)
# In[75]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('tb Dataset Size by Model (equal label counts)')
print(plot)
# In[76]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('tb Crossover Point')
plot
# ## Large training sets without be correction
# In[6]:
in_files = glob.glob('../../results/keep_ratios.sepsis*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## TB Not Batch Effect Corrected
# In[80]:
in_files = glob.glob('../../results/keep_ratios.tb*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[81]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = | pd.read_csv(path, sep='\t') | pandas.read_csv |
# let's go
import openpyxl
from default.sets import InitialSetting
from default.webdriver_utilities.wbs import WDShorcuts
from default.interact import press_keys_b4, press_key_b4
from selenium.webdriver import Chrome
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import *
from default.webdriver_utilities.pre_drivers import pgdas_driver, ginfess_driver
from time import sleep
from default.webdriver_utilities.pre_drivers import ginfess_driver
from openpyxl import Workbook
from openpyxl.utils.cell import coordinate_from_string
from openpyxl.utils import get_column_letter as gcl
import pandas as pd
import os
class DownloadGinfessGui(InitialSetting, WDShorcuts):
# only static methods from JsonDateWithDataImprove
def __init__(self, *dados, compt, show_driver=False):
# driver
__r_social, __cnpj, _ginfess_cod, link = dados
self.compt = compt
        # same as self.any_to_str, but this one accepts unpacked args
self.client_path = self.files_pathit(__r_social.strip(), self.compt)
        # Check whether a certificate already exists
if _ginfess_cod.lower() == 'não há':
            # removed the ja_imported
print(
f'\033[1;31m o cliente {__r_social} não possui notas\n...(muito bom) O certificado anula o _ja_imported...\033[m')
elif self.check_done(self.client_path, '.png', startswith=__r_social):
            # Only checks the ginfess certificate
if show_driver:
driver = pgdas_driver
self.__driver__name = driver.__name__
self.driver = driver = pgdas_driver(self.client_path)
else:
driver = ginfess_driver
self.__driver__name = driver.__name__
driver = self.driver = ginfess_driver(self.client_path)
driver.maximize_window()
self.driver.get(link)
if self.driver.title != 'NFS-e' and 'tremembe' not in self.driver.current_url:
self.driver.quit()
driver = pgdas_driver
self.__driver__name = driver.__name__
self.driver = driver = pgdas_driver(self.client_path)
self.driver.get(link)
            # choose the driver
print('tremembe' not in self.driver.current_url)
# for
driver = self.driver
super().__init__(self.driver)
# if city in 'ABC':
if self.driver.title == 'NFS-e':
            # city links
# driver.maximize_window()
# #######################################################
self.ABC_ginfess(__cnpj, _ginfess_cod)
# #######################################################
try:
# Find existent tags
driver.implicitly_wait(5)
self.tags_wait('table', 'tbody', 'tr', 'td')
print('printscreen aqui')
self.download()
driver.implicitly_wait(5)
                # Initial creation
excel_file = os.path.join(
self.client_path, f'{__cnpj}.xlsx')
                # Here
self.excel_from_html_above(
excel_file, html=self.ginfess_table_valores_html_code())
except IndexError:
print('~' * 30)
print('não emitiu nenhuma nota'.upper())
print('~' * 30)
driver.save_screenshot(self.certif_feito(
self.client_path, add=__r_social))
            # moved everything into this branch
elif self.driver.current_url == 'https://tremembe.sigiss.com.br/tremembe/contribuinte/login.php':
driver.implicitly_wait(5)
zero_um = _ginfess_cod.split('//')
            # ginfess login//password
self.tags_wait('html')
self.tags_wait('body')
while True:
driver.implicitly_wait(5)
ccm = driver.find_element(By.ID, 'ccm')
senha = driver.find_element(By.ID, 'senha')
confirma = driver.find_element(By.ID, 'confirma')
ccm.send_keys(zero_um[0])
for el in ccm, senha, confirma:
el.clear()
ccm.send_keys(zero_um[0])
senha.send_keys(zero_um[1])
trem_cod = self.captcha_hacking()
confirma.send_keys(trem_cod)
# driver.find_element(By.ID, 'btnOk').click()
if 'login.php' in driver.current_url:
driver.refresh()
driver.implicitly_wait(6)
else:
break
print('break')
driver.implicitly_wait(10)
driver.execute_script("""function abre_arquivo(onde){
var iframe = document.getElementById("main");
iframe.src = onde;
}
""")
driver.execute_script(
"abre_arquivo('dmm/_menuPeriodo.php');")
driver.implicitly_wait(5)
# self.tag_with_text('td', 'Movimento ').click()
sleep(5)
iframe = driver.find_element(By.ID, 'main')
driver.switch_to.frame(iframe)
driver.find_element(By.NAME, 'btnAlterar').click()
driver.implicitly_wait(5)
            # handling the select element
compt = self.compt
mes, ano = compt.split('-')
driver.find_element(By.NAME, 'ano').clear()
driver.find_element(By.NAME, 'ano').send_keys(ano)
mes = self.nome_mes(int(mes))
driver.find_element(By.XPATH,
f"//select[@name='mes']/option[text()='{mes}']").click()
# driver.find_element(By.NAME, 'ano').send_keys(ano)
driver.implicitly_wait(5)
driver.find_element(By.ID, 'btnOk').click()
driver.implicitly_wait(10)
# iframe = driver.find_element(By.ID, 'iframe')
# driver.switch_to.frame(iframe)
self.tag_with_text('td', 'Encerramento').click()
# driver.switch_to.alert().accept()
# driver.get('../fechamento/prestado.php')
driver.find_element(By.XPATH,
'//a[contains(@href,"../fechamento/prestado.php")]').click()
driver.implicitly_wait(10)
try:
driver.find_element(By.ID, 'btnSalvar').click()
driver.implicitly_wait(5)
                driver.switch_to.alert.accept()
driver.implicitly_wait(5)
# driver.back()
except (NoSuchElementException, NoAlertPresentException):
print('Já encerrado')
finally:
driver.implicitly_wait(5)
driver.back()
driver.back()
driver.execute_script("""function abre_arquivo(onde){
var iframe = document.getElementById("main");
iframe.src = onde;
}
""")
driver.execute_script(
"abre_arquivo('dmm/_menuPeriodo.php');")
iframe = driver.find_element(By.ID, 'main')
driver.switch_to.frame(iframe)
driver.find_element(By.NAME, 'btnAlterar').click()
driver.find_element(By.NAME, 'btnOk').click()
            # ############### validate driver.back()
url = '/'.join(driver.current_url.split('/')[:-1])
driver.get(f'{url}/nfe/nfe_historico_exportacao.php')
driver.implicitly_wait(3)
self.tags_wait('html')
self.tags_wait('body')
driver.implicitly_wait(2)
driver.find_element(By.ID, 'todos').click()
driver.find_element(By.ID, 'btnExportar').click()
driver.switch_to.alert.accept()
path_zip = self.client_path
print(f'path_zip-> {path_zip}')
self.unzip_folder(path_zip)
driver.switch_to.default_content()
driver.save_screenshot(self.certif_feito(
self.client_path, add=__r_social))
elif self.driver.current_url == 'https://app.siappa.com.br/issqn_itupeva/servlet/com.issqnwebev3v2.login':
self.driver.find_element(By.ID, 'vUSR_COD').send_keys(__cnpj)
self.driver.find_element(By.CSS_SELECTOR,
'[type="password"]').send_keys(_ginfess_cod)
# d = Chrome().
press_keys_b4('f9')
driver.save_screenshot(self.certif_feito(
self.client_path, add=__r_social))
elif self.driver.current_url == 'https://bragancapaulista.giap.com.br/apex/pmbp/f?p=994:101':
a = __login, __senha = _ginfess_cod.split('//')
self.driver.find_element(By.ID,
'P101_USERNAME').send_keys(__login)
self.driver.find_element(By.CSS_SELECTOR,
'[type="password"]').send_keys(str(__senha))
self.click_ac_elementors(self.tag_with_text('span', 'ENTRAR'))
# CONSULTAR
self.driver.implicitly_wait(30)
self.driver.execute_script(
"javascript:apex.submit('EMISSAO NOTA');")
mes, ano = self.compt.split('-')
mes = self.nome_mes(int(mes))
self.driver.find_element(By.XPATH,
f"//select[@name='P26_MES']/option[text()='{mes}']").click()
self.driver.find_element(By.XPATH,
f"//select[@name='P26_ANO']/option[text()='{ano}']").click()
# CONSULTAR
self.driver.execute_script(
"apex.submit({request:'P26_BTN_CONSULTAR'});")
print('Digite f9 para continuar')
press_key_b4('f9')
self.driver.save_screenshot(self.certif_feito(
self.client_path, add=__r_social))
else:
print(__r_social)
driver.execute_script("javascript:location.reload();")
self.send_keys_anywhere(__cnpj)
self.send_keys_anywhere(Keys.TAB)
self.send_keys_anywhere(_ginfess_cod)
self.send_keys_anywhere(Keys.TAB)
if self.__driver__name == "pgdas_driver":
from win10toast import ToastNotifier
ToastNotifier().show_toast("Pressione F9 para continuar", duration=10)
press_keys_b4('f9')
driver.save_screenshot(self.certif_feito(
self.client_path, add=__r_social))
[(print(f'Sleeping before close {i}'), sleep(1))
for i in range(5, -1, -1)]
driver.close()
def wait_main_tags(self):
self.tags_wait('body', 'div', 'table')
def ABC_ginfess(self, __cnpj, __senha):
driver = self.driver
def label_with_text(searched):
label = driver.find_element(By.XPATH,
f"//label[contains(text(),'{searched.rstrip()}')]")
return label
def button_with_text(searched):
bt = driver.find_element(By.XPATH,
f"//button[contains(text(),'{searched.rstrip()}')]")
return bt
def a_with_text(searched):
link_tag = driver.find_element(By.XPATH,
f"//a[contains(text(),'{searched.rstrip()}')]")
return link_tag
self.wait_main_tags()
""" ~~~~~~~~~~~~~~~~~~~~~~ GLÓRIA A DEUS ~~~~~~~~~~~~~~~~~~"""
name_c = 'gwt-DialogBox'
self.del_dialog_box(name_c)
self.wait_main_tags()
self.tags_wait('img')
driver.implicitly_wait(10)
try:
try:
button_with_text('OK').click()
except (NoSuchElementException, ElementClickInterceptedException):
pass
driver.find_element(By.XPATH, '//img[@src="imgs/001.gif"]').click()
except (NoSuchElementException, ElementClickInterceptedException):
pass
name_c = 'x-window-dlg', 'ext-el-mask', 'x-shadow'
try:
for name in name_c:
try:
self.del_dialog_box(name)
except NoSuchElementException:
print('Except dentro do except e no for, [linha 310]')
...
driver.implicitly_wait(5)
button_with_text('OK').click()
except (NoSuchElementException, ElementClickInterceptedException):
print('Sem janela possivel, linha 246')
# driver.execute_script('window.alert("Não foi possível prosseguir")')
driver.implicitly_wait(5)
""" ~~~~~~~~~~~~~~~~~~~~~~ GLÓRIA A DEUS ~~~~~~~~~~~~~~~~~~"""
# print('mandando teclas...')
label_with_text("CNPJ:").click()
self.send_keys_anywhere(__cnpj)
passwd = driver.find_element(By.XPATH, "//input[@type='password']")
self.tags_wait('body', 'img')
passwd.clear()
passwd.send_keys(__senha)
button_with_text("Entrar").click()
        # handling the data update
driver.implicitly_wait(15)
try:
self.wait_main_tags()
button_with_text('X').click()
button_with_text('X').click()
print('CLICADO, X. Linha 263')
except (NoSuchElementException, ElementClickInterceptedException):
print('Tentando atualizar os dados')
a_with_text("Consultar").click()
print('Waiting main_excel_manager tags')
self.wait_main_tags()
period = label_with_text('Período')
period.click()
driver.implicitly_wait(5)
de = label_with_text('De:')
de.click()
self.send_keys_anywhere(Keys.BACKSPACE, 10)
first, last = self.first_and_last_day_compt(
self.compt, zdate_wontbe_greater=False)
self.send_keys_anywhere(first)
driver.implicitly_wait(2.5)
self.send_keys_anywhere(Keys.TAB)
self.send_keys_anywhere(last)
driver.implicitly_wait(5)
button_with_text("Consultar").click()
self.wait_main_tags()
driver.implicitly_wait(10)
def download(self):
"""
:city: A, B, C only
:return:
"""
driver = self.driver
try:
try:
# self.del_dialog_box('x-shadow')
xsh = driver.find_element(By.CLASS_NAME, 'x-shadow')
if 'block' in xsh.get_attribute('style'):
self.del_dialog_box('x-shadow')
driver.implicitly_wait(10)
print('W-SHADOW-DELETADO')
else:
raise NoSuchElementException
except NoSuchElementException:
print('Tem notas')
driver.implicitly_wait(10)
downloada_xml = driver.find_element(By.XPATH,
'//img[@src="imgs/download.png"]')
try:
downloada_xml.click()
except ElementClickInterceptedException:
self.click_ac_elementors(downloada_xml)
except ElementNotInteractableException:
pass
# self.click_ac_elementors(downloada_xml)
except NoSuchElementException:
print('NÃO CONSEGUI FAZER DOWNLOAD...')
def ginfess_table_valores_html_code(self):
"""
        :return: (html_cod): its HTML code if the ytb-text class exists; scrape it
"""
driver = self.driver
max_value_needed = driver.find_elements(By.CLASS_NAME, 'ytb-text')
max_value_needed = max_value_needed[1].text[-1]
print(max_value_needed)
self.tags_wait('input', 'body')
cont_export = 1
xml_pages = driver.find_element(By.XPATH,
'//input[@class="x-tbar-page-number"]')
driver.implicitly_wait(5)
number_in_pages = xml_pages.get_attribute('value')
html_cod = """
<style>/*.detalheNota:after{background: red; content: 'cancelada';}*/
.notaCancelada{
background: red
}
</style>
""".strip()
for i in range(10):
print(number_in_pages)
xml_pages.send_keys(Keys.BACKSPACE)
xml_pages.send_keys(cont_export)
xml_pages.send_keys(Keys.ENTER)
driver.implicitly_wait(5)
print('CALMA...')
cont_export += 1
number_in_pages = xml_pages.get_attribute('value')
# // div[ @ id = 'a'] // a[ @class ='click']
wanted_wanted = driver.find_elements(By.XPATH,
"//div[contains(@class, 'x-grid3-row')]")
print(wanted_wanted[0].text)
# table = wanted_wanted
for w in wanted_wanted:
# w.click()
print(w.text)
html_cod += w.get_attribute('innerHTML')
# sleep(2)
# XML_to_excel.ginfess_scrap()
if int(cont_export) == int(max_value_needed) + 1:
break
print('breakou')
print('~~~~~~~~')
return html_cod
# de.send_keys(Keys.TAB)
def excel_from_html_above(self, excel_file, html):
from bs4 import BeautifulSoup
from openpyxl.styles import PatternFill
mylist = pd.read_html(html)
soup = BeautifulSoup(html, 'html.parser')
with_class = tables = [str(table) for table in soup.select('table')]
        df = pd.concat([l for l in mylist])
# install pattern
# install gensim
# install nltk
# install pyspellchecker
import re
import pandas as pd
import numpy as np
import gensim
from collections import Counter
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
from spellchecker import SpellChecker
class Cleaning:
def __init__(self):
self.WORDS = {}
return
# remove urls (starts with https, http)
def remove_URL(self, col):
text = col.tolist()
TEXT=[]
for word in text:
if pd.isnull(word):
TEXT.append(word)
else:
TEXT.append(re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', str(word)))
se = pd.Series(TEXT)
return(se)
def count_mark(self, col):
df = pd.DataFrame()
rdf = pd.DataFrame()
# remove the special characters (numbers, exclamations and question marks) from the text
# store them in a dataframe for later use
text = col.tolist()
for row in text:
if pd.isnull(row):
ser = pd.Series([np.nan,np.nan,np.nan,np.nan], index=['Number', 'Exclamation_count', 'Question_Mark_count', 'Comments_OE'])
df = df.append(ser, ignore_index=True)
else:
numVals = []
excCount = []
quesCount = []
num = re.findall(r'\b\d+\b', row)
numVals.append(num)
# remove the number from the text
for n in num:
row = row.replace(n, '')
excCount.append(row.count('!'))
row = row.replace('!', '')
quesCount.append(row.count('?'))
row = row.replace('?', '')
numSeries = pd.Series(numVals)
rdf['Number'] = numSeries.values
excSeries = pd.Series(excCount)
rdf['Exclamation_count'] = excSeries
quesSeries = pd.Series(quesCount)
rdf['Question_Mark_count'] = quesSeries
txtSeries = pd.Series(row)
rdf['Comments_OE'] = txtSeries
df = df.append(rdf, ignore_index=True)
rdf = pd.DataFrame()
df.reset_index(inplace=True)
return(df)
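    # Illustrative behaviour (a sketch, not from the original module): for an input like
    # pd.Series(["Call me at 555! Ok?"]) the returned frame holds the extracted numbers (['555']),
    # Exclamation_count=1, Question_Mark_count=1, and Comments_OE with digits, '!' and '?' removed.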
def remove_special(self, col):
tokenizer = RegexpTokenizer(r'\w+')
text = col.str.lower().tolist()
TEXT=[]
for word in text:
if pd.isnull(word):
TEXT.append(word)
else:
TEXT.append(' '.join(tokenizer.tokenize(word)))
se = pd.Series(TEXT)
return(se)
def remove_stop (self, col):
stop_words = stopwords.words('english')
# Customize stopwords list, add UBC and ubc
fileName = "config/pair_stopwords.txt"
lineList = [line.rstrip('\n') for line in open(fileName)]
stop_words.extend(lineList)
# print (stop_words)
TEXT = []
filtered_text = []
for resp in col:
if pd.isnull(resp):
TEXT.append(resp)
else:
resp = resp.replace(' co op ', ' coop ') # problem specific
resp = resp.replace(' co operative ', ' cooperative ') # problem specific
wordlst = resp.split()
for word in wordlst:
if word not in stop_words:
filtered_text.append(word)
TEXT.append(' '.join(filtered_text))
filtered_text = []
se = pd.Series(TEXT)
return(se)
# function to convert nltk tag to wordnet tag
def __nltk_tag_to_wordnet_tag(self, nltk_tag):
if nltk_tag.startswith('J'):
return wordnet.ADJ
elif nltk_tag.startswith('V'):
return wordnet.VERB
elif nltk_tag.startswith('N'):
return wordnet.NOUN
elif nltk_tag.startswith('R'):
return wordnet.ADV
else:
return None
def lemmatize_response(self, col):
lemmatizer = WordNetLemmatizer()
TEXT = []
lemma_text = []
for resp in col:
            if pd.isnull(resp):
from pyworkflow.node import IONode, NodeException
from pyworkflow.parameters import *
import pandas as pd
class WriteCsvNode(IONode):
"""WriteCsvNode
Writes the current DataFrame to a CSV file.
Raises:
NodeException: any error writing CSV file, converting
from DataFrame.
"""
name = "Write CSV"
num_in = 1
num_out = 0
download_result = True
OPTIONS = {
"file": StringParameter(
"Filename",
docstring="CSV file to write"
),
"sep": StringParameter(
"Delimiter",
default=",",
docstring="Column delimiter"
),
"index": BooleanParameter(
"Write Index",
default=True,
docstring="Write index as column?"
),
}
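    # The "sep" and "index" options presumably map onto the sep= and index= keyword arguments of
    # pandas.DataFrame.to_csv, with "file" as the target path.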
def execute(self, predecessor_data, flow_vars):
try:
# Convert JSON data to DataFrame
            df = pd.DataFrame.from_dict(predecessor_data[0])
import pandas as pd
import string
import numpy as np
import pkg_resources
import seaborn as sns
from PIL import Image
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from pdfminer.high_level import extract_text
from tqdm import tqdm
import os
class wording:
def __init__(self):
self.resource_package = __name__
self.file = '/'.join(('config', 'lematizer.csv'))
self.file_path = pkg_resources.resource_filename(self.resource_package, self.file)
self.df_lema = self.load_lema()
self.file = '/'.join(('config', 'stopwords.csv'))
self.file_path = pkg_resources.resource_filename(self.resource_package, self.file)
self.df_stopwords = self.load_stopwords()
self.file = '/'.join(('config', 'positive.csv'))
self.file_path = pkg_resources.resource_filename(self.resource_package, self.file)
self.positive_words = self.load_positive_words()
self.file = '/'.join(('config', 'negative.csv'))
self.file_path = pkg_resources.resource_filename(self.resource_package, self.file)
self.negative_words = self.load_negative_words()
self.file_cw = '/'.join(('config', 'class_words.csv'))
self.file_path_cw = pkg_resources.resource_filename(self.resource_package, self.file_cw)
self.df_wc = self.load_class_words()
self.file_nomes = '/'.join(('config', 'nomes.csv'))
self.file_path_nomes = pkg_resources.resource_filename(self.resource_package, self.file_nomes)
self.nomes_pessoas = self.load_nomes()
self.file_cidades = '/'.join(('config', 'cidades.csv'))
self.file_path_cidades = pkg_resources.resource_filename(self.resource_package, self.file_cidades)
self.cidades = self.load_cidades()
self.file_estados = '/'.join(('config', 'estados.csv'))
self.file_path_estados = pkg_resources.resource_filename(self.resource_package, self.file_estados)
self.estados = self.load_estados()
self.tfidf = pd.DataFrame()
self.colection = pd.DataFrame()
def load_file(self, file='none', type='txt', header=False, sep=',', column='None'):
if file == 'none':
raise ValueError('No Filename was provided, need one')
if type == 'excel':
df = pd.read_excel(file)
if column != 'None':
                df = pd.DataFrame({'word': df[column]})
else:
raise TypeError("An xlsx file column was not selected")
if type == 'csv':
if header:
header=0
else:
header=None
df = pd.read_csv(file, header=header, sep=sep)
if column != 'None':
df = pd.DataFrame({'word': df[column]})
else:
raise TypeError("An csv file column was not selected")
if type == 'txt':
f = open(file, "r", encoding='utf8', errors='ignore')
df = f.read()
df = pd.DataFrame(df.split('\n'))
df.columns = ['word']
if type == 'pdf' :
df = self.load_pdf(file)
df = pd.DataFrame([df])
df.columns = ['word']
self.colection = df.copy()
def load_lema(self):
df_lema = pd.read_csv(self.file_path, sep=',')
df_lema.columns = ['word','lema']
return(df_lema)
def load_positive_words(self):
df_pw = pd.read_csv(self.file_path)
df_pw.columns = ['word']
return(df_pw)
def load_negative_words(self):
df_pw = pd.read_csv(self.file_path)
df_pw.columns = ['word']
return(df_pw)
def load_stopwords(self):
df_sw = pd.read_csv(self.file_path, sep=';', header=None)
df_sw.columns = ['stopword']
return(df_sw)
def load_nomes(self):
df_nome = pd.read_csv(self.file_path_nomes, sep=';')
return(df_nome)
def load_cidades(self):
df_cidades = pd.read_csv(self.file_path_cidades, sep=';')
return(df_cidades)
def load_estados(self):
df_estados = pd.read_csv(self.file_path_estados, sep=';')
return(df_estados)
def del_stopwords(self, text, stopwords=True):
output = list()
text = self.del_punck(text)
text = text.lower()
for word in text.split(' '):
if stopwords:
if len(word) > 3:
result = ''.join([str(x) for x in self.df_stopwords[self.df_stopwords['stopword'] == word]['stopword']])
if len(result) == 0:
output.append(word)
else:
output.append(word)
return(output)
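    # Note: when stopwords=True, the len(word) > 3 guard above also drops every token of three
    # characters or fewer, independently of the stopword list.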
def del_punck(self, text):
punck = ",.;/<>:?[]{}+_)(*&$#@!)1234567890\n\t\r"
for c in punck:
text = text.replace(c,'')
text = text.replace('"', '')
return(text)
def get_lema(self, text, lemmatizer=True):
output = list()
for word in text:
if lemmatizer:
w_lema = ''.join([self.df_lema[self.df_lema['lema'] == word]['word'].unique()][0])
if len(w_lema) == 0:
output.append(word)
else:
output.append(w_lema)
else:
output.append(word)
return(output)
def build_tf(self, df, stopwords=True, lemmatizer=True, silence=False):
frame_tfidf = pd.DataFrame()
if silence:
for i in range(df.shape[0]):
frame_aux = pd.DataFrame()
line = ''.join(df.loc[i])
text = self.del_stopwords(line, stopwords=stopwords)
text = self.get_lema(text, lemmatizer=lemmatizer)
frame_aux['word'] = text
frame_aux['doc'] = 'doc-' + str(i)
frame_tfidf = frame_tfidf.append(frame_aux)
else:
for i in tqdm(range(df.shape[0])):
frame_aux = pd.DataFrame()
line = ''.join(df.loc[i])
text = self.del_stopwords(line, stopwords=stopwords)
text = self.get_lema(text, lemmatizer=lemmatizer)
frame_aux['word'] = text
frame_aux['doc'] = 'doc-' + str(i)
frame_tfidf = frame_tfidf.append(frame_aux)
frame_tfidf['count'] = 1
return(frame_tfidf[['doc','word','count']])
def build_tf_idf(self, stopwords=True, lemmatizer=True, silence=False):
df = self.colection.copy()
f = self.build_tf(df, stopwords=stopwords, lemmatizer=lemmatizer, silence=silence)
n = df.shape[0]
f = f.groupby(by=['doc','word']).count().reset_index()
f.rename(columns={'count':'f'},inplace=True)
f['tf'] = 1+ np.log2(f['f'])
f['idf'] = 0
idf = f.groupby(by=['word']).count().reset_index()[['word','tf']]
idf.rename(columns={'tf':'idf'}, inplace=True)
idf['log'] = np.log2(1+ (n/idf['idf']))
if silence:
for i in range(f.shape[0]):
w = ''.join(f.loc[i:i,'word'])
f.loc[i:i,'idf'] = float(idf[idf['word'] == w]['log'])
else:
for i in tqdm(range(f.shape[0])):
w = ''.join(f.loc[i:i,'word'])
f.loc[i:i,'idf'] = float(idf[idf['word'] == w]['log'])
f['tf_idf'] = f['tf'] * f['idf']
self.tfidf = f.copy()
self.set_sign()
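    # Worked example for the weighting above (rounded): a term occurring f=4 times in a document
    # gets tf = 1 + log2(4) = 3; if it appears in 2 of n=8 documents, idf = log2(1 + 8/2) ≈ 2.32,
    # so tf_idf ≈ 6.97. Rare but locally frequent terms therefore dominate the ranking.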
def set_sign(self):
self.tfidf['sign'] = ''
for i in range(self.tfidf.shape[0]):
word = self.tfidf.loc[i,'word']
p = self.positive_words[self.positive_words['word'] == word]
n = self.negative_words[self.negative_words['word'] == word]
if len(p) == 0 and len(n) > 0:
self.tfidf.loc[i,'sign'] = 'negative'
elif len(p) == 0 and len(n) == 0:
self.tfidf.loc[i,'sign'] = 'neutral'
elif len(p) > 0 and len(n) == 0:
self.tfidf.loc[i,'sign'] = 'positive'
elif len(p) > 0 and len(n) > 0:
self.tfidf.loc[i,'sign'] = 'ambiguous'
def sentimental_graf(self, rotate=False):
bar = pd.DataFrame(self.tfidf['sign'].value_counts()).reset_index()
bar.columns = ['Sentimental','frequency']
if rotate:
img = sns.barplot(y='Sentimental', x='frequency', data=bar)
else:
img = sns.barplot(x='Sentimental', y='frequency', data=bar)
return(img)
def sentimental_table(self):
bar = pd.DataFrame(self.tfidf['sign'].value_counts()).reset_index()
bar.columns = ['Sentimental','frequency']
return(bar)
def word_cloud(self, picture='none'):
resource_package = __name__
file = '/'.join(('config', 'cloud.jpeg'))
file_path = pkg_resources.resource_filename(resource_package, file)
if picture == 'none':
mask = np.array(Image.open(file_path))
else:
mask = np.array(Image.open(picture))
tuples = [tuple(x) for x in self.tfidf[['word','tf_idf']].values]
wc = WordCloud(background_color="white", max_words=1000, mask=mask).generate_from_frequencies(frequencies=dict(tuples))
plt.figure(figsize=(15,15))
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
return(plt)
def look2word(self, wsearch):
output = pd.DataFrame({'index': [],'word': []})
for i in range(self.colection.shape[0]):
line = self.del_punck(self.colection.loc[i,'word'])
for word in line.split(' '):
if word == wsearch:
output = output.append(pd.DataFrame({'index':[int(i)],'word':[line]}))
break
output['index'] = output['index'].apply(lambda x: int(x))
output = output.set_index('index')
return(output)
def load_class_words(self):
df_lema = pd.read_csv(self.file_path_cw, sep=';')
return(df_lema)
def set_class(self, word='none', wclass='none', force=False):
word = word.lower()
wclass = wclass.lower()
exist = ''.join(self.df_wc[self.df_wc['word'] == word]['class'])
save = False
if exist == '-':
self.df_wc.loc[self.df_wc['word'] == word,'class'] = wclass
save = True
elif force:
self.df_wc.loc[self.df_wc['word'] == word,'class'] = wclass
save = True
else:
print('Word ' + word + ' has a class named ' + wclass + ' you must use force=True to change the class')
if save:
self.df_wc.to_csv(self.file_path_cw, sep=';', index=False)
def get_class(self, word='none'):
word = word.lower()
return(''.join(self.df_wc[self.df_wc['word'] == word]['class']))
def load_pdf(self, file, silence=False):
if not silence:
print('Reading PDF file ' + file)
text = extract_text(file)
text_line = text.split('\n')
doc = ''
if silence:
for line in text_line:
if len(line) > 0:
doc = doc + line + ' '
else:
for line in tqdm(text_line):
if len(line) > 0:
doc = doc + line + ' '
return(doc)
def find_cities(self, city):
result = int(self.colection['word'].str.find(city))
return('Substring ' + city + ' found at index: ' + str(result))
def load_colection(self, dir, type='pdf', silence=False):
files = [x for x in os.listdir(dir) if x.endswith("." + type)]
if len(files) == 0:
raise TypeError("File type " + type + " not found")
if silence:
for file in files:
if type == 'pdf':
df = self.load_pdf(os.path.join(dir, file),silence=silence)
elif type == 'txt':
f = open(file, "r")
df = f.read()
df = pd.DataFrame(df.split('\n'))
df.columns = ['word']
f.close()
else:
raise TypeError("File type " + type + " not permited")
df = pd.DataFrame([df])
df.columns = ['word']
self.colection = self.colection.append(df)
else:
for file in tqdm(files):
if type == 'pdf':
df = self.load_pdf(os.path.join(dir, file),silence=silence)
elif type == 'txt':
f = open(file, "r")
df = f.read()
df = pd.DataFrame(df.split('\n'))
df.columns = ['word']
f.close()
else:
raise TypeError("File type " + type + " not permited")
                df = pd.DataFrame([df])
################################################################################
##### For Bloomberg ------------------------------------------------------------
##### Can't use this if you're on a Mac :(
################################################################################
from __future__ import print_function
from __future__ import absolute_import
from optparse import OptionParser
import os
import platform as plat
import sys
if sys.version_info >= (3, 8) and plat.system().lower() == "windows":
# pylint: disable=no-member
with os.add_dll_directory(os.getenv('BLPAPI_LIBDIR')):
import blpapi
else:
import blpapi
from utils import date_to_str
import pandas as pd
def parseCmdLine():
parser = OptionParser(description="Retrieve reference data.")
parser.add_option("-a",
"--ip",
dest="host",
help="server name or IP (default: %default)",
metavar="ipAddress",
default="localhost")
parser.add_option("-p",
dest="port",
type="int",
help="server port (default: %default)",
metavar="tcpPort",
default=8194)
(options, args) = parser.parse_args()
return options
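# Example invocation of the options above (script name is illustrative):
#   python bloomberg_pull.py --ip localhost -p 8194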
def req_historical_data(bbg_identifier, startDate, endDate):
# Recast start & end dates in Bloomberg's format
startDate = date_to_str(startDate, "%Y%m%d")
endDate = date_to_str(endDate, "%Y%m%d")
    if pd.to_datetime(startDate) >= pd.to_datetime(endDate):
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from datetime import datetime
import pycountry
import pandas as pd
import dateutil.parser
import numpy as np
def convert_string(s):
datetimeObj = dateutil.parser.parse(s)
return datetimeObj
def convert_time_date64(dt):
dt64 = np.datetime64(dt)
ts = (dt64 - np.datetime64('1970-01-01T00:00:00Z'))
ts = ts/np.timedelta64(1,'s')
return datetime.utcfromtimestamp(ts)
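# Round-trip example for the helper above: convert_time_date64(datetime(2020, 1, 1)) passes through
# numpy.datetime64('2020-01-01T00:00:00') and 1577836800.0 epoch seconds before returning
# datetime(2020, 1, 1, 0, 0) in UTC.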
app = dash.Dash()
# excel imports to pandas dataframes
df_age_gender = pd.read_excel('excels/lite/lite-Ages-Gender.xlsx')
df_regions = pd.read_excel('excels/lite/RegionDF.xlsx')
df_page_info = pd.read_excel('excels/lite/lite-Page-Info.xlsx')
df_page_post = pd.read_excel('excels/lite/lite-Page-Post.xlsx')
df_cities = pd.read_excel('excels/lite/lite-City.xlsx')
df_countries = pd.read_excel('excels/lite/lite-Country.xlsx')
df_posts = pd.read_excel('excels/lite/lite-Post-Info.xlsx')
import json
import os
import numpy as np
import pandas as pd
from collections import OrderedDict
from copy import deepcopy
from typing import Tuple, List
from pydantic import BaseModel
from icolos.core.containers.compound import Conformer
from icolos.core.containers.generic import GenericData
from icolos.utils.enums.program_parameters import ModelBuilderEnum
from icolos.utils.enums.step_enums import StepModelBuilderEnum
from icolos.core.workflow_steps.io.base import StepIOBase
from icolos.core.workflow_steps.step import _LE, StepSettingsParameters
from icolos.utils.enums.write_out_enums import WriteOutEnum
from icolos.utils.execute_external.execute import Executor
_SMBE = StepModelBuilderEnum()
_SME = ModelBuilderEnum()
_WE = WriteOutEnum()
class StepModelBuilder(StepIOBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
# initialize the executor
self._initialize_backend(executor=Executor)
def _generate_temporary_input_output_files(
self, tmp_dir: str
) -> Tuple[str, str, str, str, str]:
tmp_input_config_json = os.path.join(tmp_dir, _SMBE.TMP_INPUT_CONFIG)
tmp_input_data_csv = os.path.join(tmp_dir, _SMBE.TMP_INPUT_DATA)
tmp_output_best_model_pkl = os.path.join(tmp_dir, _SMBE.TMP_OUTPUT_BEST_MODEL)
tmp_output_best_parameters_json = os.path.join(
tmp_dir, _SMBE.TMP_OUTPUT_BEST_PARAMETERS
)
tmp_output_production_pkl = os.path.join(
tmp_dir, _SMBE.TMP_OUTPUT_PRODUCTION_MODEL
)
return (
tmp_input_config_json,
tmp_input_data_csv,
tmp_output_best_model_pkl,
tmp_output_best_parameters_json,
tmp_output_production_pkl,
)
def _update_data_block(
self, conf: dict, tmp_input_data_csv: str, settings: StepSettingsParameters
) -> dict:
# the user can specify additional things for the "data" block of the configuration
# in the "additional" field; the input CSV file needs to be overwritten in every case, though
specified_data_block = settings.additional.get(_SMBE.DATA, {})
for key in specified_data_block.keys():
conf[_SMBE.DATA][key] = specified_data_block[key]
conf[_SMBE.DATA][_SMBE.DATA_TRAININGSET_FILE] = tmp_input_data_csv
if _SMBE.DATA_TESTSET_FILE in conf[_SMBE.DATA].keys():
conf[_SMBE.DATA].pop(_SMBE.DATA_TESTSET_FILE, None)
self._logger.log(
f"Removed test set specification, not supported yet.", _LE.WARNING
)
return conf
def _write_OptunaAZ_configuration(
self,
tmp_input_config_json: str,
tmp_input_data_csv: str,
settings: StepSettingsParameters,
):
config_path = settings.arguments.parameters[_SME.CONFIG]
with open(config_path, "r") as file:
optunaaz_conf = file.read().replace("\r", "").replace("\n", "")
optunaaz_conf = json.loads(optunaaz_conf)
optunaaz_conf = self._update_data_block(
optunaaz_conf, tmp_input_data_csv, settings
)
with open(tmp_input_config_json, "w") as file:
json.dump(optunaaz_conf, fp=file, indent=4)
self._logger.log(
f"Wrote updated OptunaAZ configuration file to {tmp_input_config_json}.",
_LE.DEBUG,
)
def _write_input_csv(
self,
conformers: List[Conformer],
tmp_input_data_csv: str,
settings: StepSettingsParameters,
):
def _get_tag(conformer: Conformer, tag: str) -> str:
try:
value = conformer.get_molecule().GetProp(tag).strip()
except KeyError:
value = np.nan
return value
smiles_column = settings.additional[_SMBE.DATA][_SMBE.DATA_INPUT_COLUMN]
response_column = settings.additional[_SMBE.DATA][_SMBE.DATA_RESPONSE_COLUMN]
# initialize the dictionary
dict_result = OrderedDict()
dict_result[_WE.RDKIT_NAME] = ["" for _ in range(len(conformers))]
dict_result[smiles_column] = ["" for _ in range(len(conformers))]
dict_result[response_column] = ["" for _ in range(len(conformers))]
# populate the dictionary with the values
for irow in range(len(conformers)):
conf = conformers[irow]
dict_result[_WE.RDKIT_NAME][irow] = conf.get_index_string()
dict_result[smiles_column][irow] = _get_tag(conf, smiles_column)
dict_result[response_column][irow] = _get_tag(conf, response_column)
# do the writeout (after sanitation)
        df_result = pd.DataFrame.from_dict(dict_result)
import numpy as np
import pandas as pd
import pytest
from pandera import (
Column, DataFrameSchema, Index, SeriesSchema, Bool, Category, Check,
DateTime, Float, Int, Object, String, Timedelta, errors)
def test_dataframe_schema():
schema = DataFrameSchema(
{
"a": Column(Int,
Check(lambda x: x > 0, element_wise=True)),
"b": Column(Float,
Check(lambda x: 0 <= x <= 10, element_wise=True)),
"c": Column(String,
Check(lambda x: set(x) == {"x", "y", "z"})),
"d": Column(Bool,
Check(lambda x: x.mean() > 0.5)),
"e": Column(Category,
Check(lambda x: set(x) == {"c1", "c2", "c3"})),
"f": Column(Object,
Check(lambda x: x.isin([(1,), (2,), (3,)]))),
"g": Column(DateTime,
Check(lambda x: x >= pd.Timestamp("2015-01-01"),
element_wise=True)),
"i": Column(Timedelta,
Check(lambda x: x < pd.Timedelta(10, unit="D"),
element_wise=True))
})
df = pd.DataFrame(
{
"a": [1, 2, 3],
"b": [1.1, 2.5, 9.9],
"c": ["z", "y", "x"],
"d": [True, True, False],
"e": pd.Series(["c2", "c1", "c3"], dtype="category"),
"f": [(3,), (2,), (1,)],
"g": [pd.Timestamp("2015-02-01"),
pd.Timestamp("2015-02-02"),
pd.Timestamp("2015-02-03")],
"i": [pd.Timedelta(1, unit="D"),
pd.Timedelta(5, unit="D"),
pd.Timedelta(9, unit="D")]
})
assert isinstance(schema.validate(df), pd.DataFrame)
# error case
with pytest.raises(errors.SchemaError):
schema.validate(df.drop("a", axis=1))
with pytest.raises(errors.SchemaError):
schema.validate(df.assign(a=[-1, -2, -1]))
# checks if 'a' is converted to float, while schema says int, will a schema
# error be thrown
with pytest.raises(errors.SchemaError):
schema.validate(df.assign(a=[1.7, 2.3, 3.1]))
def test_dataframe_schema_strict():
# checks if strict=True whether a schema error is raised because 'a' is not
# present in the dataframe.
schema = DataFrameSchema({"a": Column(Int, nullable=True)},
strict=True)
df = pd.DataFrame({"b": [1, 2, 3]})
with pytest.raises(errors.SchemaError):
schema.validate(df)
def test_series_schema():
int_schema = SeriesSchema(
Int, Check(lambda x: 0 <= x <= 100, element_wise=True))
assert isinstance(int_schema.validate(
pd.Series([0, 30, 50, 100])), pd.Series)
str_schema = SeriesSchema(
String, Check(lambda s: s.isin(["foo", "bar", "baz"])),
nullable=True, coerce=True)
assert isinstance(str_schema.validate(
pd.Series(["foo", "bar", "baz", None])), pd.Series)
assert isinstance(str_schema.validate(
pd.Series(["foo", "bar", "baz", np.nan])), pd.Series)
# error cases
for data in [-1, 101, 50.1, "foo"]:
with pytest.raises(errors.SchemaError):
int_schema.validate(pd.Series([data]))
for data in [-1, {"a": 1}, -1.0]:
with pytest.raises(TypeError):
int_schema.validate(TypeError)
non_duplicate_schema = SeriesSchema(
Int, allow_duplicates=False)
with pytest.raises(errors.SchemaError):
non_duplicate_schema.validate(pd.Series([0, 1, 2, 3, 4, 1]))
# when series name doesn't match schema
named_schema = SeriesSchema(Int, name="my_series")
with pytest.raises(
errors.SchemaError,
match=r"^Expected .+ to have name"):
named_schema.validate(pd.Series(range(5), name="your_series"))
# when series floats are declared to be integer
with pytest.raises(
errors.SchemaError,
match=r"^after dropping null values, expected values in series"):
SeriesSchema(Int, nullable=True).validate(
pd.Series([1.1, 2.3, 5.5, np.nan]))
# when series contains null values when schema is not nullable
with pytest.raises(
errors.SchemaError,
match=r"^non-nullable series .+ contains null values"):
SeriesSchema(Float, nullable=False).validate(
pd.Series([1.1, 2.3, 5.5, np.nan]))
# when series contains null values when schema is not nullable in addition
# to having the wrong data type
with pytest.raises(
errors.SchemaError,
match=(
r"^expected series '.+' to have type .+, got .+ and "
"non-nullable series contains null values")):
SeriesSchema(Int, nullable=False).validate(
pd.Series([1.1, 2.3, 5.5, np.nan]))
def test_series_schema_multiple_validators():
schema = SeriesSchema(
Int, [
Check(lambda x: 0 <= x <= 50, element_wise=True),
Check(lambda s: (s == 21).any())])
validated_series = schema.validate(pd.Series([1, 5, 21, 50]))
assert isinstance(validated_series, pd.Series)
# raise error if any of the validators fails
with pytest.raises(errors.SchemaError):
schema.validate(pd.Series([1, 5, 20, 50]))
class SeriesGreaterCheck:
# pylint: disable=too-few-public-methods
"""Class creating callable objects to check if series elements exceed a
lower bound.
"""
def __init__(self, lower_bound):
self.lower_bound = lower_bound
def __call__(self, s: pd.Series):
"""Check if the elements of s are > lower_bound.
:returns Series with bool elements
"""
return s > self.lower_bound
def series_greater_than_zero(s: pd.Series):
"""Return a bool series indicating whether the elements of s are > 0"""
return s > 0
def series_greater_than_ten(s: pd.Series):
"""Return a bool series indicating whether the elements of s are > 10"""
return s > 10
@pytest.mark.parametrize("check_function, should_fail", [
(lambda s: s > 0, False),
(lambda s: s > 10, True),
(series_greater_than_zero, False),
(series_greater_than_ten, True),
(SeriesGreaterCheck(lower_bound=0), False),
(SeriesGreaterCheck(lower_bound=10), True)
])
def test_dataframe_schema_check_function_types(check_function, should_fail):
schema = DataFrameSchema(
{
"a": Column(Int,
Check(fn=check_function, element_wise=False)),
"b": Column(Float,
Check(fn=check_function, element_wise=False))
})
df = pd.DataFrame({
"a": [1, 2, 3],
"b": [1.1, 2.5, 9.9]
})
if should_fail:
with pytest.raises(errors.SchemaError):
schema.validate(df)
else:
schema.validate(df)
def test_nullable_int_in_dataframe():
df = pd.DataFrame({"column1": [5, 1, np.nan]})
null_schema = DataFrameSchema({
"column1": Column(Int, Check(lambda x: x > 0), nullable=True)
})
assert isinstance(null_schema.validate(df), pd.DataFrame)
# test case where column is an object
df = df.astype({"column1": "object"})
assert isinstance(null_schema.validate(df), pd.DataFrame)
def test_coerce_dtype_in_dataframe():
df = pd.DataFrame({
"column1": [10.0, 20.0, 30.0],
"column2": ["2018-01-01", "2018-02-01", "2018-03-01"],
"column3": [1, 2, None],
"column4": [1., 1., np.nan],
})
# specify `coerce` at the Column level
schema1 = DataFrameSchema({
"column1": Column(Int, Check(lambda x: x > 0), coerce=True),
"column2": Column(DateTime, coerce=True),
"column3": Column(String, coerce=True, nullable=True),
})
# specify `coerce` at the DataFrameSchema level
schema2 = DataFrameSchema({
"column1": Column(Int, Check(lambda x: x > 0)),
"column2": Column(DateTime),
"column3": Column(String, nullable=True),
}, coerce=True)
for schema in [schema1, schema2]:
result = schema.validate(df)
assert result.column1.dtype == Int.value
assert result.column2.dtype == DateTime.value
for _, x in result.column3.iteritems():
            assert pd.isna(x)
__author__ = 'lucabasa'
__version__ = '5.1.0'
__status__ = 'development'
import pandas as pd
import numpy as np
from source.aggregated_stats import process_details, full_stats, rolling_stats
from source.add_info import add_seed, add_rank, highlow_seed, add_stage, add_quality
def make_teams_target(data, league):
'''
Take the playoff compact data and double the dataframe by inverting W and L
It also creates the ID column
data: playoff compact results
league: men or women, useful to know when to cut the data
'''
if league == 'men':
limit = 2003
else:
limit = 2010
df = data[data.Season >= limit].copy()
df['Team1'] = np.where((df.WTeamID < df.LTeamID), df.WTeamID, df.LTeamID)
df['Team2'] = np.where((df.WTeamID > df.LTeamID), df.WTeamID, df.LTeamID)
df['target'] = np.where((df['WTeamID'] < df['LTeamID']), 1, 0)
df['target_points'] = np.where((df['WTeamID'] < df['LTeamID']), df.WScore - df.LScore, df.LScore - df.WScore)
df.loc[df.WLoc == 'N', 'LLoc'] = 'N'
df.loc[df.WLoc == 'H', 'LLoc'] = 'A'
df.loc[df.WLoc == 'A', 'LLoc'] = 'H'
df['T1_Loc'] = np.where((df.WTeamID < df.LTeamID), df.WLoc, df.LLoc)
df['T2_Loc'] = np.where((df.WTeamID > df.LTeamID), df.WLoc, df.LLoc)
df['T1_Loc'] = df['T1_Loc'].map({'H': 1, 'A': -1, 'N': 0})
df['T2_Loc'] = df['T2_Loc'].map({'H': 1, 'A': -1, 'N': 0})
reverse = data[data.Season >= limit].copy()
reverse['Team1'] = np.where((reverse.WTeamID > reverse.LTeamID), reverse.WTeamID, reverse.LTeamID)
reverse['Team2'] = np.where((reverse.WTeamID < reverse.LTeamID), reverse.WTeamID, reverse.LTeamID)
reverse['target'] = np.where((reverse['WTeamID'] > reverse['LTeamID']),1,0)
reverse['target_points'] = np.where((reverse['WTeamID'] > reverse['LTeamID']),
reverse.WScore - reverse.LScore,
reverse.LScore - reverse.WScore)
reverse.loc[reverse.WLoc == 'N', 'LLoc'] = 'N'
reverse.loc[reverse.WLoc == 'H', 'LLoc'] = 'A'
reverse.loc[reverse.WLoc == 'A', 'LLoc'] = 'H'
reverse['T1_Loc'] = np.where((reverse.WTeamID > reverse.LTeamID), reverse.WLoc, reverse.LLoc)
reverse['T2_Loc'] = np.where((reverse.WTeamID < reverse.LTeamID), reverse.WLoc, reverse.LLoc)
reverse['T1_Loc'] = reverse['T1_Loc'].map({'H': 1, 'A': -1, 'N': 0})
reverse['T2_Loc'] = reverse['T2_Loc'].map({'H': 1, 'A': -1, 'N': 0})
df = pd.concat([df, reverse], ignore_index=True)
to_drop = ['WScore','WTeamID', 'LTeamID', 'LScore', 'WLoc', 'LLoc', 'NumOT']
for col in to_drop:
del df[col]
df.loc[:,'ID'] = df.Season.astype(str) + '_' + df.Team1.astype(str) + '_' + df.Team2.astype(str)
return df
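# Illustrative sketch (not from the original module): a minimal check of how
# make_teams_target doubles one compact result; the toy row below (teams 1101/1104,
# final score 70-60) is an assumption used only for demonstration.
if __name__ == '__main__':
    _toy = pd.DataFrame({'Season': [2019], 'DayNum': [140],
                         'WTeamID': [1104], 'LTeamID': [1101],
                         'WScore': [70], 'LScore': [60],
                         'WLoc': ['N'], 'NumOT': [0]})
    _doubled = make_teams_target(_toy, 'men')
    # row 1: Team1=1101, Team2=1104, target=0, target_points=-10, ID='2019_1101_1104'
    # row 2: Team1=1104, Team2=1101, target=1, target_points=10,  ID='2019_1104_1101'
    print(_doubled[['ID', 'Team1', 'Team2', 'target', 'target_points']])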
def make_training_data(details, targets):
'''
details: seasonal stats by team
targets: result of make_teams_target with each playoff game present twice
Add the prefix T1_ and T2_ to the seasonal stats and add it to the playoff game
This creates the core training set where we use seasonal stats to predict the playoff games
Add the delta_ statistics, given by the difference between T1_ and T2_
'''
tmp = details.copy()
tmp.columns = ['Season', 'Team1'] + \
['T1_'+col for col in tmp.columns if col not in ['Season', 'TeamID']]
total = pd.merge(targets, tmp, on=['Season', 'Team1'], how='left')
tmp = details.copy()
tmp.columns = ['Season', 'Team2'] + \
['T2_'+col for col in tmp.columns if col not in ['Season', 'TeamID']]
total = pd.merge(total, tmp, on=['Season', 'Team2'], how='left')
if total.isnull().any().any():
print(total.columns[total.isnull().any()])
raise ValueError('Something went wrong')
stats = [col[3:] for col in total.columns if 'T1_' in col and 'region' not in col]
for stat in stats:
total['delta_'+stat] = total['T1_'+stat] - total['T2_'+stat]
try:
total['delta_off_edge'] = total['T1_off_rating'] - total['T2_def_rating']
total['delta_def_edge'] = total['T2_off_rating'] - total['T1_def_rating']
except KeyError:
pass
return total
def prepare_data(league):
save_loc = 'processed_data/' + league + '/'
if league == 'women':
regular_season = 'data/raw_women/WDataFiles_Stage2/WRegularSeasonDetailedResults.csv'
playoff = 'data/raw_women/WDataFiles_Stage2/WNCAATourneyDetailedResults.csv'
playoff_compact = 'data/raw_women/WDataFiles_Stage2/WNCAATourneyCompactResults.csv'
seed = 'data/raw_women/WDataFiles_Stage2/WNCAATourneySeeds.csv'
rank = None
stage2 = 'data/raw_women/WDataFiles_Stage2/WSampleSubmissionStage2.csv'
stage2_yr = 2021
save_loc = 'data/processed_women/'
else:
regular_season = 'data/raw_men/MDataFiles_Stage2/MRegularSeasonDetailedResults.csv'
playoff = 'data/raw_men/MDataFiles_Stage2/MNCAATourneyDetailedResults.csv'
playoff_compact = 'data/raw_men/MDataFiles_Stage2/MNCAATourneyCompactResults.csv'
seed = 'data/raw_men/MDataFiles_Stage2/MNCAATourneySeeds.csv'
rank = 'data/raw_men/MDataFiles_Stage2/MMasseyOrdinals.csv'
stage2 = 'data/raw_men/MDataFiles_Stage2/MSampleSubmissionStage2.csv'
stage2_yr = 2021
save_loc = 'data/processed_men/'
# Season stats
reg = pd.read_csv(regular_season)
reg = process_details(reg, rank)
reg.to_csv(save_loc + 'game_details_regular_extended.csv', index=False)
regular_stats = full_stats(reg)
# Last 2 weeks stats
last2weeks = reg[reg.DayNum >= 118].copy()
last2weeks = full_stats(last2weeks)
last2weeks.columns = ['L2W_' + col for col in last2weeks]
last2weeks.rename(columns={'L2W_Season': 'Season', 'L2W_TeamID': 'TeamID'}, inplace=True)
regular_stats = pd.merge(regular_stats, last2weeks, on=['Season', 'TeamID'], how='left')
regular_stats = add_seed(seed, regular_stats)
# Playoff stats
play = pd.read_csv(playoff)
play = process_details(play)
play.to_csv(save_loc + 'game_details_playoff_extended.csv', index=False)
playoff_stats = full_stats(play)
playoff_stats = add_seed(seed, playoff_stats)
if rank:
regular_stats = add_rank(rank, regular_stats)
playoff_stats = add_rank(rank, playoff_stats)
# Target data generation
target_data = pd.read_csv(playoff_compact)
target_data = make_teams_target(target_data, league)
# Add high and low seed wins perc
regular_stats = highlow_seed(regular_stats, reg, seed)
all_reg = make_training_data(regular_stats, target_data)
all_reg = all_reg[all_reg.DayNum >= 136] # remove pre tourney
all_reg = add_stage(all_reg)
all_reg = add_quality(all_reg, reg)
all_reg.to_csv(save_loc + 'training_data.csv', index=False)
playoff_stats.to_csv(save_loc + 'playoff_stats.csv', index=False)
if stage2:
test_data_reg = regular_stats[regular_stats.Season == stage2_yr].copy()
sub = pd.read_csv(stage2)
sub['Team1'] = sub['ID'].apply(lambda x: int(x[5:9]))
sub['Team2'] = sub['ID'].apply(lambda x: int(x[10:]))
tmp = sub.copy()
tmp = tmp.rename(columns={'Team1': 'Team2', 'Team2': 'Team1'})
tmp = tmp[['Team1', 'Team2', 'Pred']]
        sub = pd.concat([sub[['Team1', 'Team2', 'Pred']], tmp], ignore_index=True)
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
class CGM(object):
"""
For Freestyle Libre, the imported .csv files have the following descriptions:
'Serial Number' ID of the row.
'Device Timestamp' Date and time that indicates when the record was taken.
'Register Type' column. The type of registers can take the following values:
0: automatic glucose value register, saved each 15 minutes by the device.
'Historic Glucose mg/dL' column. Blood glucose value in rows with register type 0 (mg/dl).
1: manual blood glucose value register, saved in the record after a read by the patient.
'Scan Glucose mg/dL' column. Blood glucose value in rows with register type 1 (mg/dl).
2: register of insulin without a numeric value.
Rapid insulin register without a numeric value in rows with register type 2.
3: register of carbohydrates without a numeric value.
Carbohydrates without a numeric value in rows with register type 3.
4: register of insulin done with a numeric value.
Units of rapid insulin entered by the patient in rows with register type 4.
5: register of carbohydrates with a numeric value.
Units of carbohydrates entered by the patient in rows with register type 5.
"""
def __init__(self):
"""
:param input: Either a path to a PreProcessing file saved with ``export_hyp`` or a PreProcessing object
"""
self.data = None
def from_file(self,
file_path,
pid: str = -1,
device_col: str = 'Device',
device_serial: str = 'Serial Number',
cgm_time_col: str = 'Device Timestamp',
strftime: str = '%m-%d-%Y %I:%M %p',
reading_type_col: str = 'Record Type',
glucose_col_auto: str = 'Historic Glucose mg/dL',
glucose_col_man: str = 'Scan Glucose mg/dL',
ket_col: str = 'Ketone mmol/L'
):
"""
Parameters
----------
file_path : str
Path to file.
pid : str, optional
DESCRIPTION. The default is -1.
device_col : str, optional
DESCRIPTION. The default is 'Device'.
device_serial : str, optional
DESCRIPTION. The default is 'Serial Number'.
cgm_time_col : str, optional
DESCRIPTION. The default is 'Device Timestamp'.
strftime : str, optional
DESCRIPTION. The default is '%m-%d-%Y %I:%M %p'. Time format for device data.
reading_type_col : str, optional
DESCRIPTION. The default is 'Record Type'. What is recorded - manual / automatic glucose, insulin dose, food
glucose_col_auto : str, optional
DESCRIPTION. The default is 'Historic Glucose mg/dL'. CGM readings
glucose_col_man : str, optional
DESCRIPTION. The default is 'Scan Glucose mg/dL'. Manual input of finger strip glucose
ket_col : str, optional
DESCRIPTION. The default is 'Ketone mmol/L'. CGM ketone level reading.
Returns
-------
DataFrame
DESCRIPTION. Contains CGM device metadata, timestamp column 'hyp_time_col' and glucose and ketone readings
"""
        self.data = pd.read_csv(file_path, header=1)
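        # Hedged usage sketch (not from the original module): once self.data is
        # populated, the register types documented in the class docstring can be
        # separated; the column names follow the defaults above, and the file name
        # and pid below are placeholders.
        #   cgm = CGM()
        #   cgm.from_file('libre_export.csv', pid='P001')
        #   auto_glucose = cgm.data[cgm.data['Record Type'] == 0]['Historic Glucose mg/dL']
        #   manual_scans = cgm.data[cgm.data['Record Type'] == 1]['Scan Glucose mg/dL']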
# Module: preprocessing.py
# Functions to reproduce the pre-processing of the data
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from acquire import concat_csv_files
from prepare import prepare_df, set_index
################################################## Feature Engineering ###################################################
def create_features(df):
'''
Creates features based on the original dataframe. Converts track duration into new columns for length in seconds and
length in minutes. Creates a feature for a boolean value if the track features an artist or not. Lower cases all
characters in text data columns. Returns the df with these features created at the end and string conversions.
'''
# Feature Engineering
# converting track length from ms to seconds as new column
df['duration_seconds'] = df.duration_ms / 1_000
# converting track length from seconds to minutes as new column
df['duration_minutes'] = df.duration_seconds / 60
# creating boolean if track has a featured artist
df['is_featured_artist'] = df.track_name.str.contains('feat').astype('int')
# Lowercasing String
# using string function to convert all characters to lowercase
df['artist'] = df.artist.str.lower()
df['album'] = df.album.str.lower()
df['track_name'] = df.track_name.str.lower()
# Create Seperate Columns for Year, Month, and Day
# creating dataframe of release day split by '-'
dates = df.release_date.str.split('-', expand=True)
# renaming columns to respective year, month, and day
dates.columns = ['release_year','release_month','release_day']
# ensuring index is the same as the df
dates.index = df.index
# adding to the dataframe with axis=1 to add column-wise
df = pd.concat([dates, df], axis=1)
# Flatten column MultiIndex
#df.columns = [x[0] for x in df.columns]
df.release_year = df.release_year.astype('int')
# bins set edge points for range of label
# goes from 1980-1989, 1990-1999, 2000-2009, 2019-2019, 2020-2029
df['decade'] = pd.cut(x=df.release_year, bins=[1979,1989,1999,2009,2019,2029],
labels=['80s','90s','2000s','2010s','2020s'])
# create is_top_billboard_label
top_ten_billboard = ['Def Jam', 'Young Money', 'Roc-A-Fella', 'Jive', 'Bad Boy', 'Grand Hustle', 'Shady', 'Ruffhouse', 'Cash Money', 'Columbia']
pattern = '|'.join(top_ten_billboard)
df['is_top_billboard_label'] = df.label.str.contains(pattern)
df['is_top_billboard_label'] = df.is_top_billboard_label.astype('int')
return df
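# Illustrative note (not from the original module): the decade bins above are
# right-inclusive, so a 1989 release lands in '80s' and a 1990 release in '90s', e.g.
#   pd.cut(pd.Series([1985, 1989, 1990, 2003, 2021]),
#          bins=[1979, 1989, 1999, 2009, 2019, 2029],
#          labels=['80s', '90s', '2000s', '2010s', '2020s'])
#   -> ['80s', '80s', '90s', '2000s', '2020s']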
##################################################### Split the Data #####################################################
def split_df(df):
'''
Splits dataframe into train, validate, and test - 70%, 20%, 10% respectively.
Prints out the percentage shape and row/column shape of the split dataframes.
Returns train, validate, test.
'''
# Import to use split function, can only split two at a time
from sklearn.model_selection import train_test_split
# First, split into train + validate together and test by itself
# Test will be %10 of the data, train + validate is %70 for now
# Set random_state so we can reproduce the same 'random' data
train_validate, test = train_test_split(df, test_size = .10, random_state = 666)
# Second, split train + validate into their seperate dataframes
# Train will be %70 of the data, Validate will be %20 of the data
# Set random_state so we can reproduce the same 'random' data
train, validate = train_test_split(train_validate, test_size = .22, random_state = 666)
# These two print functions allow us to ensure the date is properly split
# Will print the shape of each variable when running the function
print("train shape: ", train.shape, ", validate shape: ", validate.shape, ", test shape: ", test.shape)
# Will print the shape of each variable as a percentage of the total data set
# Variable to hold the sum of all rows (total observations in the data)
total = df.count()[0]
#calculating percentages of the split df to the original df
train_percent = round(((train.shape[0])/total),2) * 100
validate_percent = round(((validate.shape[0])/total),2) * 100
test_percent = round(((test.shape[0])/total),2) * 100
print("\ntrain percent: ", train_percent, ", validate percent: ", validate_percent,
", test percent: ", test_percent)
return train, validate, test
def spotify_split(df, target):
'''
This function takes in a dataframe and the string name of the target variable
and splits it into test (15%), validate (15%), and train (70%).
It also splits test, validate, and train into X and y dataframes.
Returns X_train, y_train, X_validate, y_validate, X_test, y_test, train, validate, test.
'''
# first, since the target is a continuous variable and not a categorical one,
# in order to use stratification, we need to turn it into a categorical variable with binning.
bin_labels_5 = ['Low', 'Moderately Low', 'Moderate', 'Moderately High', 'High']
df['pop_strat_bin'] = pd.qcut(df['popularity'], q=5, precision=0, labels=bin_labels_5)
# split df into test (15%) and train_validate (85%)
train_validate, test = train_test_split(df, test_size=.15, stratify=df['pop_strat_bin'], random_state=666)
# drop column used for stratification
train_validate = train_validate.drop(columns=['pop_strat_bin'])
test = test.drop(columns=['pop_strat_bin'])
    # split train_validate off into train (82.35% of 85% = 70%) and validate (17.65% of 85% = 15%)
train, validate = train_test_split(train_validate, test_size=.1765, random_state=666)
# split train into X & y
X_train = train.drop(columns=[target])
y_train = train[target]
# split validate into X & y
X_validate = validate.drop(columns=[target])
y_validate = validate[target]
# split test into X & y
X_test = test.drop(columns=[target])
y_test = test[target]
print('Shape of train:', X_train.shape, '| Shape of validate:', X_validate.shape, '| Shape of test:', X_test.shape)
print('Percent train:', round(((train.shape[0])/df.count()[0]),2) * 100, ' | Percent validate:', round(((validate.shape[0])/df.count()[0]),2) * 100, ' | Percent test:', round(((test.shape[0])/df.count()[0]),2) * 100)
return X_train, y_train, X_validate, y_validate, X_test, y_test, train, validate, test
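# Illustrative note (not from the original module): the stratification above works by
# first turning the continuous target into equal-frequency buckets, e.g.
#   pd.qcut(pd.Series([1, 5, 20, 40, 55, 70, 80, 90, 95, 99]), q=5,
#           labels=['Low', 'Moderately Low', 'Moderate', 'Moderately High', 'High'])
# assigns two values to each label, which train_test_split can then stratify on.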
def encode_features(df):
'''
This function encodes non-numeric features for use in modeling.
Takes in df and returns df.
'''
# encode 'explicit'
df['is_explicit'] = df.explicit.map({True: 1, False: 0})
df = df.drop(columns=['explicit'])
return df
##################################################### Scale the Data #####################################################
def scale_data(train, validate, test, predict, scaler):
'''
Scales a df based on scaler chosen: 'MinMax', 'Standard', or 'Robust'.
Needs three dfs: train, validate, and test. Fits the scaler object to train
only, transforms on all 3. Returns the three dfs scaled.
'predict' is the target variable name.
'''
import sklearn.preprocessing
# removing predictive feature
X_train = train.drop(predict, axis=1)
X_validate = validate.drop(predict, axis=1)
X_test = test.drop(predict, axis=1)
if scaler == 'MinMax':
# create scaler object for MinMax Scaler
scaler = sklearn.preprocessing.MinMaxScaler()
elif scaler == 'Standard':
# create scaler object for Standard Scaler
scaler = sklearn.preprocessing.StandardScaler()
elif scaler == 'Robust':
# create scaler object for Robust Scaler
        scaler = sklearn.preprocessing.RobustScaler()
# Note that we only call .fit with the training data,
# but we use .transform to apply the scaling to all the data splits.
scaler.fit(X_train)
# transforming all three dfs with the scaler object
# this turns it into an array
X_train_scaled = scaler.transform(X_train)
X_validate_scaled = scaler.transform(X_validate)
X_test_scaled = scaler.transform(X_test)
# converting scaled array back to df
# first by converting to a df, it will not have the original index and column names
X_train_scaled = pd.DataFrame(X_train_scaled)
X_validate_scaled = pd.DataFrame(X_validate_scaled)
    X_test_scaled = pd.DataFrame(X_test_scaled)
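    # Hedged completion sketch: the snippet is cut off here, so the ending below is an
    # assumption -- it restores the index/column names lost by the scaler and returns
    # the scaled frames; the exact return signature is not taken from the source.
    for _scaled, _original in [(X_train_scaled, X_train),
                               (X_validate_scaled, X_validate),
                               (X_test_scaled, X_test)]:
        _scaled.index = _original.index
        _scaled.columns = _original.columns
    return X_train_scaled, X_validate_scaled, X_test_scaled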
from __future__ import division
import torch
import numpy as np
from training.config import Config
from training.model import SeqLSTM
from training.data_utils import get_data_loader_chr
from analyses.feature_attribution.tf import TFChip
import pandas as pd
from analyses.feature_attribution.segway import SegWay
from analyses.classification.run import DownstreamTasks
from analyses.classification.fires import Fires
from analyses.classification.loops import Loops
from analyses.classification.domains import Domains
from training.data_utils import get_cumpos
from analyses.plot.plot_utils import plot_gbr
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def run_captum(cfg, model, chr):
"""
run_captum(cfg, model, chr) -> DataFrame
Gets data for chromosome and cell type. Runs IG using captum.
Saves resulting IG DataFrame.
Args:
cfg (Config): The configuration to use for the experiment.
model (SeqLSTM): The model to run captum on.
chr (int): The chromosome to run captum on.
"""
torch.manual_seed(123)
np.random.seed(123)
"get DataLoader"
data_loader = get_data_loader_chr(cfg, chr, shuffle=False)
"run IG"
ig_df = model.get_captum_ig(data_loader)
"save IG dataframe"
np.save(cfg.output_directory + "ig_df_chr%s.npy" % (str(chr)), ig_df)
return ig_df
def get_top_tfs_chip(cfg, ig_df, chr):
"""
get_top_tfs_chip(cfg, ig_df, chr) -> DataFrame
Attributed importance to each of the TFs according to CHipSeq Peaks.
Args:
cfg (Config): The configuration to use for the experiment.
ig_df (DataFrame): Dataframe containing positions and IG values.
chr (int): The chromosome to which IG values belong.
"""
cumpos = get_cumpos(cfg, chr)
tf_ob = TFChip(cfg, chr)
chip_data = tf_ob.get_chip_data()
chip_data = chip_data.drop_duplicates(keep='first').reset_index(drop=True)
pos_chip_data = pd.DataFrame(columns=["pos", "target", "start", "end", "chr"])
if chip_data.index[0] == 1:
chip_data.index -= 1
for i in range(0, chip_data.shape[0]):
start = chip_data.loc[i, "start"]
end = chip_data.loc[i, "end"]
for j in range(start, end + 1):
pos_chip_data = pos_chip_data.append(
{'pos': j, 'target': chip_data.loc[i, "target"], 'start': chip_data.loc[i, "start_full"],
'end': chip_data.loc[i, "end_full"], 'chr': chip_data.loc[i, "chr"]}, ignore_index=True)
pos_chip_data["pos"] = pos_chip_data["pos"] + cumpos
    ig_df = pd.merge(ig_df, pos_chip_data, on="pos")
################################################################################
# Copyright 2016-2020 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
import os
import sys
import argparse
import re
import pandas as pd
from ExtractSizes import *
from TuningConfiguration import *
headers = ""
def MatchLine(headerPattern, linePattern, line):
global headers
if not headers:
matched = headerPattern.match(line)
if matched:
headers = line
return matched
else:
matched = linePattern.match(line)
return matched
def ResultsFilesList(inputPath, resultsName):
    resultsFilePattern = re.compile(resultsName + r"\.[0-9]*")
resultsFiles = [f for f in os.listdir(inputPath)]
filteredFiles = [f for f in resultsFiles if resultsFilePattern.match(f)]
return filteredFiles
def ParseResults(inputPath, outputPath, resultsName):
global headers
headers = ""
filteredFiles = ResultsFilesList(inputPath, resultsName)
headerPattern = re.compile("transA,transB")
linePattern = re.compile(r"(N|T),(N|T).*")
outfilename = resultsName + ".csv"
outputFilePath = os.path.join(outputPath, outfilename)
outfile = open(outputFilePath,'w')
for fl in filteredFiles:
flPath = os.path.join(inputPath,fl)
filteredLines = [ line for line in open(flPath) if MatchLine(headerPattern, linePattern, line)]
outfile.writelines(filteredLines)
outfile.flush()
outfile.close()
def getMultiplier(xdl):
if xdl == "true":
return 2
return 1
def getCuCount(gpu):
gpuMap = {'vega10':64, 'mi25':64, 'vega20':64, 'v340l':56,'mi50':60,'arcturus':120,'mi60':64}
for key in gpuMap.keys():
if gpu == key:
return gpuMap[key]
return 64
def fillCallCounts(problemMapper, callCounts, callCount, callCountStrided, isOne):
for i in problemMapper:
for klist in i:
midList = list()
for key in klist:
if key == "transposeA" or key == "transposeB" or key == "f" or key == "i":
if klist[key] == 10 and isOne == "true":
klist[key] = 1
midList.append(klist[key])
if len(midList) == 4:
callCounts.append(midList)
for line in callCounts:
if line[0] == "gemm":
callCount.append(line[3])
elif line[0] == "gemm_strided_batched":
callCountStrided.append(line[3])
def chooseCallCount(resultsName, callCount, callCountStrided):
if "strided" in resultsName:
return callCountStrided
return callCount
def ProcessResults(outputPath, resultsName, freqM, sz, call_count, gpu = 'vega20', xdl = False):
global headers
resultsFilename = resultsName + ".csv"
resultsFilePath = os.path.join(outputPath, resultsFilename)
data = None
data = pd.read_csv(resultsFilePath)
multiplier = getMultiplier(xdl)
cus = getCuCount(gpu)
headerValues = headers.strip().split(",")
headerLength = len(headerValues)
key = headerValues[0:headerLength-2]
key.append('us')
performanceField = "rocblas-Gflops"
timingField = "us"
df = data.groupby(key,sort=False)
results = df[performanceField].mean().to_frame()
timingResults = df[timingField].mean().to_frame()
freq=freqM
factor=sz * 64 * multiplier * cus
results['eff'] = 100*1e3*results['rocblas-Gflops'] / (factor * freq)
results['us_w'] = timingResults['us']*call_count
aggregateFileName = resultsName + "-aggregated.csv"
aggregateFilePath = os.path.join(outputPath, aggregateFileName)
results.to_csv(aggregateFilePath, header=True)
resultsBad = results[results['eff'] < 70]
badResultsFileName = resultsName + "-bad.csv"
badResultsFilePath = os.path.join(outputPath, badResultsFileName)
resultsBad.sort_values(by='us_w',ascending=False).to_csv(badResultsFilePath, header=True)
large1 = data
large1['N'] = pd.to_numeric(large1['N'])
    large1['M'] = pd.to_numeric(large1['M'])
import yaml
import pandas as pd
import numpy as np
from os.path import join
from os import makedirs
import glob
import sys
import re
def parse_samplesheet(fp_samplesheet):
#print(fp_samplesheet.split('/')[-1])
# in a first iteration, open the file, read line by line and determine start
# of sample information by looking for a line starting with "[Data]".
# the following lines will be sample information, about lines are header infos.
row_sampleinformation = None
row_reads = None
with open(fp_samplesheet, "r") as f:
for linenumber, line in enumerate(f.readlines()):
if line.startswith("[Data]"):
row_sampleinformation = linenumber+1
elif line.startswith("[Reads]"):
row_reads = linenumber+1
if row_sampleinformation is None:
raise ValueError("Could not find [Data] line in file '%s'." % fp_samplesheet)
if row_reads is None:
raise ValueError("Could not find [Reads] line in file '%s'." % fp_samplesheet)
header = pd.read_csv(fp_samplesheet, sep=",", nrows=row_reads-2, index_col=0).dropna(axis=1, how="all").dropna(axis=0, how="all")
#header = header.set_index(header.columns[0])
header.index = list(map(lambda x: 'header_%s' % x, header.index))
header = header.dropna(axis=0, how="any")
header = header.T.reset_index()
del header['index']
    # a second iteration parses the sample information via pandas
    ss = pd.read_csv(fp_samplesheet, sep=",", skiprows=row_sampleinformation, dtype={'Sample_Name': str, 'Sample_ID': str, 'spike_entity_id': str})
from minder_utils.models.feature_selectors import Intrinsic_Selector, Wrapper_Selector, \
Supervised_Filter, Unsupervised_Filter
from minder_utils.configurations import config
from minder_utils.evaluate import evaluate_features
from minder_utils.models.classifiers.classifiers import Classifiers
from minder_utils.formatting.label import label_by_week
from minder_utils.feature_engineering import Feature_engineer
from minder_utils.formatting import Formatting
from minder_utils.visualisation import Visual_Evaluation
import os
import pandas as pd
import numpy as np
import datetime
os.chdir('..')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
visual = Visual_Evaluation()
test_data = pd.read_csv('./minder_utils/data/weekly_test/fe.csv')
#!/usr/bin/env python
# coding: utf-8
# # Vatsal's Code
# This notebook shows you how to build a model for predicting degradation at various locations along RNA sequence.
# * We will first pre-process and tokenize the sequence, secondary structure and loop type.
# * Then, we will use all the information to train a model on degradations recorded by the researchers from OpenVaccine.
# * Finally, we run our model on the public test set (shorter sequences) and the private test set (longer sequences), and submit the predictions.
#
# In[1]:
# %%capture
# !pip install forgi
# !yes Y |conda install -c bioconda viennarna
# In[2]:
import json,os, math
import subprocess
# from forgi.graph import bulge_graph
# import forgi.visual.mplotlib as fvm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow.keras.backend as K
import plotly.express as px
import tensorflow.keras.layers as L
import tensorflow as tf
import warnings
warnings.filterwarnings('ignore')
import tensorflow_addons as tfa
from itertools import combinations_with_replacement
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold,GroupKFold
from keras.utils import plot_model
from colorama import Fore, Back, Style
# ### Configuration
# In[3]:
###### USE DIFFERENT SEED FOR DIFFERENT STRATIFIED KFOLD
SEED = 53
###### NUMBER OF FOLDS. USE 3, 5, 7,...
n_folds=5
###### TRAIN DEBUG
debug=True
###### APPLY WINDOW FEATURES
Window_features = True
###### Number of Feature Given to Model
# cat_feature = 3 ## ( Categorical Features Only)
# num_features = 1 ## ( Numerical Features Only)
###### Model Configuration ######
model_name="GG" ## MODEL NAME (Files will save according to this )
epochs=100 ## NUMBER OF EPOCHS MODEL TRAIN IN EACH FOLD. USE 3, 5, 7,...
BATCH_SIZE = 32 ## NUMBER OF BATCH_SIZE USE 16, 32, 64, 128,...
n_layers = 2 ## Number of Layers Present in model # ex. 3 Layer of GRU Model
layers = ["GRU","GRU"] ## Stacking sequence of GRU and LSTM (list of length == n_layers)
hidden_dim = [128, 128] ## Hidden Dimension in Model (Default : [128,128]) (list of length == n_layers)
dropout = [0.5, 0.5] ## 1.0 means no dropout, and 0.0 means no outputs from the layer.
sp_dropout = 0.2 ## SpatialDropout1D (Fraction of the input units to drop) [https://stackoverflow.com/a/55244985]
embed_dim = 250 ## Output Dimention of Embedding Layer (Default : 75)
num_hidden_units = 8 ## Number of GRU units after num_input layer
###### LR Scheduler ######
Cosine_Schedule = True ## cosine LR schedule
Rampup_decy_lr = False ## Rampup decay lr schedule
# ### Set Seed
# In[4]:
def seed_everything(seed=1234):
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed_everything(SEED)
# ### Used Columns
#
# In[5]:
target_cols = ['reactivity', 'deg_Mg_pH10', 'deg_Mg_50C', 'deg_pH10', 'deg_50C']
window_columns = ['sequence','structure','predicted_loop_type']
categorical_features = ['sequence', 'structure', 'predicted_loop_type',]
# 'predicted_loop_index']
cat_feature = len(categorical_features)
if Window_features:
cat_feature += len(window_columns)
numerical_features = ['BPPS_Max','BPPS_nb', 'BPPS_sum',
'positional_entropy',
'stems', 'interior_loops', 'multiloops',#'hairpin loops', 'fiveprimes', 'threeprimes',
'A_percent', 'G_percent','C_percent', 'U_percent',
'U-G', 'C-G', 'U-A', 'G-C', 'A-U', 'G-U',
# 'E', 'S', 'H', 'B', 'X', 'I', 'M',
'pair_map', 'pair_distance', ]
num_features = len(numerical_features) ## ( Numerical Features Only)
feature_cols = categorical_features + numerical_features
pred_col_names = ["pred_"+c_name for c_name in target_cols]
target_eval_col = ['reactivity','deg_Mg_pH10','deg_Mg_50C']
pred_eval_col = ["pred_"+c_name for c_name in target_eval_col]
# ### Load and preprocess data
# In[6]:
data_dir = '/kaggle/input/stanford-covid-vaccine/'
fearure_data_path = '../input/openvaccine/'
# train = pd.read_csv(fearure_data_path+'train.csv')
# test = pd.read_csv(fearure_data_path+'test.csv')
train = pd.read_json(fearure_data_path+'train.json')
test = pd.read_json(fearure_data_path+'test.json')
# train_j = pd.read_json(data_dir + 'train.json', lines=True)
# test_j = pd.read_json(data_dir + 'test.json', lines=True)
sample_sub = pd.read_csv(data_dir + 'sample_submission.csv')
# In[7]:
train[target_cols] = train[target_cols].applymap(lambda x: x[1:-1].split(", "))
# In[8]:
# train = train[train['SN_filter'] == 1]
train = train[train['signal_to_noise'] >= 0.5]
# In[9]:
def pair_feature(row):
arr = list(row)
its = [iter(['_']+arr[:]) ,iter(arr[1:]+['_'])]
list_touple = list(zip(*its))
return list(map("".join,list_touple))
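# Illustrative check (not from the original notebook): pair_feature pairs the left and
# right neighbour of every position (padded with '_' at both ends), one 2-char token per base.
print(pair_feature("(()."))   # -> ['_(', '()', '(.', ')_']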
# In[10]:
def preprocess_categorical_inputs(df, cols=categorical_features,Window_features=Window_features):
if Window_features:
for c in window_columns:
df["pair_"+c] = df[c].apply(pair_feature)
cols.append("pair_"+c)
cols = list(set(cols))
return np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
# In[11]:
def preprocess_numerical_inputs(df, cols=numerical_features):
return np.transpose(
np.array(
df[cols].values.tolist()
),
(0, 2, 1)
)
# In[12]:
# We will use this dictionary to map each character to an integer
# so that it can be used as an input in keras
# ().ACGUBEHIMSXshftim0123456789[]{}'_,
token_list = list("().ACGUBEHIMSXshftim0123456789[]{}'_,")
if Window_features:
comb = combinations_with_replacement(list('_().<KEY>'*2), 2)
token_list += list(set(list(map("".join,comb))))
token2int = {x:i for i, x in enumerate(list(set(token_list)))}
print("token_list Size :",len(token_list))
train_inputs_all_cat = preprocess_categorical_inputs(train,cols=categorical_features)
train_inputs_all_num = preprocess_numerical_inputs(train,cols=numerical_features)
train_labels_all = np.array(train[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1))
print("Train categorical Features Shape : ",train_inputs_all_cat.shape)
print("Train numerical Features Shape : ",train_inputs_all_num.shape)
print("Train labels Shape : ",train_labels_all.shape)
# ### Reduce Train Data
# In[13]:
# train_inputs_all_cat = train_inputs_all_cat[:,:68,:]
# train_inputs_all_num = train_inputs_all_num[:,:68,:]
# train_labels_all = train_labels_all[:,:68,:]
# print("Train categorical Features Shape : ",train_inputs_all_cat.shape)
# print("Train numerical Features Shape : ",train_inputs_all_num.shape)
# print("Train labels Shape : ",train_labels_all.shape)
# #### Public and private sets have different sequence lengths, so we will preprocess them separately and load models of different tensor shapes.
# In[14]:
public_df = test.query("seq_length == 107")
private_df = test.query("seq_length == 130")
print("public_df : ",public_df.shape)
print("private_df : ",private_df.shape)
public_inputs_cat = preprocess_categorical_inputs(public_df)
private_inputs_cat = preprocess_categorical_inputs(private_df)
public_inputs_num = preprocess_numerical_inputs(public_df,cols=numerical_features)
private_inputs_num = preprocess_numerical_inputs(private_df,cols=numerical_features)
print("Public categorical Features Shape : ",public_inputs_cat.shape)
print("Public numerical Features Shape : ",public_inputs_num.shape)
print("Private categorical Features Shape : ",private_inputs_cat.shape)
print("Private numerical Features Shape : ",private_inputs_num.shape)
# ### loss Function
# In[15]:
### Custom Loss Function for ['reactivity','deg_Mg_pH10','deg_Mg_50C'] target Columns
# def rmse(y_actual, y_pred):
# mse = tf.keras.losses.mean_squared_error(y_actual, y_pred)
# return K.sqrt(mse)
# def MCRMSE(y_actual, y_pred, num_scored=3):
# score = 0
# for i in range(num_scored):
# score += rmse(y_actual[:,:, i], y_pred[:,:, i]) / num_scored
# return score
def MCRMSE(y_true, y_pred):
colwise_mse = tf.reduce_mean(tf.square(y_true[:,:,:3] - y_pred[:,:,:3]), axis=1)
return tf.reduce_mean(tf.sqrt(colwise_mse), axis=1)
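# Quick numerical check of MCRMSE (illustrative, not from the original notebook):
# identical tensors give 0 per sample; a constant offset of 0.1 gives ~0.1 per sample,
# since only the first three (scored) columns enter the loss.
_y = tf.zeros((2, 68, 5))
print(MCRMSE(_y, _y).numpy())          # -> [0. 0.]
print(MCRMSE(_y, _y + 0.1).numpy())    # -> [0.1 0.1] (approximately)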
# ### Learning Rate Schedulers
# ### Rampup decay lr Schedule
# In[16]:
def get_lr_callback(batch_size=8):
lr_start = 0.00001
lr_max = 0.004
lr_min = 0.00005
lr_ramp_ep = 45
lr_sus_ep = 2
lr_decay = 0.8
def lrfn(epoch):
if epoch < lr_ramp_ep:
lr = (lr_max - lr_start) / lr_ramp_ep * epoch + lr_start
elif epoch < lr_ramp_ep + lr_sus_ep:
lr = lr_max
else:
lr = (lr_max - lr_min) * lr_decay**(epoch - lr_ramp_ep - lr_sus_ep) + lr_min
return lr
lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False)
return lr_callback
# ### Cosine schedule with warmup
# In[17]:
def get_cosine_schedule_with_warmup(lr,num_warmup_steps, num_training_steps, num_cycles=3.5):
"""
Modified version of the get_cosine_schedule_with_warmup from huggingface.
(https://huggingface.co/transformers/_modules/transformers/optimization.html#get_cosine_schedule_with_warmup)
Create a schedule with a learning rate that decreases following the
values of the cosine function between 0 and `pi * cycles` after a warmup
period during which it increases linearly between 0 and 1.
"""
def lrfn(epoch):
if epoch < num_warmup_steps:
return (float(epoch) / float(max(1, num_warmup_steps))) * lr
progress = float(epoch - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) * lr
return tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False)
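# Illustrative preview (not from the original notebook) of the warmup + cosine schedule
# with the settings used later (lr=0.001, 20 warmup epochs, `epochs` training steps).
# This relies on LearningRateScheduler exposing the schedule function as `.schedule`.
_sched = get_cosine_schedule_with_warmup(lr=0.001, num_warmup_steps=20, num_training_steps=epochs)
print([round(_sched.schedule(e), 6) for e in (0, 10, 20, 50, 99)])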
# ### Different Layers
# In[18]:
def lstm_layer(hidden_dim, dropout):
return tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(hidden_dim,
dropout=dropout,
return_sequences=True,
kernel_initializer = 'orthogonal'))
# In[19]:
def gru_layer(hidden_dim, dropout):
return L.Bidirectional(
L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal')
)
# ### Model Building
# In[20]:
# def build_model(embed_size,
# seq_len = 107,
# pred_len = 68,
# dropout = dropout,
# sp_dropout = sp_dropout,
# num_features = num_features,
# num_hidden_units = num_hidden_units,
# embed_dim = embed_dim,
# layers = layers,
# hidden_dim = hidden_dim,
# n_layers = n_layers,
# cat_feature = cat_feature):
# inputs = L.Input(shape=(seq_len, cat_feature),name='category_input')
# embed = L.Embedding(input_dim=embed_size, output_dim=embed_dim)(inputs)
# reshaped = tf.reshape(embed, shape=(-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
# reshaped_conv = tf.keras.layers.Conv1D(filters=512, kernel_size=3,strides=1, padding='same', activation='elu')(reshaped)
# numerical_input = L.Input(shape=(seq_len, num_features), name='numeric_input')
# n_Dense_1 = L.Dense(64)(numerical_input)
# n_Dense_2 = L.Dense(128)(n_Dense_1)
# numerical_conv = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(n_Dense_2)
# hidden = L.concatenate([reshaped_conv, numerical_conv])
# hidden = L.SpatialDropout1D(sp_dropout)(hidden)
# for x in range(n_layers):
# if layers[x] == "GRU":
# hidden = gru_layer(hidden_dim[x], dropout[x])(hidden)
# else:
# hidden = lstm_layer(hidden_dim[x], dropout[x])(hidden)
# # Since we are only making predictions on the first part of each sequence,
# # we have to truncate it
# truncated = hidden[:, :pred_len]
# out = L.Dense(5)(truncated)
# model = tf.keras.Model(inputs=[inputs] + [numerical_input], outputs=out)
# adam = tf.optimizers.Adam()
# radam = tfa.optimizers.RectifiedAdam()
# lookahead = tfa.optimizers.Lookahead(adam, sync_period=6)
# ranger = tfa.optimizers.Lookahead(radam, sync_period=6)
# model.compile(optimizer=radam, loss=MCRMSE)
# return model
# In[21]:
def build_model(embed_size,
seq_len = 107,
pred_len = 68,
dropout = dropout,
sp_dropout = sp_dropout,
num_features = num_features,
num_hidden_units = num_hidden_units,
embed_dim = embed_dim,
layers = layers,
hidden_dim = hidden_dim,
n_layers = n_layers,
cat_feature = cat_feature):
inputs = L.Input(shape=(seq_len, cat_feature),name='category_input')
embed = L.Embedding(input_dim=embed_size, output_dim=embed_dim)(inputs)
reshaped = tf.reshape(embed, shape=(-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
reshaped = L.SpatialDropout1D(sp_dropout)(reshaped)
reshaped_conv = tf.keras.layers.Conv1D(filters=512, kernel_size=3,strides=1, padding='same', activation='elu')(reshaped)
numerical_input = L.Input(shape=(seq_len, num_features), name='numeric_input')
# n_Dense_1 = L.Dense(64)(numerical_input)
# n_Dense_2 = L.Dense(128)(n_Dense_1)
# numerical_conv = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(n_Dense_2)
hidden = L.concatenate([reshaped_conv, numerical_input])
hidden_1 = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(hidden)
hidden = gru_layer(128, 0.5)(hidden_1)
hidden = L.concatenate([hidden, hidden_1])
# hidden = L.SpatialDropout1D(sp_dropout)(hidden)
for x in range(n_layers):
if layers[x] == "GRU":
hidden = gru_layer(hidden_dim[x], dropout[x])(hidden)
else:
hidden = lstm_layer(hidden_dim[x], dropout[x])(hidden)
hidden = L.concatenate([hidden, hidden_1])
# Since we are only making predictions on the first part of each sequence,
# we have to truncate it
truncated = hidden[:, :pred_len]
out = L.Dense(5)(truncated)
model = tf.keras.Model(inputs=[inputs] + [numerical_input], outputs=out)
adam = tf.optimizers.Adam()
radam = tfa.optimizers.RectifiedAdam()
lookahead = tfa.optimizers.Lookahead(adam, sync_period=6)
ranger = tfa.optimizers.Lookahead(radam, sync_period=6)
model.compile(optimizer=radam, loss=MCRMSE)
return model
# ### Build and train model
#
# We will train a bi-directional GRU model. It has three layer and has dropout. To learn more about RNNs, LSTM and GRU, please see [this blog post](https://colah.github.io/posts/2015-08-Understanding-LSTMs/).
# In[22]:
model = build_model(embed_size=len(token_list))
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
# ### Add Augmentation Data
# ### stratify_group Based on structure and SN_Filter
# In[23]:
def get_stratify_group(row):
snf = row['SN_filter']
snr = row['signal_to_noise']
cnt = row['cnt']
id_ = row['id']
structure = row['structure']
if snf == 0:
if snr<0:
snr_c = 0
elif 0<= snr < 2:
snr_c = 1
elif 2<= snr < 4:
snr_c = 2
elif 4<= snr < 5.5:
snr_c = 3
elif 5.5<= snr < 10:
snr_c = 4
elif snr >= 10:
snr_c = 5
else: # snf == 1
if snr<0:
snr_c = 6
elif 0<= snr < 1:
snr_c = 7
elif 1<= snr < 2:
snr_c = 8
elif 2<= snr < 3:
snr_c = 9
elif 3<= snr < 4:
snr_c = 10
elif 4<= snr < 5:
snr_c = 11
elif 5<= snr < 6:
snr_c = 12
elif 6<= snr < 7:
snr_c = 13
elif 7<= snr < 8:
snr_c = 14
elif 8<= snr < 9:
snr_c = 15
elif 9<= snr < 10:
snr_c = 15
elif snr >= 10:
snr_c = 16
return '{}_{}'.format(id_,snr_c)
train['stratify_group'] = train.apply(get_stratify_group, axis=1)
train['stratify_group'] = train['stratify_group'].astype('category').cat.codes
skf = StratifiedKFold(n_folds, shuffle=True, random_state=SEED)
gkf = GroupKFold(n_splits=n_folds)
fig, ax = plt.subplots(n_folds,3,figsize=(20,5*n_folds))
for Fold, (train_index, val_index) in enumerate(gkf.split(train_inputs_all_cat, groups=train['stratify_group'])):
print(Fore.YELLOW);print('#'*45);print("### Fold : ", str(Fold+1));print('#'*45);print(Style.RESET_ALL)
train_data = train.iloc[train_index]
val_data = train.iloc[val_index]
print("Augmented data Present in Val Data : ",len(val_data[val_data['cnt'] != 1]))
print("Augmented data Present in Train Data : ",len(train_data[train_data['cnt'] != 1]))
val_data = val_data[val_data['cnt'] == 1]
print("Data Lekage : ",len(val_data[val_data['id'].isin(train_data['id'])]))
# print(train_data['stratify_group'].unique(),val_data['stratify_group'].unique())
print("number of Train Data points : ",len(train_data))
print("number of val_data Data points : ",len(val_data))
print("number of unique Structure in Train data : ", len(train_data.structure.unique()))
print("number of unique Structure in val data : ",len(val_data.structure.unique()), val_data.structure.value_counts()[:5].values)
print("Train SN_Filter == 1 : ", len(train_data[train_data['SN_filter']==1]))
print("val_data SN_Filter == 1 : ", len(val_data[val_data['SN_filter']==1]))
print("Train SN_Filter == 0 : ", len(train_data[train_data['SN_filter']==0]))
print("val_data SN_Filter == 0 : ", len(val_data[val_data['SN_filter']==0]))
print("Unique ID :",len(train_data.id.unique()))
sns.kdeplot(train[train['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Red",label='Train All')
sns.kdeplot(train_data[train_data['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Blue",label='Train')
sns.kdeplot(val_data[val_data['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Green",label='Validation')
ax[Fold][0].set_title(f'Fold : {Fold+1} Signal/Noise & SN_filter == 0')
sns.kdeplot(train[train['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Red",label='Train All')
sns.kdeplot(train_data[train_data['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Blue",label='Train')
sns.kdeplot(val_data[val_data['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Green",label='Validation')
ax[Fold][1].set_title(f'Fold : {Fold+1} Signal/Noise & SN_filter == 1')
sns.kdeplot(train['signal_to_noise'],ax=ax[Fold][2],color="Red",label='Train All')
sns.kdeplot(train_data['signal_to_noise'],ax=ax[Fold][2],color="Blue",label='Train')
sns.kdeplot(val_data['signal_to_noise'],ax=ax[Fold][2],color="Green",label='Validation')
ax[Fold][2].set_title(f'Fold : {Fold+1} Signal/Noise')
plt.show()
# In[24]:
submission = pd.DataFrame(index=sample_sub.index, columns=target_cols).fillna(0) # test dataframe with 0 values
val_losses = []
historys = []
oof_preds_all = []
stacking_pred_all = []
kf = KFold(n_folds, shuffle=True, random_state=SEED)
skf = StratifiedKFold(n_folds, shuffle=True, random_state=SEED)
gkf = GroupKFold(n_splits=n_folds)
for Fold, (train_index, val_index) in enumerate(gkf.split(train_inputs_all_cat, groups=train['stratify_group'])):
print(Fore.YELLOW);print('#'*45);print("### Fold : ", str(Fold+1));print('#'*45);print(Style.RESET_ALL)
print(f"|| Batch_size: {BATCH_SIZE} \n|| n_layers: {n_layers} \n|| embed_dim: {embed_dim}")
print(f"|| cat_feature: {cat_feature} \n|| num_features: {num_features}")
print(f"|| layers : {layers} \n|| hidden_dim: {hidden_dim} \n|| dropout: {dropout} \n|| sp_dropout: {sp_dropout}")
train_data = train.iloc[train_index]
val_data = train.iloc[val_index]
print("|| number Augmented data Present in Val Data : ",len(val_data[val_data['cnt'] != 1]))
print("|| number Augmented data Present in Train Data : ",len(train_data[train_data['cnt'] != 1]))
print("|| Data Lekage : ",len(val_data[val_data['id'].isin(train_data['id'])]))
val_data = val_data[val_data['cnt'] == 1]
model_train = build_model(embed_size=len(token_list))
model_short = build_model(embed_size=len(token_list),seq_len=107, pred_len=107)
model_long = build_model(embed_size=len(token_list),seq_len=130, pred_len=130)
train_inputs_cat = preprocess_categorical_inputs(train_data,cols=categorical_features)
train_inputs_num = preprocess_numerical_inputs(train_data,cols=numerical_features)
train_labels = np.array(train_data[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1))
val_inputs_cat = preprocess_categorical_inputs(val_data,cols=categorical_features)
val_inputs_num = preprocess_numerical_inputs(val_data,cols=numerical_features)
val_labels = np.array(val_data[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1))
# train_inputs_cat, train_labels = train_inputs_all_cat[train_index], train_labels_all[train_index]
# val_inputs_cat, val_labels = train_inputs_all_cat[val_index], train_labels_all[val_index]
# train_inputs_num, val_inputs_num = train_inputs_all_num[train_index],train_inputs_all_num[val_index]
# csv_logger
csv_logger = tf.keras.callbacks.CSVLogger(f'Fold_{Fold}_log.csv', separator=',', append=False)
# SAVE BEST MODEL EACH FOLD
checkpoint = tf.keras.callbacks.ModelCheckpoint(f'{model_name}_Fold_{Fold}.h5',
monitor='val_loss',
verbose=0,
mode='min',
save_freq='epoch')
if Cosine_Schedule:
#cosine Callback
lr_schedule= get_cosine_schedule_with_warmup(lr=0.001, num_warmup_steps=20, num_training_steps=epochs)
elif Rampup_decy_lr :
# Rampup decy lr
lr_schedule = get_lr_callback(BATCH_SIZE)
else:
lr_schedule = tf.keras.callbacks.ReduceLROnPlateau()
history = model_train.fit(
{'numeric_input': train_inputs_num,
'category_input': train_inputs_cat} , train_labels,
validation_data=({'numeric_input': val_inputs_num,
'category_input': val_inputs_cat}
,val_labels),
batch_size=BATCH_SIZE,
epochs=epochs,
callbacks=[lr_schedule, checkpoint, csv_logger,lr_schedule],
verbose=1 if debug else 0
)
print("Min Validation Loss : ", min(history.history['val_loss']))
print("Min Validation Epoch : ",np.argmin( history.history['val_loss'] )+1)
val_losses.append(min(history.history['val_loss']))
historys.append(history)
model_short.load_weights(f'{model_name}_Fold_{Fold}.h5')
model_long.load_weights(f'{model_name}_Fold_{Fold}.h5')
public_preds = model_short.predict({'numeric_input': public_inputs_num,
'category_input': public_inputs_cat})
private_preds = model_long.predict({'numeric_input': private_inputs_num,
'category_input': private_inputs_cat})
oof_preds = model_train.predict({'numeric_input': val_inputs_num,
'category_input': val_inputs_cat})
stacking_pred = model_short.predict({'numeric_input': val_inputs_num,
'category_input': val_inputs_cat})
preds_model = []
for df, preds in [(public_df, public_preds), (private_df, private_preds)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
single_df = pd.DataFrame(single_pred, columns=target_cols)
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]
preds_model.append(single_df)
preds_model_df = pd.concat(preds_model)
preds_model_df = preds_model_df.groupby(['id_seqpos'],as_index=True).mean()
submission[target_cols] += preds_model_df[target_cols].values / n_folds
for df, preds in [(val_data, oof_preds)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
single_label = val_labels[i]
            single_label_df = pd.DataFrame(single_label, columns=target_cols)
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_split(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.split("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.split("__")
tm.assert_series_equal(result, exp)
result = values.str.split("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.split("[,_]")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_object_mixed():
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.split("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_n(any_string_dtype, method):
s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)
expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=None)
tm.assert_series_equal(result, expected)
result = getattr(s.str, method)(" ", n=0)
tm.assert_series_equal(result, expected)
def test_rsplit(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.rsplit("__")
tm.assert_series_equal(result, exp)
result = values.str.rsplit("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.rsplit("[,_]")
exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_", n=1)
exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]])
tm.assert_series_equal(result, exp)
def test_rsplit_object_mixed():
# mixed
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.rsplit("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df
tm.assert_frame_equal(result, exp)
values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame(
[
["a", "b", "c"],
["a", "b", np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_split_noargs(any_string_dtype):
# #1859
s = Series(["<NAME>", "Travis Oliphant"], dtype=any_string_dtype)
result = s.str.split()
expected = ["Travis", "Oliphant"]
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
@pytest.mark.parametrize(
"data, pat",
[
(["bd asdf jfg", "kjasdflqw asdfnfk"], None),
(["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"),
(["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"),
],
)
def test_split_maxsplit(data, pat, any_string_dtype):
# re.split 0, str.split -1
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=-1)
xp = s.str.split(pat=pat)
tm.assert_series_equal(result, xp)
result = s.str.split(pat=pat, n=0)
tm.assert_series_equal(result, xp)
@pytest.mark.parametrize(
"data, pat, expected",
[
(
["split once", "split once too!"],
None,
Series({0: ["split", "once"], 1: ["split", "once too!"]}),
),
(
["split_once", "split_once_too!"],
"_",
Series({0: ["split", "once"], 1: ["split", "once_too!"]}),
),
],
)
def test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype):
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=1)
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)})
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_unequal_splits", "one_of_these_things_is_not"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{
0: ["some", "one"],
1: ["unequal", "of"],
2: ["splits", "these"],
3: [np.nan, "things"],
4: [np.nan, "is"],
5: [np.nan, "not"],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
with pytest.raises(ValueError, match="expand must be"):
s.str.split("_", expand="not_a_boolean")
def test_split_to_multiindex_expand():
# https://github.com/pandas-dev/pandas/issues/23677
idx = Index(["nosplit", "alsonosplit", np.nan])
result = idx.str.split("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "equal", "splits"),
("with", "no", "nans"),
[np.nan, np.nan, np.nan],
[None, None, None],
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_unequal_splits", "one_of_these_things_is_not", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "unequal", "splits", np.nan, np.nan, np.nan),
("one", "of", "these", "things", "is", "not"),
(np.nan, np.nan, np.nan, np.nan, np.nan, np.nan),
(None, None, None, None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with pytest.raises(ValueError, match="expand must be"):
idx.str.split("_", expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}, dtype=any_string_dtype)
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=2)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=1)
exp = DataFrame(
{0: ["some_equal", "with_no"], 1: ["splits", "nans"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand():
idx = Index(["nosplit", "alsonosplit"])
result = idx.str.rsplit("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True)
exp = MultiIndex.from_tuples([("some", "equal", "splits"), ("with", "no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True, n=1)
exp = MultiIndex.from_tuples([("some_equal", "splits"), ("with_no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(any_string_dtype):
# gh-18450
s = Series(["foo,bar,baz", np.nan], dtype=any_string_dtype)
result = s.str.split(",", expand=True)
exp = DataFrame(
[["foo", "bar", "baz"], [np.nan, np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan/pd.NA and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
if any_string_dtype == "object":
assert all(np.isnan(x) for x in result.iloc[1])
else:
assert all(x is pd.NA for x in result.iloc[1])
def test_split_with_name(any_string_dtype):
# GH 12617
# should preserve name
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
res = s.str.split(",")
exp = Series([["a", "b"], ["c", "d"]], name="xxx")
tm.assert_series_equal(res, exp)
res = s.str.split(",", expand=True)
exp = DataFrame([["a", "b"], ["c", "d"]], dtype=any_string_dtype)
tm.assert_frame_equal(res, exp)
idx = Index(["a,b", "c,d"], name="xxx")
res = idx.str.split(",")
exp = Index([["a", "b"], ["c", "d"]], name="xxx")
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(",", expand=True)
exp = MultiIndex.from_tuples([("a", "b"), ("c", "d")])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series(
[("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series(
[("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h"), None]
)
tm.assert_series_equal(result, expected)
# more than one char
s = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None])
result = s.str.partition("__", expand=False)
expected = Series(
[
("a", "__", "b__c"),
("c", "__", "d__e"),
np.nan,
("f", "__", "g__h"),
None,
],
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("__", expand=False)
expected = Series(
[
("a__b", "__", "c"),
("c__d", "__", "e"),
np.nan,
("f__g", "__", "h"),
None,
],
)
tm.assert_series_equal(result, expected)
# None
s = Series(["a b c", "c d e", np.nan, "f g h", None], dtype=any_string_dtype)
result = s.str.partition(expand=False)
expected = Series(
[("a", " ", "b c"), ("c", " ", "d e"), np.nan, ("f", " ", "g h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition(expand=False)
expected = Series(
[("a b", " ", "c"), ("c d", " ", "e"), np.nan, ("f g", " ", "h"), None]
)
tm.assert_series_equal(result, expected)
# Not split
s = Series(["abc", "cde", np.nan, "fgh", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None])
tm.assert_series_equal(result, expected)
# unicode
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h")])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h")])
tm.assert_series_equal(result, expected)
# compare to standard lib
s = Series(["A_B_C", "B_C_D", "E_F_G", "EFGHEF"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False).tolist()
assert result == [v.partition("_") for v in s]
result = s.str.rpartition("_", expand=False).tolist()
assert result == [v.rpartition("_") for v in s]
def test_partition_index():
# https://github.com/pandas-dev/pandas/issues/23558
values = Index(["a_b_c", "c_d_e", "f_g_h", np.nan, None])
result = values.str.partition("_", expand=False)
exp = Index(
np.array(
[("a", "_", "b_c"), ("c", "_", "d_e"), ("f", "_", "g_h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition("_", expand=False)
exp = Index(
np.array(
[("a_b", "_", "c"), ("c_d", "_", "e"), ("f_g", "_", "h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition("_")
exp = Index(
[
("a", "_", "b_c"),
("c", "_", "d_e"),
("f", "_", "g_h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition("_")
exp = Index(
[
("a_b", "_", "c"),
("c_d", "_", "e"),
("f_g", "_", "h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_")
expected = DataFrame(
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
result = s.str.rpartition("_")
expected = DataFrame(
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=True)
expected = DataFrame(
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
result = s.str.rpartition("_", expand=True)
expected = DataFrame(
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_partition_with_name(any_string_dtype):
# GH 12617
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
result = s.str.partition(",")
expected = DataFrame(
{0: ["a", "c"], 1: [",", ","], 2: ["b", "d"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# should preserve name
result = s.str.partition(",", expand=False)
expected = Series([("a", ",", "b"), ("c", ",", "d")], name="xxx")
tm.assert_series_equal(result, expected)
def test_partition_index_with_name():
idx = Index(["a,b", "c,d"], name="xxx")
result = idx.str.partition(",")
expected = MultiIndex.from_tuples([("a", ",", "b"), ("c", ",", "d")])
assert result.nlevels == 3
tm.assert_index_equal(result, expected)
# should preserve name
result = idx.str.partition(",", expand=False)
expected = Index(np.array([("a", ",", "b"), ("c", ",", "d")]), name="xxx")
assert result.nlevels == 1
tm.assert_index_equal(result, expected)
def test_partition_sep_kwarg(any_string_dtype):
# GH 22676; depr kwarg "pat" in favor of "sep"
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
expected = s.str.partition(sep="_")
result = s.str.partition("_")
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
import io
import logging
import pandas as pd
import requests
from zvt.api import china_stock_code_to_id
from zvt.recorders.consts import DEFAULT_HEADER
from zvt.utils import now_pd_timestamp
logger = logging.getLogger(__name__)
original_page_url = "http://www.csindex.com.cn/zh-CN/downloads/indices"
url = "http://www.csindex.com.cn/uploads/file/autofile/cons/{}cons.xls"
def get_cs_index_stock(code, timestamp, name=None):
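    """Download the constituent list of a CSI index and reshape it into records.

    Fetches the ``{code}cons.xls`` file from the CS Index site, keeps the date,
    constituent code and constituent name columns, and derives entity and stock
    identifiers for every constituent row.
    """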
entity_type = "index"
exchange = "sh"
entity_id = f"{entity_type}_{exchange}_{code}"
response = requests.get(url.format(code), headers=DEFAULT_HEADER)
response.raise_for_status()
df = pd.read_excel(io.BytesIO(response.content))
df = df[["日期Date", "成分券代码Constituent Code", "成分券名称Constituent Name"]].rename(
columns={"日期Date": "timestamp", "成分券代码Constituent Code": "stock_code", "成分券名称Constituent Name": "stock_name"}
)
df["entity_id"] = entity_id
df["entity_type"] = "index"
df["exchange"] = "sh"
df["code"] = code
df["name"] = name
df["stock_id"] = df["stock_code"].apply(lambda x: china_stock_code_to_id(str(x)))
# id format: {entity_id}_{timestamp}_{stock_id}
df["id"] = df[["entity_id", "timestamp", "stock_id"]].apply(lambda x: "_".join(x.astype(str)), axis=1)
df["timestamp"] = | pd.to_datetime(df["timestamp"]) | pandas.to_datetime |
from django.http import JsonResponse
import requests
import asyncio
import aiohttp
import numpy as np
import pandas as pd
from pandas import json_normalize
import json
from functools import reduce
import unidecode
from random import randint
from time import sleep
import traceback
import sys
import random
import logging
def get_spotify_music_profile(access_token):
    spotifyAPI = SpotifyAPI(access_token)
try:
music_profile = spotifyAPI.get_music_profile()
return music_profile
except Exception as e:
# traceback.format_exc()
print('GLOBAL EXCEPTION - BAD. RETURNING ERROR TO FRONT END')
logging.exception("music profile refresh exception")
error_report = {
'error': {
'message': str(e),
'status': 500,
}
}
return error_report
class SpotifyAPI:
REQUEST_EXCEPTION_MSG = "Spotify API Request Exception while fetching "
SAVE_PROFILE_AS_CSV = False
USER_PLAYLISTS_ONLY = True # don't change unless you want playlists a user follows to also be included
def __init__(self, access_token):
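        """Store the bearer-token header, look up the Spotify user id and
        initialise empty accumulators for the artist/track dataframes."""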
self.header = {'Authorization' : "Bearer "+access_token}
self.user_id = self.fetch_user_id()
self.artist_columns = []
self.track_columns = []
self.artists_dataframes = []
self.tracks_dataframes = []
def get_music_profile(self):
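        """Collect the user's artists and tracks asynchronously and return them
        as record-oriented JSON strings under the ``artists`` and ``tracks`` keys."""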
asyncio.run(self.collect_artists_and_tracks_dataframes())
print("converting dataframes to JSON...")
print(f'returning { self.artists_df.shape[0] } artists and { self.tracks_df.shape[0] } tracks')
if self.SAVE_PROFILE_AS_CSV:
self.artists_df.to_csv('artists_df.csv')
self.tracks_df.to_csv('tracks_df.csv')
artists_json = self.get_artists_json(self.artists_df)
tracks_json = self.get_tracks_json(self.tracks_df)
music_profile = {
"artists" : artists_json,
"tracks" : tracks_json,
}
return music_profile
def get_artists_json(self, artists_df):
return artists_df.to_json(orient='records')
def get_tracks_json(self, tracks_df):
return tracks_df.to_json(orient='records')
async def collect_artists_and_tracks_dataframes(self):
# fetch artists and tracks together, due to how the Spotify API returns both
print("collect_artists_and_tracks_dataframes()...")
tasks = [self.fetch_top_artists("long_term"), self.fetch_top_artists("medium_term"), self.fetch_top_artists("short_term")
, self.fetch_top_tracks("long_term"), self.fetch_top_tracks("medium_term"), self.fetch_top_tracks("short_term")
, self.fetch_followed_artists(), self.fetch_saved_tracks(), self.get_all_playlists()]
await asyncio.gather(*tasks)
print("initial tasks (fetches) have finishing gathering..")
print("initiating get_artists_master_df(), where full artist objects will be fetched..")
self.artists_df = await self.get_artists_master_df()
print("finished fetching full objects.")
self.tracks_df = self.get_tracks_master_df()
async def get_artists_master_df(self):
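        """Merge the per-source artist dataframes, fetch full artist objects for
        ids that are missing details, and aggregate to one row per artist with
        image URLs, genre/track lists and derived counts."""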
if self.artists_dataframes == []:
return pd.DataFrame()
artists_df = None
if len(self.artists_dataframes) > 1:
artists_df = reduce(lambda left, right: pd.merge(left, right, how="outer"), self.artists_dataframes)
else:
artists_df = self.artists_dataframes[0]
artists_df = artists_df.drop_duplicates()
if 'id' not in artists_df:
return pd.DataFrame()
# add all columns needed if we don't have them yet
for col in self.artist_columns:
if col not in artists_df:
artists_df[col] = np.NaN
if 'track.id' not in artists_df:
artists_df['track.id'] = np.NaN
        # Fill in missing artist details (e.g. images) with a second gather
        # of full artist objects from the Spotify API.
if 'image' in artists_df:
artists_missing = artists_df[artists_df['image'].isnull()]
else:
artists_missing = artists_df
missing_ids = artists_missing['id'].tolist()
missing_ids = list(set(missing_ids))
if len(missing_ids) > 0:
artists_full_df = await self.get_full_artist_dataframes(missing_ids)
artists_df = pd.merge(artists_df, artists_full_df, how="outer")
artists_df = artists_df.drop_duplicates()
artists_df['smallImage'] = artists_df['image']
artists_df['bigImage'] = artists_df['image']
artists_df.drop('image', axis = 1)
artists_df_transform = {}
for column in self.artist_columns:
artists_df_transform[column] = 'max'
artists_df_transform['bigImage'] = 'first'
artists_df_transform['smallImage'] = 'last'
artists_df_transform['uri'] = 'first'
def agg_track_list(tracks): # set to remove duplicates
track_list = [x for x in list(set(tracks)) if str(x) != 'nan']
return track_list
artists_df_transform['track.id'] = agg_track_list
def agg_genres_list(genres):
genre_list = [x for x in list(set(genres)) if str(x) != 'nan']
return genre_list
artists_df_transform['genres'] = agg_genres_list
artists_df = artists_df.groupby(['id', 'name']).agg(artists_df_transform)
artists_df.rename(columns = {'track.id': 'tracks'}, inplace = True)
artists_df[self.artist_columns] = artists_df[self.artist_columns].fillna(value=False)
artists_df.reset_index(level=['id', 'name'], inplace = True)
# add artist's tracks_length
def get_tracks_len(row):
return len(list(row['tracks']))
artists_df['tracks_length'] = artists_df.apply(get_tracks_len, axis=1)
# add artist's genres_length
def get_genres_len(row):
return len(list(row['genres']))
artists_df['genres_length'] = artists_df.apply(get_genres_len, axis=1)
def get_ascii_artist_name(row):
return unidecode.unidecode(row['name'])
artists_df['name_ascii'] = artists_df.apply(get_ascii_artist_name, axis=1)
return artists_df
def get_tracks_master_df(self):
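        """Merge the per-source track dataframes into one deduplicated dataframe
        (empty if nothing was collected)."""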
if self.tracks_dataframes == []:
return pd.DataFrame()
tracks_df = reduce(lambda left, right: pd.merge(left, right, how="outer"), self.tracks_dataframes)
tracks_df = tracks_df.drop_duplicates()
if 'id' not in tracks_df:
return | pd.DataFrame() | pandas.DataFrame |
import re
from collections import defaultdict
import numpy as np
import pandas as pd
userid = 'userId'
algs = ['PureSVD', 'PSI']
alg_files = {'PureSVD': 'SVD', 'PSI': 'PSI', 'MP': 'MPRec', 'RND': 'RRec'}
metrics = ['HR', 'MRR', 'Coverage']
metric_files = {'HR': 'StepHirate', 'MRR': 'StepMRR', 'Coverage': 'COVRatio'}
display_ranks = range(10, 100, 10)
def read_metric(alg, metric, folder, label):
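    """Read one algorithm/metric CSV, index it by rank and keep only the ``Step_<n>`` columns."""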
file = f'{folder}/{label}_{alg_files[alg]}_{metric_files[metric]}.csv'
return pd.read_csv(file).set_index('Rank').filter(regex=r'Step_\d+$')
def read_stability(folder, label, alg):
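    """Load the all-users correlation array for one algorithm, trying the ``.npy``
    file first and falling back to the compressed ``.npz`` archive."""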
file = f'{folder}/{label}_{alg_files[alg]}_ALLUsersCORR'
try:
return np.load(f'{file}.npy')
except FileNotFoundError:
return np.load(f'{file}.npz')['arr_0']
def read_data(folder, label):
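    """Collect the test-user list and every algorithm/metric table for one result label."""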
data = defaultdict(dict)
scores = defaultdict(dict)
try:
test_users = pd.read_csv(f'{folder}/{label}_Userlist.gz', index_col=[0, 1])
except FileNotFoundError:
test_users = pd.read_csv(f'{folder}/{label}_User_list.gz', index_col=[0, 1])
for alg in algs:
for metric in metrics:
data[alg][metric] = read_metric(alg, metric, folder, label)
for metric in metrics:
scores[metric]['wide'] = (
| pd.concat({alg:data[alg][metric] for alg in algs}, axis=1) | pandas.concat |
import numpy as np
from flask import Flask, request, jsonify, render_template
import joblib
import pandas as pd
import datetime as dt
app = Flask(__name__)
model = joblib.load("Employee_attrition.joblib")  # joblib.load accepts a file path directly, so no file handle is left open
@app.route('/')
def home():
return render_template('index.html')
def age(DOB):
DOB = | pd.to_datetime(DOB) | pandas.to_datetime |
from typing import Tuple
from TingYu import measurement_table
import pandas as pd
def combine_raw_data(
    data_red_dir: str, data_white_dir: str
) -> Tuple[pd.DataFrame, pd.DataFrame]:
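    """Read the 'white' and 'red' raw data CSV files and return them as a pair of dataframes."""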
data_white = | pd.read_csv(data_white_dir) | pandas.read_csv |
import unittest
import numpy as np
from pandas import Index
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as common
import pandas._tseries as lib
class TestTseriesUtil(unittest.TestCase):
def test_combineFunc(self):
pass
def test_reindex(self):
pass
def test_isnull(self):
pass
def test_groupby(self):
pass
def test_groupby_withnull(self):
pass
def test_merge_indexer(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.merge_indexer_object(new, old.indexMap)
expect_filler = [-1, 0, -1, -1, -1, 1, -1, -1, -1, -1, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.merge_indexer_object(new, old.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_pad(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([5, 10])
new = Index(range(5))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = lib.left_join_indexer_int64(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype='i4')
assert(np.array_equal(result, expected))
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.inner_join_indexer_int64(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([2, 4])
bexp = np.array([1, 2])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.outer_join_indexer_int64(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int32)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
4, 3, 2, 1, 0])]
assert(not lib.is_lexsorted(failure))
# def test_get_group_index():
# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype='i4')
# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype='i4')
# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype='i4')
# result = lib.get_group_index([a, b], (3, 4))
# assert(np.array_equal(result, expected))
def test_groupsort_indexer():
a = np.random.randint(0, 1000, 100).astype('i4')
b = np.random.randint(0, 1000, 100).astype('i4')
result = lib.groupsort_indexer(a, 1000)[0]
# need to use a stable sort
expected = np.argsort(a, kind='mergesort')
assert(np.array_equal(result, expected))
# compare with lexsort
key = a * 1000 + b
result = lib.groupsort_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
assert(np.array_equal(result, expected))
def test_duplicated_with_nas():
keys = [0, 1, np.nan, 0, 2, np.nan]
result = lib.duplicated(keys)
expected = [False, False, False, True, False, True]
assert(np.array_equal(result, expected))
result = lib.duplicated(keys, take_last=True)
expected = [True, False, True, False, False, False]
assert(np.array_equal(result, expected))
keys = [(0, 0), (0, np.nan), (np.nan, 0), (np.nan, np.nan)] * 2
result = lib.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = falses + trues
assert(np.array_equal(result, expected))
result = lib.duplicated(keys, take_last=True)
expected = trues + falses
assert(np.array_equal(result, expected))
def test_convert_objects():
arr = np.array(['a', 'b', np.nan, np.nan, 'd', 'e', 'f'], dtype='O')
result = lib.maybe_convert_objects(arr)
assert(result.dtype == np.object_)
def test_convert_objects_ints():
# test that we can detect many kinds of integers
dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
for dtype_str in dtypes:
arr = np.array(list(np.arange(20, dtype=dtype_str)), dtype='O')
assert(arr[0].dtype == np.dtype(dtype_str))
result = | lib.maybe_convert_objects(arr) | pandas._tseries.maybe_convert_objects |
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
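    """Assert that ``offset.onOffset(date)`` returns the expected boolean."""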
actual = offset.onOffset(date)
assert actual == expected
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
        tests.append((Week(-2, weekday=1), # n=-2 -> roll back two weeks, anchored on Tuesday
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
(Week(weekday=0), datetime(2008, 1, 5), False),
(Week(weekday=0), datetime(2008, 1, 6), False),
(Week(weekday=0), datetime(2008, 1, 7), True),
(Week(weekday=1), datetime(2008, 1, 1), True),
(Week(weekday=1), datetime(2008, 1, 2), False),
(Week(weekday=1), datetime(2008, 1, 3), False),
(Week(weekday=1), datetime(2008, 1, 4), False),
(Week(weekday=1), datetime(2008, 1, 5), False),
(Week(weekday=1), datetime(2008, 1, 6), False),
(Week(weekday=1), datetime(2008, 1, 7), False),
(Week(weekday=2), datetime(2008, 1, 1), False),
(Week(weekday=2), datetime(2008, 1, 2), True),
(Week(weekday=2), datetime(2008, 1, 3), False),
(Week(weekday=2), datetime(2008, 1, 4), False),
(Week(weekday=2), datetime(2008, 1, 5), False),
(Week(weekday=2), datetime(2008, 1, 6), False),
(Week(weekday=2), datetime(2008, 1, 7), False),
(Week(weekday=3), datetime(2008, 1, 1), False),
(Week(weekday=3), datetime(2008, 1, 2), False),
(Week(weekday=3), datetime(2008, 1, 3), True),
(Week(weekday=3), datetime(2008, 1, 4), False),
(Week(weekday=3), datetime(2008, 1, 5), False),
(Week(weekday=3), datetime(2008, 1, 6), False),
(Week(weekday=3), datetime(2008, 1, 7), False),
(Week(weekday=4), datetime(2008, 1, 1), False),
(Week(weekday=4), datetime(2008, 1, 2), False),
(Week(weekday=4), datetime(2008, 1, 3), False),
(Week(weekday=4), datetime(2008, 1, 4), True),
(Week(weekday=4), datetime(2008, 1, 5), False),
(Week(weekday=4), datetime(2008, 1, 6), False),
(Week(weekday=4), datetime(2008, 1, 7), False),
(Week(weekday=5), datetime(2008, 1, 1), False),
(Week(weekday=5), datetime(2008, 1, 2), False),
(Week(weekday=5), datetime(2008, 1, 3), False),
(Week(weekday=5), datetime(2008, 1, 4), False),
(Week(weekday=5), datetime(2008, 1, 5), True),
(Week(weekday=5), datetime(2008, 1, 6), False),
(Week(weekday=5), datetime(2008, 1, 7), False),
(Week(weekday=6), datetime(2008, 1, 1), False),
(Week(weekday=6), datetime(2008, 1, 2), False),
(Week(weekday=6), datetime(2008, 1, 3), False),
(Week(weekday=6), datetime(2008, 1, 4), False),
(Week(weekday=6), datetime(2008, 1, 5), False),
(Week(weekday=6), datetime(2008, 1, 6), True),
(Week(weekday=6), datetime(2008, 1, 7), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBQuarterEnd(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, BQuarterEnd, startingMonth=4)
self.assertRaises(Exception, BQuarterEnd, startingMonth=-1)
def test_isAnchored(self):
self.assert_(BQuarterEnd(startingMonth=1).isAnchored())
self.assert_(BQuarterEnd().isAnchored())
self.assert_(not BQuarterEnd(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((BQuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31),}))
tests.append((BQuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),}))
tests.append((BQuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 1, 31) + offset, datetime(2010, 1, 29))
def test_onOffset(self):
tests = [(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
( | BQuarterEnd(1, startingMonth=1) | pandas.core.datetools.BQuarterEnd |
from datetime import datetime, date
import sys
if sys.version_info >= (2, 7):
from nose.tools import assert_dict_equal
import xlwings as xw
try:
import numpy as np
from numpy.testing import assert_array_equal
def nparray_equal(a, b):
try:
assert_array_equal(a, b)
except AssertionError:
return False
return True
except ImportError:
np = None
try:
import pandas as pd
from pandas import DataFrame, Series
from pandas.util.testing import assert_frame_equal, assert_series_equal
def frame_equal(a, b):
try:
assert_frame_equal(a, b)
except AssertionError:
return False
return True
def series_equal(a, b):
try:
assert_series_equal(a, b)
except AssertionError:
return False
return True
except ImportError:
pd = None
def dict_equal(a, b):
try:
assert_dict_equal(a, b)
except AssertionError:
return False
return True
# Defaults
@xw.func
def read_float(x):
return x == 2.
@xw.func
def write_float():
return 2.
@xw.func
def read_string(x):
return x == 'xlwings'
@xw.func
def write_string():
return 'xlwings'
@xw.func
def read_empty(x):
return x is None
@xw.func
def read_date(x):
return x == datetime(2015, 1, 15)
@xw.func
def write_date():
return datetime(1969, 12, 31)
@xw.func
def read_datetime(x):
return x == datetime(1976, 2, 15, 13, 6, 22)
@xw.func
def write_datetime():
return datetime(1976, 2, 15, 13, 6, 23)
@xw.func
def read_horizontal_list(x):
return x == [1., 2.]
@xw.func
def write_horizontal_list():
return [1., 2.]
@xw.func
def read_vertical_list(x):
return x == [1., 2.]
@xw.func
def write_vertical_list():
return [[1.], [2.]]
@xw.func
def read_2dlist(x):
return x == [[1., 2.], [3., 4.]]
@xw.func
def write_2dlist():
return [[1., 2.], [3., 4.]]
# Keyword args on default converters
@xw.func
@xw.arg('x', ndim=1)
def read_ndim1(x):
return x == [2.]
@xw.func
@xw.arg('x', ndim=2)
def read_ndim2(x):
return x == [[2.]]
@xw.func
@xw.arg('x', transpose=True)
def read_transpose(x):
return x == [[1., 3.], [2., 4.]]
@xw.func
@xw.ret(transpose=True)
def write_transpose():
return [[1., 2.], [3., 4.]]
@xw.func
@xw.arg('x', dates=date)
def read_dates_as1(x):
return x == [[1., date(2015, 1, 13)], [date(2000, 12, 1), 4.]]
@xw.func
@xw.arg('x', dates=date)
def read_dates_as2(x):
return x == date(2005, 1, 15)
@xw.func
@xw.arg('x', dates=datetime)
def read_dates_as3(x):
return x == [[1., datetime(2015, 1, 13)], [datetime(2000, 12, 1), 4.]]
@xw.func
@xw.arg('x', empty='empty')
def read_empty_as(x):
return x == [[1., 'empty'], ['empty', 4.]]
if sys.version_info >= (2, 7):
# assert_dict_equal isn't available on nose for PY 2.6
# Dicts
@xw.func
@xw.arg('x', dict)
def read_dict(x):
return dict_equal(x, {'a': 1., 'b': 'c'})
@xw.func
@xw.arg('x', dict, transpose=True)
def read_dict_transpose(x):
return dict_equal(x, {1.0: 'c', 'a': 'b'})
@xw.func
def write_dict():
return {'a': 1., 'b': 'c'}
# Numpy Array
if np:
@xw.func
@xw.arg('x', np.array)
def read_scalar_nparray(x):
return nparray_equal(x, np.array(1.))
@xw.func
@xw.arg('x', np.array)
def read_empty_nparray(x):
return nparray_equal(x, np.array(np.nan))
@xw.func
@xw.arg('x', np.array)
def read_horizontal_nparray(x):
return nparray_equal(x, np.array([1., 2.]))
@xw.func
@xw.arg('x', np.array)
def read_vertical_nparray(x):
return nparray_equal(x, np.array([1., 2.]))
@xw.func
@xw.arg('x', np.array)
def read_date_nparray(x):
return nparray_equal(x, np.array(datetime(2000, 12, 20)))
# Keyword args on Numpy arrays
@xw.func
@xw.arg('x', np.array, ndim=1)
def read_ndim1_nparray(x):
return nparray_equal(x, np.array([2.]))
@xw.func
@xw.arg('x', np.array, ndim=2)
def read_ndim2_nparray(x):
return nparray_equal(x, np.array([[2.]]))
@xw.func
@xw.arg('x', np.array, transpose=True)
def read_transpose_nparray(x):
return nparray_equal(x, np.array([[1., 3.], [2., 4.]]))
@xw.func
@xw.ret(transpose=True)
def write_transpose_nparray():
return np.array([[1., 2.], [3., 4.]])
@xw.func
@xw.arg('x', np.array, dates=date)
def read_dates_as_nparray(x):
return nparray_equal(x, np.array(date(2000, 12, 20)))
@xw.func
@xw.arg('x', np.array, empty='empty')
def read_empty_as_nparray(x):
return nparray_equal(x, np.array('empty'))
@xw.func
def write_np_scalar():
return np.float64(2)
# Pandas Series
if pd:
@xw.func
@xw.arg('x', pd.Series, header=False, index=False)
def read_series_noheader_noindex(x):
return series_equal(x, pd.Series([1., 2.]))
@xw.func
@xw.arg('x', pd.Series, header=False, index=True)
def read_series_noheader_index(x):
return series_equal(x, pd.Series([1., 2.], index=[10., 20.]))
@xw.func
@xw.arg('x', pd.Series, header=True, index=False)
def read_series_header_noindex(x):
return series_equal(x, pd.Series([1., 2.], name='name'))
@xw.func
@xw.arg('x', pd.Series, header=True, index=True)
def read_series_header_named_index(x):
return series_equal(x, pd.Series([1., 2.], name='name', index=pd.Index([10., 20.], name='ix')))
@xw.func
@xw.arg('x', pd.Series, header=True, index=True)
def read_series_header_nameless_index(x):
return series_equal(x, pd.Series([1., 2.], name='name', index=[10., 20.]))
@xw.func
@xw.arg('x', pd.Series, header=True, index=2)
def read_series_header_nameless_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return series_equal(x, pd.Series([1., 2.], name='name', index=ix))
@xw.func
@xw.arg('x', pd.Series, header=True, index=2)
def read_series_header_named_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]], names=['ix1', 'ix2'])
return series_equal(x, pd.Series([1., 2.], name='name', index=ix))
@xw.func
@xw.arg('x', pd.Series, header=False, index=2)
def read_series_noheader_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return series_equal(x, pd.Series([1., 2.], index=ix))
@xw.func
@xw.ret(pd.Series, index=False)
def write_series_noheader_noindex():
return pd.Series([1., 2.])
@xw.func
@xw.ret(pd.Series, index=True)
def write_series_noheader_index():
return pd.Series([1., 2.], index=[10., 20.])
@xw.func
@xw.ret(pd.Series, index=False)
def write_series_header_noindex():
return pd.Series([1., 2.], name='name')
@xw.func
def write_series_header_named_index():
return pd.Series([1., 2.], name='name', index=pd.Index([10., 20.], name='ix'))
@xw.func
@xw.ret(pd.Series, index=True, header=True)
def write_series_header_nameless_index():
return | pd.Series([1., 2.], name='name', index=[10., 20.]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# # Python test script to read cross tabulation results and produce sankey plots
## conda install -c plotly plotly-orca
# First we import the required libraries
import numpy as np
import pandas as pd
#import plotly as py
import plotly.graph_objects as go
from pathlib import Path
import os
##from plotly_svg import to_svg, to_pdf
# The tables were produced by the GRASS **r.stats** command.
# I use the *read_csv* function from the pandas library to read the data for a single
# Ecosystem Functional Group. The columns are not labelled, so the order of the maps in
# the original command must be checked to guarantee they are interpreted correctly. For
# the terrestrial EFGs I crossed the four Anthrome layers first, then the two protected
# area layers, the Human Footprint layer, and finally the EFG indicative map.
## a first function definition for reading and formatting the files
## The function filters out rows with *NA* values in the *map* column (areas outside the
## range of the indicative maps), while *NA* values in the other columns are replaced with zero (0).
def read_rstat(arch,cols,filter1,filter2):
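    """Read a space-separated r.stats table into a dataframe with the given column names.

    Rows where either filter column is missing ('*' in the raw output) are dropped,
    and remaining missing values in the other columns are replaced with zero.
    """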
y = pd.read_csv(arch,sep=" ",header=None,
names=cols,
na_values="*",
low_memory=False)
y = y[~pd.isnull(y[filter1])]
y = y[~pd.isnull(y[filter2])]
for col in cols:
if col != filter1:
y[col].fillna(value=0, inplace = True)
return y
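# Example call (hypothetical file name and column labels, shown for illustration only):
# efg = read_rstat("T1_1_rstats.txt",
#                  cols=["anthrome", "protected", "hfp", "map", "area"],
#                  filter1="map", filter2="area")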
## defining the color palette:
mi_colors={"EFG":"#A6CEE3",
"wild":"#B2DF8A",
"degraded":"#FFFF99",
"protected":"#33A02C",
"seminatural":"#FFFF99",
"transformed":"#FB9A99",
"mosaic":"#FFFF99",
"urban":"#6A3D9A",
"pastures":"#CAB2D6",
"crops":"#FDBF6F",
"rice":"#FF7F00"
}
## now a function to make the links for the simple case (freshwater and marine)
def make_links_simple(t_name,t_df):
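    """Build the source/target series for the simple sankey case (freshwater and marine EFGs)."""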
d={"source": | pd.Series([t_name,t_name,"wild","wild","degraded","degraded"]) | pandas.Series |
"""Tests for the SQLite DatabaseManager `SQLiteDb`.
Tests all methods of the DatabaseManager because it is easy to test with SQLite.
"""
# =================================================
# Imports
# =================================================
# Standard Library
from pathlib import Path
# Third Party
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
from sqlalchemy.sql import text
import sqlalchemy
# Local
import pandemy
from .dependencies import PANDAS_VERSION
# =================================================
# Setup
# =================================================
class SQLiteSQLContainer(pandemy.SQLContainer):
r"""A correctly defined pandemy.SQLContainer subclass"""
my_query = 'SELECT * FROM MyTable;'
class SQLiteFakeSQLContainer:
r"""
SQLContainer class that does not inherit from `pandemy.SQLContainer`.
This class is not a valid input to the container parameter of
`pandemy.DatabaseManager`.
"""
my_query = 'SELECT * FROM MyTable;'
# =================================================
# Tests
# =================================================
class TestInitSQLiteDb:
r"""Test the initalization of the SQLite DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db_file : Path
Path to a SQLite database that exists on disk.
"""
def test_all_defaults(self):
r"""Create an instance of SQLiteDb that lives in memory with all default values."""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
db = pandemy.SQLiteDb()
# Verify
# ===========================================================
assert db.file == ':memory:'
assert db.must_exist is False
assert db.container is None
assert db.engine_config is None
assert db.conn_str == r'sqlite://'
assert isinstance(db.engine, sqlalchemy.engine.base.Engine)
# Clean up - None
# ===========================================================
def test_in_memory(self):
r"""Create an instance of SQLiteDb that lives in memory."""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
db = pandemy.SQLiteDb(file=':memory:')
# Verify
# ===========================================================
assert db.file == ':memory:'
assert db.must_exist is False
assert db.conn_str == r'sqlite://'
assert isinstance(db.engine, sqlalchemy.engine.base.Engine)
# Clean up - None
# ===========================================================
@pytest.mark.parametrize('file_as_str', [pytest.param(True, id='str'), pytest.param(False, id='Path')])
def test_file_must_exist(self, file_as_str, sqlite_db_file):
r"""Create an instance with a file supplied as a string and pathlib.Path object.
The default option `must_exist` is set to True.
The file exists on disk.
Parameters
----------
file_as_str : bool
True if the file should be supplied as a string and False for pathlib.Path.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
if file_as_str:
db = pandemy.SQLiteDb(file=str(sqlite_db_file), must_exist=True)
else:
db = pandemy.SQLiteDb(file=sqlite_db_file, must_exist=True)
# Verify
# ===========================================================
assert db.file == sqlite_db_file
assert db.must_exist is True
assert db.conn_str == fr'sqlite:///{str(sqlite_db_file)}'
assert isinstance(db.engine, sqlalchemy.engine.base.Engine)
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('file', [pytest.param('does not exist', id='str'),
pytest.param(Path('does not exist'), id='Path')])
def test_on_file_must_exist_file_does_not_exist(self, file):
r"""Create an instance with a file supplied as a string and pathlib.Path object.
The default option `must_exist` is set to True.
The file does not exists on disk.
pandemy.DatabaseFileNotFoundError is expected to be raised.
Parameters
----------
file : str or Path
The file with the SQLite database.
"""
# Setup - None
# ===========================================================
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.DatabaseFileNotFoundError):
pandemy.SQLiteDb(file=file, must_exist=True)
# Clean up - None
# ===========================================================
def test_on_file_with_SQLContainer(self):
r"""Create an instance with a SQLContainer class.
The option `must_exist` is set to False.
The file does not exists on disk.
"""
# Setup
# ===========================================================
must_exist = False
file = 'mydb.db'
# Exercise
# ===========================================================
db = pandemy.SQLiteDb(file=file, must_exist=must_exist, container=SQLiteSQLContainer)
# Verify
# ===========================================================
assert db.file == Path(file)
assert db.must_exist is must_exist
assert db.container is SQLiteSQLContainer
# Clean up - None
# ===========================================================
# file, must_exist, container, engine_config, error_msg
input_test_bad_input = [
pytest.param(42, False, None, None, 'Received: 42', id='file=42'),
pytest.param('my_db.db', 'False', None, {'encoding': 'UTF-8'}, 'Received: False', id="must_exist='False'"),
pytest.param('my_db.db', False, [42], None, 'container must be a subclass of pandemy.SQLContainer',
id="container=[42]"),
pytest.param(Path('my_db.db'), False, SQLiteFakeSQLContainer, None,
'container must be a subclass of pandemy.SQLContainer', id="container=FakeSQLContainer"),
pytest.param('my_db.db', False, None, 42, 'engine_config must be a dict', id="engine_config=42"),
]
@pytest.mark.raises
@pytest.mark.parametrize('file, must_exist, container, engine_config, error_msg', input_test_bad_input)
def test_bad_input_parameters(self, file, must_exist, container, engine_config, error_msg):
r"""Test bad input parameters.
pandemy.InvalidInputError is expected to be raised.
Parameters
----------
file : str or Path, default ':memory:'
The file (with path) to the SQLite database.
The default creates an in memory database.
must_exist : bool, default True
If True validate that file exists unless file = ':memory:'.
If it does not exist FileNotFoundError is raised.
If False the validation is omitted.
container : pandemy.SQLContainer or None, default None
A container of database statements that the SQLite DatabaseManager can use.
engine_config : dict or None
Additional keyword arguments passed to the SQLAlchemy create_engine function.
"""
# Setup - None
# ===========================================================
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.InvalidInputError, match=error_msg):
pandemy.SQLiteDb(file=file, must_exist=must_exist, container=container, engine_config=engine_config)
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_parameter_to_create_engine(self):
r"""Test to supply an invalid parameter to the SQLAlchemy create_engine function.
pandemy.CreateEngineError is expected to be raised.
Also supply a keyword argument that is not used for anything.
It should not affect the initialization.
"""
# Setup
# ===========================================================
error_msg = 'invalid_param'
engine_config = {'invalid_param': True}
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.CreateEngineError, match=error_msg):
pandemy.SQLiteDb(file='my_db.db', must_exist=False, container=None,
engine_config=engine_config, kwarg='kwarg')
# Clean up - None
# ===========================================================
class TestExecuteMethod:
r"""Test the `execute` method of the SQLite DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db : pandemy.SQLiteDb
An instance of the test database.
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
df_owner : pd.DataFrame
The owner table of the test database.
"""
# The query for test_select_all_owners
select_all_owners = """SELECT OwnerId, OwnerName, BirthDate FROM Owner;"""
@pytest.mark.parametrize('query', [pytest.param(select_all_owners, id='query: str'),
pytest.param(text(select_all_owners), id='query: sqlalchemy TextClause')])
def test_select_all_owners(self, query, sqlite_db, df_owner):
r"""Test to execute a SELECT query.
Query all rows from the Owner table.
Parameters
----------
query : str or text
The SQL query to execute.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise
# ===========================================================
result = sqlite_db.execute(sql=query, conn=conn)
# Verify
# ===========================================================
for idx, row in enumerate(result):
assert row.OwnerId == df_owner.index[idx]
assert row.OwnerName == df_owner.loc[row.OwnerId, 'OwnerName']
assert row.BirthDate == df_owner.loc[row.OwnerId, 'BirthDate'].strftime(r'%Y-%m-%d')
# Clean up - None
# ===========================================================
# The query for test_select_owner_by_id
select_owner_by_id = """SELECT OwnerId, OwnerName
FROM Owner
WHERE OwnerId = :id;
"""
# query, owner_id
input_test_select_owner_by_id = [pytest.param(select_owner_by_id, 1,
id='query: str, id=1'),
pytest.param(text(select_owner_by_id), 2,
id='query: sqlalchemy TextClause, id=2')]
@pytest.mark.parametrize('query, owner_id', input_test_select_owner_by_id)
def test_select_owner_by_id(self, query, owner_id, sqlite_db, df_owner):
r"""Test to execute a SELECT query with a query parameter.
Parameters
----------
query : str or sqlalchemy.sql.elements.TextClause
The SQL query to execute.
owner_id : int
The parameter representing OwnerId in `query`.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise
# ===========================================================
result = sqlite_db.execute(sql=query, conn=conn, params={'id': owner_id})
# Verify
# ===========================================================
for row in result:
assert row.OwnerId == owner_id
assert row.OwnerName == df_owner.loc[owner_id, 'OwnerName']
# Clean up - None
# ===========================================================
def test_select_owner_by_2_params(self, sqlite_db, df_owner):
r"""Test to execute a SELECT query with 2 query parameters."""
# Setup
# ===========================================================
query = text("""SELECT OwnerId, OwnerName, BirthDate
FROM Owner
WHERE OwnerName = :name OR
DATE(BirthDate) > DATE(:bdate)
ORDER BY OwnerName ASC;
""")
df_exp_result = df_owner.loc[[3, 1], :]
with sqlite_db.engine.connect() as conn:
# Exercise
# ===========================================================
result = sqlite_db.execute(sql=query, conn=conn, params={'name': 'John', 'bdate': '1941-12-07'})
# Verify
# ===========================================================
for idx, row in enumerate(result):
assert row.OwnerId == df_exp_result.index[idx]
assert row.OwnerName == df_exp_result.loc[row.OwnerId, 'OwnerName']
assert row.BirthDate == df_exp_result.loc[row.OwnerId, 'BirthDate'].strftime(r'%Y-%m-%d')
# Clean up - None
# ===========================================================
input_test_insert_owner = [
pytest.param([{'id': 1, 'name': '<NAME>', 'bdate': '2021-07-07'}], id='1 Owner'),
pytest.param([{'id': 1, 'name': '<NAME>', 'bdate': '2021-07-07'},
{'id': 2, 'name': '<NAME>', 'bdate': '1987-07-21'}], id='2 Owners'),
]
@pytest.mark.parametrize('params', input_test_insert_owner)
def test_insert_into_owner(self, params, sqlite_db_empty):
r"""Test to insert new owner(s) into the Owner table of the empty test database.
Parameters
----------
params : list of dict
The parameters to pass to the insert statement.
"""
# Setup
# ===========================================================
statement = text("""INSERT INTO Owner (OwnerId, OwnerName, BirthDate)
VALUES (:id, :name, :bdate);
""")
# The query to read back the inserted owners
query_exp = """SELECT OwnerId, OwnerName, BirthDate FROM Owner;"""
with sqlite_db_empty.engine.connect() as conn:
# Exercise
# ===========================================================
sqlite_db_empty.execute(sql=statement, conn=conn, params=params)
# Verify
# ===========================================================
result = sqlite_db_empty.execute(sql=query_exp, conn=conn)
for idx, row in enumerate(result):
assert row.OwnerId == params[idx]['id']
assert row.OwnerName == params[idx]['name']
assert row.BirthDate == params[idx]['bdate']
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_select_syntax(self, sqlite_db):
r"""Execute a SELECT query with invalid syntax.
No query parameters are supplied. It should raise pandemy.ExecuteStatementError.
"""
# Setup
# ===========================================================
query = 'SELE * FROM Owner'
with sqlite_db.engine.connect() as conn:
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.ExecuteStatementError):
sqlite_db.execute(sql=query, conn=conn)
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_query_param(self, sqlite_db):
r"""
Execute a SELECT query with a parameter (:id) where the name of the supplied
parameter (:di) does not match the parameter name in the query.
It should raise pandemy.ExecuteStatementError.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.ExecuteStatementError):
sqlite_db.execute(sql=self.select_owner_by_id, conn=conn, params={'di': 1})
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_sql_param(self, sqlite_db):
r"""Supply and invalid type to the `sql` parameter.
It should raise pandemy.InvalidInputError.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.InvalidInputError, match='list'):
sqlite_db.execute(sql=['Invalid query'], conn=conn, params={'di': 1})
# Clean up - None
# ===========================================================
class TestIsValidTableName:
r"""Test the `_is_valid_table_name` method of the SQLiteDb DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
"""
@pytest.mark.parametrize('table', [pytest.param('Customer', id='Customer'),
pytest.param('1', id='1'),
pytest.param('', id='empty string'),
pytest.param('DELETE', id='DELETE'),
pytest.param('"DROP"', id='DROP'),
pytest.param('""DELETEFROMTABLE""', id='""DELETEFROMTABLE""')])
def test_is_valid_table_name_valid_table_names(self, table, sqlite_db_empty):
r"""Test that valid table names can pass the validation.
The `_is_valid_table_name` method checks that the table name consists
of a single word. If the table name is valid the method returns None
and no exception should be raised.
Parameters
----------
table : str
The name of the table.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
result = sqlite_db_empty._is_valid_table_name(table=table)
# Verify
# ===========================================================
assert result is None
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('table, spaces', [pytest.param('Customer DELETE', '1',
id='2 words, 1 space'),
pytest.param(' Customer DELETE', '3',
id='2 words, 3 spaces'),
pytest.param('"DROP TABLE Customer"', '2',
id='3 words, 2 spaces'),
pytest.param(';""DELETE FROM TABLE Customer;"', '3',
id='4 words, 3 spaces')])
def test_is_valid_table_name_invalid_table_names(self, table, spaces, sqlite_db_empty):
r"""Test that invalid table names can be detected correctly.
The `_is_valid_table_name` method checks that the table name consists
of a single word.
pandemy.InvalidTableNameError is expected to be raised
if the table name is invalid.
Parameters
----------
table : str
The name of the table.
spaces : str
The number of space characters in `table`.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
with pytest.raises(pandemy.InvalidTableNameError) as exc_info:
sqlite_db_empty._is_valid_table_name(table=table)
# Verify
# ===========================================================
assert exc_info.type is pandemy.InvalidTableNameError
assert table in exc_info.value.args[0]
assert spaces in exc_info.value.args[0]
assert table == exc_info.value.data
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('table', [pytest.param(1, id='int'),
pytest.param(3.14, id='float'),
pytest.param([1, '1'], id='list'),
pytest.param({'table': 'name'}, id='dict')])
def test_is_valid_table_name_invalid_input(self, table, sqlite_db_empty):
r"""Test invalid input to the `table` parameter.
If `table` is not a string pandemy.InvalidInputError should be raised.
Parameters
----------
table : str
The name of the table.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
with pytest.raises(pandemy.InvalidInputError) as exc_info:
sqlite_db_empty._is_valid_table_name(table=table)
# Verify
# ===========================================================
assert exc_info.type is pandemy.InvalidInputError
assert str(table) in exc_info.value.args[0]
assert table == exc_info.value.data
# Clean up - None
# ===========================================================
class TestDeleteAllRecordsFromTable:
r"""Test the `delete_all_records_from_table` method of the SQLiteDb DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
df_customer : pd.DataFrame
The Customer table of the test database.
"""
def test_delete_all_records(self, sqlite_db_empty, df_customer):
r"""Delete all records from the table Customer in the test database."""
# Setup
# ===========================================================
query = """SELECT * FROM Customer;"""
df_exp_result = pd.DataFrame(columns=df_customer.columns)
df_exp_result.index.name = df_customer.index.name
with sqlite_db_empty.engine.begin() as conn:
# Write data to the empty table
df_customer.to_sql(name='Customer', con=conn, if_exists='append')
# Exercise
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.delete_all_records_from_table(table='Customer', conn=conn)
# Verify
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
df_result = pd.read_sql(sql=query, con=conn, index_col='CustomerId', parse_dates=['BirthDate'])
assert_frame_equal(df_result, df_exp_result, check_dtype=False, check_index_type=False)
@pytest.mark.raises
def test_delete_all_records_table_does_not_exist(self, sqlite_db_empty):
r"""Try to delete all records from the table Custom that does not exist in the database.
pandemy.DeleteFromTableError is expected to be raised.
"""
# Setup
# ===========================================================
table = 'Custom'
# Exercise
# ===========================================================
with pytest.raises(pandemy.DeleteFromTableError) as exc_info:
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.delete_all_records_from_table(table=table, conn=conn)
# Verify
# ===========================================================
assert exc_info.type is pandemy.DeleteFromTableError
assert table in exc_info.value.args[0]
assert table in exc_info.value.data[0]
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('table', [pytest.param('Customer DELETE', id='table name = 2 words'),
pytest.param('"DROP TABLE Customer"', id='table name = 3 words'),
pytest.param(';""DELETE FROM TABLE Customer;"', id='table name = 4 words')])
def test_delete_all_records_invalid_table_name(self, table, sqlite_db_empty):
r"""Try to delete all records from specified table when supplying and invalid table name.
pandemy.InvalidTableNameError is expected to be raised.
Parameters
----------
table: str
The name of the table to delete records from.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
with pytest.raises(pandemy.InvalidTableNameError) as exc_info:
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.delete_all_records_from_table(table=table, conn=conn)
# Verify
# ===========================================================
assert exc_info.type is pandemy.InvalidTableNameError
assert table in exc_info.value.args[0]
assert table == exc_info.value.data
# Clean up - None
# ===========================================================
class TestSaveDfMethod:
r"""Test the `save_df` method of the SQLiteDb DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db : pandemy.SQLiteDb
An instance of the test database.
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
df_customer : pd.DataFrame
The Customer table of the test database.
"""
@pytest.mark.parametrize('chunksize', [pytest.param(None, id='chunksize=None'),
pytest.param(2, id='chunksize=2')])
def test_save_to_existing_empty_table(self, chunksize, sqlite_db_empty, df_customer):
r"""Save a DataFrame to an exisitng empty table.
Parameters
----------
chunksize : int or None
The number of rows in each batch to be written at a time.
If None, all rows will be written at once.
"""
# Setup
# ===========================================================
query = """SELECT * FROM Customer;"""
# Exercise
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.save_df(df=df_customer, table='Customer', conn=conn,
if_exists='append', chunksize=chunksize)
# Verify
# ===========================================================
df_result = pd.read_sql(sql=query, con=conn, index_col='CustomerId', parse_dates=['BirthDate'])
| assert_frame_equal(df_result, df_customer, check_dtype=False, check_index_type=False) | pandas.testing.assert_frame_equal |
import requests
from io import StringIO
import pandas as pd
import numpy as np
from tqdm import tqdm
from .financial_statement import html2db
from requests.exceptions import ConnectionError
from requests.exceptions import ReadTimeout
import time
import warnings
def requests_get(*args1, **args2):
i = 3
while i >= 0:
try:
return requests.get(*args1, **args2)
except (ConnectionError, ReadTimeout) as error:
print(error)
print('retry one more time after 60s', i, 'times left')
time.sleep(60)
i -= 1
return pd.DataFrame()
def requests_post(*args1, **args2):
i = 3
while i >= 0:
try:
return requests.post(*args1, **args2)
except (ConnectionError, ReadTimeout) as error:
print(error)
print('retry one more time after 60s', i, 'times left')
time.sleep(60)
i -= 1
return pd.DataFrame()
warnings.simplefilter(action='ignore', category=FutureWarning)
def crawl_price(date):
datestr = date.strftime('%Y%m%d')
try:
r = requests_post('https://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=' + datestr + '&type=ALLBUT0999')
except Exception as e:
print('**WARN: cannot get stock price at', datestr)
print(e)
return None
content = r.text.replace('=', '')
lines = content.split('\n')
lines = list(filter(lambda l:len(l.split('",')) > 10, lines))
content = "\n".join(lines)
if content == '':
return None
df = pd.read_csv(StringIO(content))
df = df.astype(str)
df = df.apply(lambda s: s.str.replace(',', ''))
df['date'] = pd.to_datetime(date)
df = df.rename(columns={'證券代號':'stock_id'})
df = df.set_index(['stock_id', 'date'])
df = df.apply(lambda s: | pd.to_numeric(s, errors='coerce') | pandas.to_numeric |
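# A minimal usage sketch for the crawler above (the date and the follow-up handling are
# illustrative assumptions, not part of the source):
#
#   import datetime
#   df = crawl_price(datetime.datetime(2020, 1, 3))   # returns None on holidays / empty pages
#   if df is not None:
#       print(df.head())                              # prices indexed by (stock_id, date)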
import json
import logging
from pathlib import Path
from typing import List, Optional, Tuple
import numpy as np
import pandas as pd
from tensorflow.keras.utils import Sequence
from tqdm.auto import tqdm
# TODO: use https://www.tensorflow.org/guide/data_performance instead
class DataGenerator(Sequence):
@classmethod
def from_json(cls, parameter_file: Path) -> "DataGenerator":
"""Create a DataGenerator instance from a json file.
Args:
parameter_file: file from which to read the parameters.
Returns:
The DataGenerator instance.
"""
with open(parameter_file, "r") as fp:
param_dict = json.load(fp)
param_dict["data_file"] = Path(param_dict["data_file"])
param_dict["BLM_dcum"] = pd.Series(param_dict["BLM_dcum"])
if param_dict["indices"] is not None:
param_dict["indices"] = np.array(param_dict["indices"])
return cls(**param_dict)
def __init__(
self,
data_file: Path,
key: str = "STABLE",
shuffle: bool = True,
batch_size: int = 1024,
seed: int = 42,
norm_method: str = "min_max",
norm_axis: int = 0,
norm_kwargs: dict = {},
BLM_names: Optional[List[str]] = None,
BLM_dcum: Optional[pd.Series] = None,
return_dataframe: bool = False,
ndim: int = 3,
indices: Optional[np.ndarray] = None,
):
"""Lossmap data hdf5 data generator.
Args:
data_file: path of the hdf file.
key: key within the hdf file.
shuffle: shuffle the order of the data samples within the datafile,
this is ignored if `indices` is provided.
batch_size: the number of samples to load at each iteration.
seed: the random seed.
norm_method: the normalization method to use.
norm_axis: the normalization axis, 0 to normalize each BLM across
the entire dataset. 1 to normalize each loss map.
norm_kwargs: passed to the normalization method.
BLM_names: The names of the BLM columns in the `data_file`.
BLM_dcum: BLM position data.
return_dataframe: Return `pd.DataFrame`s.
ndim: expand the number of dimensions in the numpy array.
indices: the indices of the samples to load from the data file.
"""
self._log = logging.getLogger(__name__)
self.data_file = data_file
self._store = None
self.key = key
self.shuffle = shuffle
self.batch_size = batch_size
self.seed = seed
self._rng = np.random.default_rng(self.seed)
self.norm_method = norm_method
self.norm_axis = norm_axis
self.norm_kwargs = norm_kwargs
self.BLM_names = BLM_names
self.BLM_dcum = BLM_dcum
self.return_dataframe = return_dataframe
self.ndim = ndim
self._data_len = self.get_data_length()
if indices is None:
self._log.debug("Creating indices.")
indices = np.arange(self._data_len)
if self.shuffle:
self._log.debug("Shuffling indices, seed %s", self.seed)
self._rng.shuffle(indices)
self.indices = indices # type: np.ndarray
self._mins_maxes = None
self._blm_sorted = None
# self._indices = np.arange(self._data_len)
norm_methods = {"min_max": self.norm_min_max}
self._norm_func = norm_methods[self.norm_method]
def _compute_min_max(self) -> Tuple[np.ndarray, np.ndarray]:
"""Compute the min and max across the entire dataset.
Returns:
An array of minimas and an array of maximas.
"""
mins = []
maxes = []
for chunk in tqdm(
self.store.select(self.key, chunksize=self.batch_size, iterator=True),
total=int(self._data_len // self.batch_size),
desc=f"Computing mins & maxes, axis={self.norm_axis}",
):
maxes.append(chunk.max(axis=self.norm_axis))
mins.append(chunk.min(axis=self.norm_axis))
return (
pd.concat(mins, axis=1).min(axis=1).to_numpy(),
pd.concat(maxes, axis=1).max(axis=1).to_numpy(),
)
@property
def store(self) -> pd.HDFStore:
if self._store is None:
self._log.debug("Opening hdf file.")
self._store = | pd.HDFStore(self.data_file, "r") | pandas.HDFStore |
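# The `norm_min_max` method registered in `norm_methods` above is not shown in this excerpt.
# A plausible sketch of such a min-max scaler, reusing the per-BLM minima and maxima computed
# by `_compute_min_max` (the signature and the epsilon guard are assumptions, not the actual
# implementation):
#
#   def norm_min_max(self, data: np.ndarray) -> np.ndarray:
#       mins, maxes = self._mins_maxes                   # cached (min, max) arrays
#       return (data - mins) / (maxes - mins + 1e-12)    # epsilon avoids division by zero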
from distributions import EqualChooseDistribution,OneDDistribution,TwoDDistribution
import random
from baseProgram import *
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
#returns maximum support of any candidate
def maxSupport(V,m):
#compute the number of supporters for all candidates
counts = [len([v for v in V if c in V[v]]) for c in range(m)]
return max(counts)
#returns most popular candidate
def mostPopular(V,m):
#compute the number of supporters for all candidates
counts = [len([v for v in V if c in V[v]]) for c in range(m)]
return counts.index(max(counts))
#returns the most popular candidate. Input is a list of candidates rather than an integer m (i.e. we do not assume the candidate set to be range(m))
def mostPopular2(V,candidates):
#compute the number of supporters for all candidates
counts = {c:len([v for v in V if c in V[v]]) for c in candidates}
return max(counts, key=counts.get)
#Function checks for JR
def isJR(V,W,m,k,n):
#remove all voters which already approve one candidate in W
V_new = dict((v,V[v]) for v in V if set(V[v]).intersection(set(W)) == set())
if len(V_new) == 0:
return True
if maxSupport(V_new,m) >= float(n)/float(k):
return False
return True
#runs GreedyCC until JR is satisfied
def GreedyCC(V,m,k):
W = []
n = len(V)
while(isJR(V,W,m,k,n)==False):
c = mostPopular(V,m)
W = W + [c]
#remove all candidates that approve c
V = dict((v,V[v]) for v in V if c not in V[v])
return W
#runs greedyCC until committee contains k candidates
#if all candidates are easily covered fills up with arbitrary candidates
def GreedyCCFull(V,m,k):
W = []
n = len(V)
while(len(W) < k and len(V)>0):
c = mostPopular(V,m)
W = W + [c]
#remove all voters that approve c
V = dict((v,V[v]) for v in V if c not in V[v])
if(len(V)==0 and len(W)<k):
while(len(W)<k):
print("all voters covered, GreedyCC is now filling up with arbirtrary candidates")
W = W + list([[c for c in range(m) if c not in W][0]])
return W
#greedy algorithm as described in experimental section of the paper
#max_coverage_app is a greedy estimate of maximum coverage
def egalitarianGreedy(V,m,k,max_coverage_app,max_approval):
W = GreedyCC(V,m,k)
C_rest = [item for item in range(m) if item not in W]
#check which approximation is currently worse
app_score = approvalScore(V,W)
cov_score = coverageScore(V,W)
while(len(W)<k):
app_score = approvalScore(V,W)
cov_score = coverageScore(V,W)
cov_dict = {c:(len([v for v in V if c in V[v] and not bool(set(W)&set(V[v]))]),
len([v for v in V if c in V[v]])) for c in C_rest}
if (float(app_score)/float(max_approval) >= float(cov_score)/float(max_coverage_app)):
#maximize for coverage
c = sorted(cov_dict.items(),key=lambda t: (t[1][0],t[1][1]),reverse=True)[0][0]
else:
#maximize for approval
c = sorted(cov_dict.items(),key=lambda t: (t[1][1],t[1][0]),reverse=True)[0][0]
W = W + [c]
C_rest = [item for item in C_rest if item != c]
return W
def approvalScore(V,W):
return sum([len(set(V[v]).intersection(set(W))) for v in V])
def coverageScore(V,W):
return len([v for v in V if len(set(V[v]).intersection(set(W))) != 0])
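# A small worked example for the JR / GreedyCC helpers above (toy profile, for illustration only):
#
#   V = {0: [0], 1: [0], 2: [1], 3: [2]}   # 4 voters and the candidates they approve
#   m, k = 3, 2                            # 3 candidates, committee size 2
#   GreedyCC(V, m, k)                      # -> [0]; once voters 0 and 1 are covered, no
#                                          #    uncovered group of size n/k = 2 agrees on a candidate
#   isJR(V, [0], m, k, len(V))             # -> True
#   approvalScore(V, [0]), coverageScore(V, [0])   # -> (2, 2)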
####################################################
################# START EXPERIMENTS ################
####################################################
#Goal: Show Trade-off between Social Welfare and Coverage
#Step0 - initiate parameter
m = 100
n = 100
k = 10
model_list = [('IC',0.1),('1D',0.054),('2D',0.195)]
#set to false if data is already available and only plots should be created
create_data = True
if create_data:
random.seed(200)
for model_tuple in model_list:
model = model_tuple[0]
p = model_tuple[1]
#contains data which is relevant for paretocurve 1 (cov on x-axis)
exp1_data = | pd.DataFrame(columns=['cov','z','sw','JR'],dtype=float) | pandas.DataFrame |
import nibabel as nb
import numpy as np
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor
import os
from decouple import config
data = config('data')
os.chdir(data)
print(data)
img = nb.load('eres.mgh')
df = pd.read_csv('behavioural_results.csv')
data = img.get_fdata()
voxel_data = np.array(data[10000].flatten(),dtype=float)
voxel_df = pd.DataFrame(voxel_data)
for covariate in ['BMI_baseline', 'Age']:
model = pd.concat([df[['G-Number','age_adjusted_group', covariate]],voxel_df],axis=1).rename(columns={0:'voxel'})
groups = pd.get_dummies(model['age_adjusted_group'])
model = | pd.concat([model,groups], axis=1) | pandas.concat |
"""
This module contains all classes and functions necessary to work with
the ALEPH-2 output file.
"""
__author__ = "ALEPH development team"
__all__ = [
"OutputFile",
"read_output",
]
import re
import io
import logging
import itertools
from collections import namedtuple
import numpy as np
import pandas as pd
from sandy.utils import grouper
summary_header = "\n".join((
"\*" * 80,
"\* ALEPH problem summary \*",
"\*" * 80,
))
table_header = "\n".join((
"\*" * 80,
"\*\-\-\- Table (?P<table_number>[0-9\s]{2}) \-\-\- (?P<table_name>.*?)\*",
"\*" * 80,
))
material_header = "\*\*\*\*\* Material (?P<material_number>[0-9\s]{7})\n"
table_footer = "^\s+Total"
PATTERN_TIMEKEFF = re.compile("\s+Global neutronics parameters\n\s+\-+\n\s+Time\s+\(days\)\s+(?P<data>.*?)\n")
PATTERN_BURNUP = re.compile("^\sFuel burnup \(MWd/kg HM\)\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_KEFF = re.compile("^\s+Keff eff. mult. factor\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_DKEFF = re.compile("^\s+Relative std. deviation\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_BEGIN = re.compile("\s+Irradiated materials\n\s+\-{20}")
PATTERN_END = re.compile("\s+\*{28}\n\s+\* Total over all materials \*\n\s+\*{28}")
PATTERN_MATERIAL = re.compile("^\*{5}\s+Material\s+(?P<mat>.*?)$", flags=re.MULTILINE)
PATTERN_CELLS = re.compile("\n\n\s+Cells\s+=\s+(?P<data>(?:[0-9, ]+\n)+)\n")
PATTERN_VOLUME = re.compile("^\s+Volumes \(cm3\)\s+=\s+(?P<data>.*?) $", flags=re.MULTILINE)
PATTERN_TIME = re.compile("^\s+Time\s+\(days\)\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_WDENSITY = re.compile("^\s+Density\s+\(g/cm3\)\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_ADENSITY = re.compile("^\s+Density\s+\(at/\(b\*cm\)\)\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_TEMPERATURE = re.compile("^\s+Temperature\s+\(K\)\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_SOURCE = re.compile("^\s+Source strength\s+\(part/s\)\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_NFLUX = re.compile("^\s+Neutron flux\s+\(n/\(cm2\*s\)\)\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_POWER = re.compile("^\s+Thermal power\s+\(MW\)\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_BURNUP = re.compile("^\s+Fuel burnup\s+\(MWd/kg HM\)\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_TOTBURNUP = re.compile("^\s+Total burnup \(MWd/kg HM\):\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_FISSIONS = re.compile("^\s+Integral number of fissions\s+(?P<data>.*?)$", flags=re.MULTILINE)
PATTERN_TOTFISSIONS = re.compile("^\s+Total number of fissions:\s+(?P<data>.*?)$", flags=re.MULTILINE)
MATERIAL_KEYS = [
"ID",
"cells",
"volume",
"time",
"weight_density",
"atomic_density",
"temperature",
"source_strenght",
"neutron_flux",
"power",
]
FISS_MATERIAL_KEYS = MATERIAL_KEYS + [
"burnup",
"cumulative_burnup",
"total_burnup",
"fissions",
"total_fissions",
]
Mat = namedtuple("Material", MATERIAL_KEYS)
FissMat = namedtuple("FissileMaterial", FISS_MATERIAL_KEYS)
class OutputFile():
"""
Class dedicated to storing the content of the `'output'` file produced by
any successful ALEPH-2 run.
Attributes
----------
data : `dict`
dictionary of ALEPH-2 output sections
burnup : `pandas.Series`
series with time-dependent burnup
cumulative_burnup : `pandas.Series`
series with time-dependent cumulative burnup
keff : `pandas. DataFrame`
dataframe with time-dependent keff and associated statistical
error
summary : `str`
summary of the ALEPH-2 output (first output section before the tables)
text : `str`
full ASCII text of ALEPH-2 output
Methods
-------
from_file
initialize object reading ALEPH-2 output from file
from_string
initialize object reading ALEPH-2 output from string
get_burnup
get array of burnup values
get_keff
get array of keff values
get_keff_staterr
get array of keff statistical errors
get_table
for a specified table number, return a dictionary of tables
indexed by material number
get_time
get array of irradiation/decay time steps (in days)
parse_tables
parse the tables of the ALEPH-2 output
"""
def __init__(self, text):
self.text = text
def parse_output(self, table_index="ZAM"):
self.data = parse_output_for_tables(self.text, index=table_index)
@property
def summary(self):
"""
Returns
-------
`str`
summary of the ALEPH-2 output (first output section before the
tables).
"""
return self.data[0]
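# A minimal usage sketch of this class (the file name is hypothetical, and the assumption
# that the module-level `read_output` listed in __all__ returns an OutputFile is ours):
#
#   out = read_output('aleph2_output')      # assumed to return an OutputFile
#   out.parse_output(table_index='ZAM')     # populate `data` with the parsed sections
#   print(out.summary)                      # section 0: the problem summary
#   bu = out.get_burnup()                   # DataFrame indexed by days, one column per material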
def get_burnup(self):
"""
Returns
-------
`pandas.Series`
series with time-dependent burnup.
"""
mats = self.get_materials()
bu = pd.DataFrame({
m: mat.burnup for m, mat in mats.items()
if hasattr(mat, "burnup")
})
days = self.get_time()
bu.index = pd.Index(days, name="days")
bu.columns.name = "material"
return bu
def get_cumulative_burnup(self):
"""
Returns
-------
`pandas.Series`
series with time-dependent cumulative burnup.
"""
mats = self.get_materials()
bu = pd.DataFrame({
m: mat.cumulative_burnup for m, mat in mats.items()
if hasattr(mat, "cumulative_burnup")
})
days = self.get_time()
bu.index = | pd.Index(days, name="days") | pandas.Index |
"""Dataloader classes and utilities.
"""
import os
import random
import torch
from torch.utils.data import SubsetRandomSampler, DataLoader, Subset
from sklearn.model_selection import train_test_split as split
import en_core_web_sm
from spacy.lang.en.stop_words import STOP_WORDS
from datasets import load_dataset
from transformers import AutoTokenizer
from active_learning.model_classes import load_embedding_model
# from model_classes import load_embedding_model
def init_data(dataset_config: dict):
"""Download (or load from disk) and apply dataset specific preprocessing.
Params:
- dataset_config (dict): dataset config dict
Returns:
- train (Dataset): Full training dataset
- dev (Dataset): Validation dataset
- test (Dataset): Test dataset
- num_classes (int): number of classes
- labelled_pool (List(int)): indices of seed labelled datapoints in train
- unlabelled_pool (List(int)): unlabelled indices of train
"""
# train and dev will be in random order, test may be ordered according to labels
if dataset_config["name"] == "CoLA":
train, dev, test, num_classes = load_cola(dataset_config)
elif dataset_config["name"] == "AGNews":
train, dev, test, num_classes = load_ag_news(dataset_config)
elif dataset_config["name"] == "DBPedia":
train, dev, test, num_classes = load_dbpedia(dataset_config)
elif dataset_config["name"] == "YRF":
train, dev, test, num_classes = load_yrf(dataset_config)
else:
raise NameError(f"Dataset {dataset_config['name']} not implemented.")
# etc.
# shrink size if debugging
if dataset_config["debug"]:
# choose a random subset using huggingface select function
train = train.select(random.sample(range(len(train)), k=200))
dev = dev.select(random.sample(range(len(dev)), k=40))
test = test.select(random.sample(range(len(test)), k=200))
# create class imbalance
random.seed(dataset_config["seed"])
if dataset_config["pool_balance"] == "balanced":
pass
elif dataset_config["pool_balance"] == "imbalanced":
train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config["imbalance_prop"], dataset_config['imbalance_cls']))
else:
NameError(f"pool_balance = {dataset_config['pool_balance']} not allowed")
if dataset_config["dev_balance"] == "balanced":
pass
elif dataset_config["dev_balance"] == "imbalanced":
dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config["imbalance_prop"], dataset_config['imbalance_cls']))
else:
NameError(f"dev_balance = {dataset_config['dev_balance']} not allowed")
# get seed labelled pool indices (using the same seed data every time)
random.seed(dataset_config["seed"])
if dataset_config["seed_balance"] == "balanced":
# this is random (will have some variance vs pool)
indices = list(range(len(train)))
unlabelled_pool_idx, labelled_pool_idx = split(
indices,
random_state=dataset_config["seed"],
test_size=dataset_config["seed_size"]
)
elif dataset_config["seed_balance"] == "stratified":
# this is the same as the underlying train set (which may be unbalanced)
indices = list(range(len(train)))
unlabelled_pool_idx, labelled_pool_idx = split(
indices,
random_state=dataset_config["seed"],
test_size=dataset_config["seed_size"],
stratify=train['label']
)
elif dataset_config["seed_balance"] == "imbalanced":
# artificially sample an imbalanced seed set from the pool
unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(
train,
num_classes,
dataset_config["seed_size"],
dataset_config['imbalance_prop'],
dataset_config['imbalance_cls']
)
else:
raise NameError(f"seed_balance = {dataset_config['seed_balance']} not allowed")
return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx
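# A minimal sketch of a dataset_config accepted by init_data (the keys are taken from the
# code above; the concrete values are illustrative assumptions):
#
#   dataset_config = {
#       "name": "AGNews", "debug": False, "seed": 42, "val_prop": 0.2,
#       "pool_balance": "balanced", "dev_balance": "balanced",
#       "seed_balance": "stratified", "seed_size": 100,
#       "imbalance_prop": 0.1, "imbalance_cls": 0,
#   }
#   train, dev, test, n_cls, labelled_idx, unlabelled_idx = init_data(dataset_config)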
def create_imbalanced_seed(data, num_classes, seed_size, prop, label):
"""Artificially make an imbalanced seed set.
Chooses examples from the pool at random and adds them to the list of seed examples until the desired proportion is reached.
Params:
- data (dataset): train pool dataset (Huggingface)
- num_classes (int): number of classes
- seed_size (int): number of examples to include in seed
- prop (float): proportion of examples of imbalanced label to include (weight other classes as 1, weight label as prop)
- label (int): imbalanced label
Returns:
- unlabelled_pool_idx (list(int)): list of unlabelled datapoint indices in train
- labelled_pool_idx (list(int)): list of labelled datapoint indices in train
"""
labelled_pool_idx = []
unlabelled_pool_idx = [i for i in range(len(data))]
label_weights = [1 if x != label else prop for x in range(num_classes)]
total_weight = sum(label_weights)
# these are the number of labelled examples of each class we would like to include.
# these are floats as we can exceed some classes by 1 to get desired seed size
desired_seed_label_count = [x*seed_size/total_weight for x in label_weights]
# TODO change counts to defaultdicts to avoid key errors
current_seed_label_count = [0 for _ in range(num_classes)]
while len(labelled_pool_idx) < seed_size:
sample_idx = random.choice(unlabelled_pool_idx)
example = data[sample_idx]
if current_seed_label_count[example['label']] < desired_seed_label_count[example['label']]:
# add to labelled pool
labelled_pool_idx.append(sample_idx)
current_seed_label_count[example['label']] += 1
# remove from unlabelled pool. TODO more efficient?
unlabelled_pool_idx = [i for i in range(len(data)) if i not in labelled_pool_idx]
return unlabelled_pool_idx, labelled_pool_idx
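# Worked example of the proportions above: with num_classes=4, seed_size=100, prop=0.5 and
# label=3, label_weights = [1, 1, 1, 0.5] and total_weight = 3.5, so the desired seed counts
# are roughly [28.6, 28.6, 28.6, 14.3] -- i.e. the imbalanced class receives about half as
# many seed examples as each of the other classes.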
def create_imbalanced_dataset(example: dict, prop: float, label: int):
"""Filtering function to randomly remove some examples
of a particular class from the dataset.
Params:
- example (dict): A document in the train pool
- prop (float): proportion of the examples of label cls to keep
- label (int): class to subsample
Returns:
- keep (bool): whether or not to keep this example
"""
if example["label"] == label:
return True if random.random() < prop else False
else:
return True
def load_yrf(dataset_config: dict):
train_and_dev = load_dataset('yelp_review_full', split='train')
train_and_dev = train_and_dev.train_test_split(test_size=dataset_config['val_prop'], seed=dataset_config["seed"])
train = train_and_dev['train']
dev = train_and_dev['test']
test = load_dataset('yelp_review_full', split='test')
# change to same columns and preprocess
train = train.map(lambda examples: {'text': preprocess_text(examples['text'])})
dev = dev.map(lambda examples: {'text': preprocess_text(examples['text'])})
test = test.map(lambda examples: {'text': preprocess_text(examples['text'])})
num_classes = test.features['label'].num_classes
return train, dev, test, num_classes
def load_ag_news(dataset_config: dict):
train_and_dev = load_dataset('ag_news', split='train')
train_and_dev = train_and_dev.train_test_split(test_size=dataset_config['val_prop'], seed=dataset_config["seed"])
train = train_and_dev['train']
dev = train_and_dev['test']
test = load_dataset('ag_news', split='test')
# change to same columns and preprocess
train = train.map(lambda examples: {'text': preprocess_text(examples['text'])})
dev = dev.map(lambda examples: {'text': preprocess_text(examples['text'])})
test = test.map(lambda examples: {'text': preprocess_text(examples['text'])})
num_classes = test.features['label'].num_classes
return train, dev, test, num_classes
def load_dbpedia(dataset_config: dict):
train_and_dev = load_dataset('dbpedia_14', split='train')
train_and_dev = train_and_dev.train_test_split(test_size=dataset_config['val_prop'], seed=dataset_config["seed"])
train = train_and_dev['train']
dev = train_and_dev['test']
test = load_dataset('dbpedia_14', split='test')
# change to same columns and preprocess
train = train.map(lambda examples: {'text': preprocess_text(examples['content'])}, remove_columns=['content'])
dev = dev.map(lambda examples: {'text': preprocess_text(examples['content'])}, remove_columns=['content'])
test = test.map(lambda examples: {'text': preprocess_text(examples['content'])}, remove_columns=['content'])
num_classes = test.features['label'].num_classes
return train, dev, test, num_classes
def load_cola(dataset_config: dict):
# TODO all the test data labels are -1 for some reason?? (should be 0 or 1)
train_and_dev = load_dataset('glue', 'cola', split='train')
train_and_dev = train_and_dev.train_test_split(test_size=dataset_config['val_prop'], seed=dataset_config["seed"])
train = train_and_dev['train']
dev = train_and_dev['test']
test = load_dataset('glue', 'cola', split='test')
# change to same columns and preprocess
train = train.map(lambda examples: {'text': preprocess_text(examples['sentence'])}, remove_columns=['sentence'])
dev = dev.map(lambda examples: {'text': preprocess_text(examples['sentence'])}, remove_columns=['sentence'])
test = test.map(lambda examples: {'text': preprocess_text(examples['sentence'])}, remove_columns=['sentence'])
num_classes = test.features['label'].num_classes
return train, dev, test, num_classes
def preprocess_text(text: str):
"""Preprocessing function for strings.
Call in dataset mapping.
"""
# remove trailing/leading whitespace
text = text.strip()
# .lower() depends on model so doing this in collate function
# TODO other preprocessing - punctuation/ascii etc.
text = text.replace("\\n", " ")
# text = text.replace("\\'", "\'")
# text = text.replace('\\"', "\'")
text = text.encode('ascii', 'ignore').decode()
return text
class Collate:
"""Collate function class for dataloaders. Tokenizes data appropriately for the model.
TODO might be a better place to do this?
"""
def __init__(self, model_config: dict):
self.model_type = model_config["model_type"]
if self.model_type == "BERT":
self.tokenizer = AutoTokenizer.from_pretrained(model_config["model_hypers"]["architecture"]["pretrained_model"])
elif self.model_type in ["RNN", "RNN-hid", "logistic", "MLP"]:
self.max_length = model_config["model_hypers"]["architecture"]["max_length"]
word_model = load_embedding_model(model_config["model_hypers"]["architecture"]["pretrained_emb"])
self.dictionary = {k: v+1 for (k, v) in word_model.key_to_index.items()}
self.dictionary["<PAD>"] = 0 # add pad token
self.oov_id = len(self.dictionary)
self.dictionary["<OOV>"] = self.oov_id # add OOV token
# Create a Tokenizer with spacy
nlp = en_core_web_sm.load()
self.tokenizer = nlp.tokenizer
else:
# tokenize in other ways for other models
raise NameError(f"model type: {self.model_type} not allowed")
def __call__(self, batch):
text = [x['text'] for x in batch]
labels = torch.tensor([x['label'] for x in batch])
# tokenize
if self.model_type == "BERT":
inputs = self.tokenizer(
text, return_token_type_ids=False,
return_tensors="pt", padding=True, truncation=True
)
elif self.model_type in ["RNN", "RNN-hid", "logistic", "MLP"]:
# find max length sequence in batch
lengths = [len(doc) for doc in self.tokenizer.pipe(text, batch_size=len(batch))]
max_length = max(lengths)
# truncate if too long
max_length = min([max_length, self.max_length])
# TODO get rid of excess padding after stopword removal
encoded_text = torch.zeros((len(batch), max_length), dtype=torch.int64)
# tokenise and encode each document. TODO can we batch this better?
for i, tokenized_doc in enumerate(self.tokenizer.pipe(text, batch_size=len(batch))):
# remove stop words and punctuation
if self.model_type != "RNN":
doc = [word.text.lower() for word in tokenized_doc if (word.text.lower() not in STOP_WORDS) and (word.text.isalpha())]
else:
# keep them for RNN
doc = [word.text.lower() for word in tokenized_doc]
length = len(doc)
# pad
if length < max_length:
padded = doc + ["<PAD>" for _ in range(max_length - length)]
else:
padded = doc[:max_length]
# TODO could do this in one step (how much time does this save?)
int_encoded = [self.dictionary[word]
if word in self.dictionary
else self.oov_id
for word in padded]
encoded_text[i, :] = torch.tensor(int_encoded)
# return a dict for inputs to match BERT style {"input_ids": [101, 2, ...]}
inputs = {"input_ids": encoded_text}
else:
raise NameError(f"model {self.model_type} not defined")
return inputs, labels
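# A minimal usage sketch of the collate function (the config values are illustrative
# assumptions; any HuggingFace dataset with 'text' and 'label' columns works):
#
#   collate = Collate({"model_type": "BERT",
#                      "model_hypers": {"architecture": {"pretrained_model": "bert-base-uncased"}}})
#   loader = DataLoader(train, batch_size=32, shuffle=True, collate_fn=collate)
#   inputs, labels = next(iter(loader))    # inputs["input_ids"] is ready for the model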
if __name__ == "__main__":
# check data loading correctly
# train, dev, test, num_classes = load_cola({"seed": 123, "val_prop": 0.2})
# train, dev, test, num_classes = load_ag_news({"seed": 123, "val_prop": 0.2})
train, dev, test, num_classes = load_yrf({"seed": 123, "val_prop": 0.2})
# train, dev, test, num_classes = load_dbpedia({"seed": 123, "val_prop": 0.2})
print("len train/dev/test/classes", len(train), len(dev), len(test), num_classes)
indices = list(range(len(train)))
unlabelled_pool, labelled_pool = split(indices, random_state=123, test_size=50)
print(labelled_pool[:5], len(labelled_pool))
print(train[labelled_pool[:5]])
# get stats for this batch and full pool
import pandas as pd
size_of_batch = 50
# this batch
df_full_labelled = | pd.DataFrame(train[labelled_pool]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A template main script.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright IBM Corp. 2020, 2021"
# set luigi_config_path BEFORE importing luigi
import os
from pathlib import Path
import sys
from luigine.abc import (AutoNamingTask,
main,
MultipleRunBase,
LinePlotMultipleRun)
from copy import deepcopy
from datetime import datetime
from time import time
import glob
import logging
from luigi.util import requires
import luigi
import numpy as np
import pandas as pd
import torch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from diffsnn.pp.poisson import MultivariatePoissonProcess
from diffsnn.pp.snn import FullyObsSigmoidSNN, FullyObsHardtanhSNN
from diffsnn.popp.snn import HardtanhPOSNN, SigmoidPOSNN
from diffsnn.diffpopp.snn import (HardtanhDiffSNN,
SigmoidDiffSNN,
HeuristicHardtanhDiffSNN,
HeuristicSigmoidDiffSNN)
from diffsnn.data import delete_hidden
try:
working_dir = Path(sys.argv[1:][sys.argv[1:].index("--working-dir")
+ 1]).resolve()
except ValueError:
raise ValueError("--working-dir option must be specified.")
# load parameters from `INPUT/param.py`
sys.path.append(str((working_dir / 'INPUT').resolve()))
from param import (DataGeneration_params,
Train_params,
PerformanceEvaluation_params,
EvaluateGradientVariance_params,
PlotTrainTime_params,
PlotTrainTimeMultipleRun_params,
PlotTestLoss_params,
PlotTestLossMultipleRun_params)
logger = logging.getLogger('luigi-interface')
AutoNamingTask._working_dir = working_dir
AutoNamingTask.working_dir = luigi.Parameter(default=str(working_dir))
# ----------- preamble ------------
model_dict = {'MultivariatePoissonProcess': MultivariatePoissonProcess,
'FullyObsSigmoidSNN': FullyObsSigmoidSNN,
'FullyObsHardtanhSNN': FullyObsHardtanhSNN,
'HardtanhPOSNN': HardtanhPOSNN,
'HardtanhDiffSNN': HardtanhDiffSNN,
'HeuristicHardtanhDiffSNN': HeuristicHardtanhDiffSNN,
'SigmoidPOSNN': SigmoidPOSNN,
'SigmoidDiffSNN': SigmoidDiffSNN,
'HeuristicSigmoidDiffSNN': HeuristicSigmoidDiffSNN}
def dict_param2dict(dict_param, prefix=''):
output_dict = {}
for each_key in dict_param:
if isinstance(
dict_param[each_key],
(dict, luigi.freezing.FrozenOrderedDict)):
dict_repl = dict_param2dict(
dict_param[each_key],
prefix=prefix + '_' + each_key if prefix != '' else each_key)
output_dict.update(dict_repl)
elif isinstance(dict_param[each_key], (list, tuple)):
pass
else:
output_dict[prefix + '_' + each_key] = dict_param[each_key]
return output_dict
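# Worked example of the flattening above (note that top-level scalars get a leading
# underscore because the empty prefix is still joined with '_'):
#
#   dict_param2dict({'a': 1, 'b': {'c': 2, 'd': [3, 4]}})
#   # -> {'_a': 1, 'b_c': 2}     (the list under 'd' is skipped)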
def get_initial_model(params, train_seed):
model = model_dict[params['model_name']](
seed=train_seed,
**params['model_kwargs'])
model.randomize_params(params['randomize_kernel_weight']['high'],
params['randomize_kernel_weight']['low'],
except_for=['bias'])
model.randomize_params(params['randomize_bias']['high'],
params['randomize_bias']['low'],
except_for=['kernel_weight'])
model.randomize_diagonal(params['randomize_diagonal']['high'],
params['randomize_diagonal']['low'])
return model
# Define tasks
class DataGeneration(AutoNamingTask):
DataGeneration_params = luigi.DictParameter()
data_seed = luigi.IntParameter()
def requires(self):
return []
def run_task(self, input_list):
torch.manual_seed(self.data_seed)
if self.DataGeneration_params.get('init', 'non-random') == 'random':
gen_model = get_initial_model(
self.DataGeneration_params,
self.data_seed)
elif self.DataGeneration_params.get('init', 'non-random') == 'non-random':
gen_model = model_dict[self.DataGeneration_params['model_name']](
seed=self.data_seed,
**self.DataGeneration_params['model_kwargs'])
gen_model.params['bias'].data \
= torch.tensor(self.DataGeneration_params['model_params']['bias'])
gen_model.params['kernel_weight'].data \
= torch.tensor(self.DataGeneration_params['model_params']['kernel_weight'])
else:
raise ValueError('initialization can be either random or non-random')
train_history_list = gen_model.simulate(self.DataGeneration_params['train_sample_size'],
[0, self.DataGeneration_params['length']])
train_po_history_list = [delete_hidden(
each_history,
self.DataGeneration_params['model_kwargs']['n_obs_neurons']) \
for each_history in train_history_list]
test_history_list = gen_model.simulate(self.DataGeneration_params['test_sample_size'],
[0, self.DataGeneration_params['length']])
test_po_history_list = [delete_hidden(
each_history,
self.DataGeneration_params['model_kwargs']['n_obs_neurons']) \
for each_history in test_history_list]
gen_model.base_pp = None
return [train_history_list,
train_po_history_list,
test_history_list,
test_po_history_list,
gen_model.state_dict()]
@requires(DataGeneration)
class Train(AutoNamingTask):
output_ext = luigi.Parameter(default='pth')
Train_params = luigi.DictParameter()
train_seed = luigi.IntParameter()
def run_task(self, input_list):
torch.manual_seed(self.train_seed)
_, train_po_history_list, _, _, _ = input_list[0]
train_model = get_initial_model(self.Train_params,
self.train_seed)
var_model = get_initial_model(self.Train_params,
self.train_seed+1)
train_model.fit(train_po_history_list,
variational_dist=var_model,
use_variational=self.Train_params['use_variational'],
n_epochs=self.Train_params['n_epochs'],
optimizer_kwargs={'lr': self.Train_params['lr']},
obj_func_kwargs=self.Train_params['obj_func_kwargs'],
logger=logger.info,
print_freq=max(self.Train_params['n_epochs'] // 10, 1),
**self.Train_params.get('fit_kwargs', {}))
return train_model, var_model
def save_output(self, res):
train_model, var_model = res
var_model.base_pp = None
torch.save(var_model.state_dict(),
self.output().path.replace('.pth', '_var.pth'))
train_model.base_pp = None
torch.save(train_model.state_dict(),
self.output().path)
def load_output(self):
state_dict = torch.load(self.output().path)
train_model = model_dict[self.Train_params['model_name']](
**self.Train_params['model_kwargs'])
train_model.load_state_dict(state_dict)
state_dict = torch.load(self.output().path.replace('.pth', '_var.pth'))
var_model = model_dict[self.Train_params['model_name']](
**self.Train_params['model_kwargs'])
var_model.load_state_dict(state_dict)
return train_model, var_model
@requires(DataGeneration)
class CalculateTrainTime(AutoNamingTask):
Train_params = luigi.DictParameter()
train_seed = luigi.IntParameter()
def run_task(self, input_list):
torch.manual_seed(self.train_seed)
_, train_po_history_list, _, _, _ = input_list[0]
train_model = get_initial_model(self.Train_params,
self.train_seed)
var_model = get_initial_model(self.Train_params,
self.train_seed+1)
start_time = time()
train_model.fit(train_po_history_list,
variational_dist=var_model,
use_variational=self.Train_params['use_variational'],
n_epochs=self.Train_params['n_epochs'],
optimizer_kwargs={'lr': self.Train_params['lr']},
obj_func_kwargs=self.Train_params['obj_func_kwargs'],
logger=logger.info,
print_freq=max(self.Train_params['n_epochs'] // 10, 1),
**self.Train_params.get('fit_kwargs', {}))
end_time = time()
return (end_time - start_time) / self.Train_params['n_epochs']
class CollectTrainTime(MultipleRunBase):
MultipleRun_params = luigi.DictParameter()
score_name = luigi.Parameter(default='Computation time')
def obj_task(self, **kwargs):
return CalculateTrainTime(**kwargs)
class CollectTestLoss(MultipleRunBase):
MultipleRun_params = luigi.DictParameter()
score_name = luigi.Parameter(default='Test loss')
def obj_task(self, **kwargs):
return PerformanceEvaluation(**kwargs)
class PerformanceEvaluation(AutoNamingTask):
DataGeneration_params = luigi.DictParameter(
default=DataGeneration_params)
Train_params = luigi.DictParameter(
default=Train_params)
seed = luigi.IntParameter()
n_trials = luigi.IntParameter()
#use_mlflow = luigi.BoolParameter(default=False)
PerformanceEvaluation_params = luigi.DictParameter(
default=PerformanceEvaluation_params)
def requires(self):
np.random.seed(self.seed)
data_seed_list = np.random.randint(4294967295, size=self.n_trials)
train_seed_list = np.random.randint(4294967295, size=self.n_trials)
eval_seed_list = np.random.randint(4294967295, size=self.n_trials)
logger.info(' * data seed: {}'.format(data_seed_list))
logger.info(' * train seed: {}'.format(train_seed_list))
logger.info(' * eval seed: {}'.format(eval_seed_list))
return [SinglePerformanceEvaluation(
DataGeneration_params=self.DataGeneration_params,
Train_params=self.Train_params,
PerformanceEvaluation_params=self.PerformanceEvaluation_params,
data_seed=data_seed_list[each_idx],
train_seed=train_seed_list[each_idx],
eval_seed=eval_seed_list[each_idx],
use_mlflow=self.use_mlflow)
for each_idx in range(self.n_trials)]
def run_task(self, input_list):
res_df = | pd.DataFrame(input_list) | pandas.DataFrame |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.common import is_scalar
from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, _ensure_index, InvalidIndexError
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.internals import SingleBlockManager
from pandas.core import generic
import pandas.core.common as com
import pandas.core.ops as ops
import pandas._libs.index as _index
from pandas.util._decorators import Appender
from pandas.core.sparse.array import (
make_sparse, _sparse_array_op, SparseArray,
_make_index)
from pandas._libs.sparse import BlockIndex, IntIndex
import pandas._libs.sparse as splib
from pandas.core.sparse.scipy_sparse import (
_sparse_series_to_coo,
_coo_to_sparse_series)
_shared_doc_kwargs = dict(axes='index', klass='SparseSeries',
axes_single_arg="{0, 'index'}",
optional_labels='', optional_axis='')
# -----------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are
present for compatibility.
"""
def wrapper(self, other):
if isinstance(other, Series):
if not isinstance(other, SparseSeries):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_series_op(self, other, op, name)
elif isinstance(other, DataFrame):
return NotImplemented
elif is_scalar(other):
with np.errstate(all='ignore'):
new_values = op(self.values, other)
return self._constructor(new_values,
index=self.index,
name=self.name)
else: # pragma: no cover
raise TypeError('operation with {other} not supported'
.format(other=type(other)))
wrapper.__name__ = name
if name.startswith("__"):
# strip special method names, e.g. `__add__` needs to be `add` when
# passed to _sparse_series_op
name = name[2:-2]
return wrapper
def _sparse_series_op(left, right, op, name):
left, right = left.align(right, join='outer', copy=False)
new_index = left.index
new_name = _maybe_match_name(left, right)
result = _sparse_array_op(left.values, right.values, op, name,
series=True)
return left._constructor(result, index=new_index, name=new_name)
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Defaults depends on dtype.
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseSeries objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
_subtyp = 'sparse_series'
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, copy=False,
fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
if isinstance(data, SparseArray):
if index is not None:
assert (len(index) == len(data))
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
if fill_value is None:
fill_value = data.fill_value
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
data = Series(data, index=index)
index = data.index.view()
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
else:
assert (len(data) == sparse_index.npoints)
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isna(data) and isna(fill_value)):
if kind == 'block':
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == 'block':
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.length)
index = _ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype,
copy=copy)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
@property
def values(self):
""" return the array """
return self.block.values
def __array__(self, result=None):
""" the array interface, return my values """
return self.block.values
def get_values(self):
""" same as values """
return self.block.to_dense().view()
@property
def block(self):
return self._data._block
@property
def fill_value(self):
return self.block.fill_value
@fill_value.setter
def fill_value(self, v):
self.block.fill_value = v
@property
def sp_index(self):
return self.block.sp_index
@property
def sp_values(self):
return self.values.sp_values
@property
def npoints(self):
return self.sp_index.npoints
@classmethod
def from_array(cls, arr, index=None, name=None, copy=False,
fill_value=None, fastpath=False):
"""
DEPRECATED: use the pd.SparseSeries(..) constructor instead.
"""
warnings.warn("'from_array' is deprecated and will be removed in a "
"future version. Please use the pd.SparseSeries(..) "
"constructor instead.", FutureWarning, stacklevel=2)
return cls._from_array(arr, index=index, name=name, copy=copy,
fill_value=fill_value, fastpath=fastpath)
@classmethod
def _from_array(cls, arr, index=None, name=None, copy=False,
fill_value=None, fastpath=False):
return cls(arr, index=index, name=name, copy=copy,
fill_value=fill_value, fastpath=fastpath)
@property
def _constructor(self):
return SparseSeries
@property
def _constructor_expanddim(self):
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def as_sparse_array(self, kind=None, fill_value=None, copy=False):
""" return my self as a sparse array, do not copy by default """
if fill_value is None:
fill_value = self.fill_value
if kind is None:
kind = self.kind
return SparseArray(self.values, sparse_index=self.sp_index,
fill_value=fill_value, kind=kind, copy=copy)
def __len__(self):
return len(self.block)
@property
def shape(self):
return self._data.shape
def __unicode__(self):
# currently, unicode is same as repr...fixes infinite loop
series_rep = Series.__unicode__(self)
rep = '{series}\n{index!r}'.format(series=series_rep,
index=self.sp_index)
return rep
def __array_wrap__(self, result, context=None):
"""
Gets called prior to a ufunc (and after)
See SparseArray.__array_wrap__ for detail.
"""
if isinstance(context, tuple) and len(context) == 3:
ufunc, args, domain = context
args = [getattr(a, 'fill_value', a) for a in args]
with np.errstate(all='ignore'):
fill_value = ufunc(self.fill_value, *args[1:])
else:
fill_value = self.fill_value
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=fill_value,
copy=False).__finalize__(self)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.name = getattr(obj, 'name', None)
self.fill_value = getattr(obj, 'fill_value', None)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform a reduction operation """
return op(self.get_values(), skipna=skipna, **kwds)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
fill_value=self.fill_value, name=self.name)
def _unpickle_series_compat(self, state):
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
index, fill_value, sp_index = own_state[:3]
name = None
if len(own_state) > 3:
name = own_state[3]
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sp_index,
fill_value=fill_value, copy=False)
# recreate
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data)
self._set_axis(0, index)
self.name = name
def __iter__(self):
""" forward to the array """
return iter(self.values)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'sparse_time_series')
else:
object.__setattr__(self, '_subtyp', 'sparse_series')
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis)
else:
return self._get_val_at(i)
def _get_val_at(self, loc):
""" forward to the array """
return self.block.values._get_val_at(loc)
def __getitem__(self, key):
try:
return self.index.get_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
elif key is Ellipsis:
return self
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item, must be array-like?
pass
key = _values_from_object(key)
if self.index.nlevels > 1 and isinstance(key, tuple):
# to handle MultiIndex labels
key = self.index.get_loc(key)
return self._constructor(self.values[key],
index=self.index[key]).__finalize__(self)
def _get_values(self, indexer):
try:
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self[indexer]
def _set_with_engine(self, key, value):
return self._set_value(key, value)
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are all numeric
Returns
-------
abs: type of caller
"""
return self._constructor(np.abs(self.values),
index=self.index).__finalize__(self)
def get(self, label, default=None):
"""
Returns value occupying requested label, default to specified
missing value if not present. Analogous to dict.get
Parameters
----------
label : object
            Label value to look for
default : object, optional
Value to return if label not in index
Returns
-------
y : scalar
"""
if label in self.index:
loc = self.index.get_loc(label)
return self._get_val_at(loc)
else:
return default
def get_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(label, takeable=takeable)
def _get_value(self, label, takeable=False):
loc = label if takeable is True else self.index.get_loc(label)
return self._get_val_at(loc)
_get_value.__doc__ = get_value.__doc__
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Series
Returns
-------
series : SparseSeries
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(label, value, takeable=takeable)
def _set_value(self, label, value, takeable=False):
values = self.to_dense()
# if the label doesn't exist, we will create a new object here
        # and possibly change the index
new_values = values._set_value(label, value, takeable=takeable)
if new_values is not None:
values = new_values
new_index = values.index
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, new_index)
self._index = new_index
_set_value.__doc__ = set_value.__doc__
def _set_values(self, key, value):
# this might be inefficient as we have to recreate the sparse array
# rather than setting individual elements, but have to convert
# the passed slice/boolean that's in dense space into a sparse indexer
# not sure how to do that!
if isinstance(key, Series):
key = key.values
values = self.values.to_dense()
values[key] = _index.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
def to_dense(self, sparse_only=False):
"""
Convert SparseSeries to a Series.
Parameters
----------
sparse_only: bool, default False
DEPRECATED: this argument will be removed in a future version.
If True, return just the non-sparse values, or the dense version
of `self.values` if False.
Returns
-------
s : Series
"""
if sparse_only:
warnings.warn(("The 'sparse_only' parameter has been deprecated "
"and will be removed in a future version."),
FutureWarning, stacklevel=2)
int_index = self.sp_index.to_int_index()
index = self.index.take(int_index.indices)
return Series(self.sp_values, index=index, name=self.name)
else:
return Series(self.values.to_dense(), index=self.index,
name=self.name)
@property
def density(self):
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
def copy(self, deep=True):
"""
Make a copy of the SparseSeries. Only the actual sparse values need to
be copied
"""
new_data = self._data
if deep:
new_data = self._data.copy()
return self._constructor(new_data, sparse_index=self.sp_index,
fill_value=self.fill_value).__finalize__(self)
@Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, method=None, copy=True, limit=None,
**kwargs):
return super(SparseSeries, self).reindex(index=index, method=method,
copy=copy, limit=limit,
**kwargs)
def sparse_reindex(self, new_index):
"""
Conform sparse values to new SparseIndex
Parameters
----------
new_index : {BlockIndex, IntIndex}
Returns
-------
reindexed : SparseSeries
"""
if not isinstance(new_index, splib.SparseIndex):
raise TypeError('new index must be a SparseIndex')
block = self.block.sparse_reindex(new_index)
new_data = SingleBlockManager(block, self.index)
return self._constructor(new_data, index=self.index,
sparse_index=new_index,
fill_value=self.fill_value).__finalize__(self)
@Appender(generic._shared_docs['take'])
def take(self, indices, axis=0, convert=None, *args, **kwargs):
if convert is not None:
msg = ("The 'convert' parameter is deprecated "
"and will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
convert = True
nv.validate_take_with_convert(convert, args, kwargs)
new_values = SparseArray.take(self.values, indices)
new_index = self.index.take(indices)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum of non-NA/null values.
When performing the cumulative summation, any non-NA/null values will
be skipped. The resulting SparseSeries will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : {0}
Returns
-------
cumsum : SparseSeries
"""
nv.validate_cumsum(args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
new_array = self.values.cumsum()
return self._constructor(
new_array, index=self.index,
sparse_index=new_array.sp_index).__finalize__(self)
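    # Sketch of the behaviour documented above (assumed, not executed here):
    # for a SparseSeries built from [1.0, nan, 2.0, nan, 3.0], cumsum()
    # returns the sparse equivalent of [1.0, nan, 3.0, nan, 6.0] -- NaN
    # locations are preserved and the result's fill value becomes NaN.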
@Appender(generic._shared_docs['isna'])
def isna(self):
arr = SparseArray( | isna(self.values.sp_values) | pandas.core.dtypes.missing.isna |
""" Data classes for internal BIDS data hierarchy. """
from itertools import chain
from collections import namedtuple
import pandas as pd
from . import collections as clc
from bids.utils import matches_entities
class Node(object):
"""Base class for objects that represent a single object in the BIDS
hierarchy.
Parameters
----------
    level : str
        The level of analysis this node belongs to (e.g. 'run', 'session',
        'subject', or 'dataset').
    entities : dict
        Dictionary of entities associated with this Node.
"""
def __init__(self, level, entities):
self.level = level.lower()
self.entities = entities
self.variables = {}
def add_variable(self, variable):
"""Adds a BIDSVariable to the current Node's list.
Parameters
----------
variable : BIDSVariable
The Variable to add to the list.
"""
self.variables[variable.name] = variable
class RunNode(Node):
"""Represents a single Run in a BIDS project.
Parameters
----------
    entities : dict
        Dictionary of entities for this Node.
    image_file : str
        The full path to the corresponding nifti image.
    duration : float
        Duration of the run, in seconds.
    repetition_time : float
        TR for the run, in seconds.
    n_vols : int
        Number of volumes (time points) in the run.
"""
def __init__(self, entities, image_file, duration, repetition_time, n_vols):
self.image_file = image_file
self.duration = duration
self.repetition_time = repetition_time
self.n_vols = n_vols
super(RunNode, self).__init__('run', entities)
def get_info(self):
# Note: do not remove the dict() call! self.entities is a SQLAlchemy
# association_proxy mapping, and without the conversion, the connection
# to the DB persists, causing problems on Python 3.5 if we try to clone
# a RunInfo or any containing object.
entities = dict(self.entities)
return RunInfo(entities, self.duration,
self.repetition_time, self.image_file, self.n_vols)
# Stores key information for each Run.
RunInfo_ = namedtuple('RunInfo', ['entities', 'duration', 'tr', 'image', 'n_vols'])
# Wrap with class to provide docstring
class RunInfo(RunInfo_):
""" A namedtuple storing run-related information.
    Properties include 'entities', 'duration', 'tr', 'image', and 'n_vols'.
"""
pass
class NodeIndex(object):
"""Represents the top level in a BIDS hierarchy. """
def __init__(self):
super(NodeIndex, self).__init__()
self.index = pd.DataFrame()
self.nodes = []
def get_collections(self, unit, names=None, merge=False,
sampling_rate=None, **entities):
"""Retrieve variable data for a specified level in the Dataset.
Parameters
----------
unit : str
The unit of analysis to return variables for. Must be
one of 'run', 'session', 'subject', or 'dataset'.
names : list
Optional list of variables names to return. If
None, all available variables are returned.
merge : bool
If True, variables are merged across all observations
of the current unit. E.g., if unit='subject' and return_type=
'collection', variables from all subjects will be merged into a
single collection. If False, each observation is handled
separately, and the result is returned as a list.
sampling_rate : int or str
If unit='run', the sampling rate to
pass onto the returned BIDSRunVariableCollection.
entities : dict
Optional constraints used to limit what gets returned.
Returns
-------
A list of BIDSVariableCollections if merge=False; a single
BIDSVariableCollection if merge=True.
"""
nodes = self.get_nodes(unit, entities)
var_sets = []
for n in nodes:
var_set = list(n.variables.values())
var_set = [v for v in var_set if matches_entities(v, entities)]
if names is not None:
var_set = [v for v in var_set if v.name in names]
# Additional filtering on Variables past run level, because their
# contents are extracted from TSV files containing rows from
# multiple observations
if unit != 'run':
var_set = [v.filter(entities) for v in var_set]
var_sets.append(var_set)
if merge:
var_sets = [list(chain(*var_sets))]
results = []
for vs in var_sets:
if not vs:
continue
if unit == 'run':
vs = clc.BIDSRunVariableCollection(vs, sampling_rate)
else:
vs = clc.BIDSVariableCollection(vs)
results.append(vs)
if merge:
return results[0] if results else None
return results
def get_nodes(self, level=None, entities=None, strict=False):
"""Retrieves all nodes that match the specified criteria.
Parameters
----------
level : str
The level of analysis of nodes to return.
entities : dict
Entities to filter on. All nodes must have
matching values on all defined keys to be included.
strict : bool
If True, an exception will be raised if the entities
dict contains any keys that aren't contained in the current
index.
Returns
-------
A list of Node instances.
"""
entities = {} if entities is None else entities.copy()
if level is not None:
entities['level'] = level
        if not entities:
            return self.nodes
match_ents = set(entities.keys())
common_cols = list(match_ents & set(self.index.columns))
        if strict and match_ents - set(common_cols):
            raise ValueError("Invalid entities: ", match_ents - set(common_cols))
if not common_cols:
return self.nodes
# Construct query string that handles both single values and iterables
query = []
for col in common_cols:
oper = 'in' if isinstance(entities[col], (list, tuple)) else '=='
q = '{name} {oper} {val}'.format(name=col, oper=oper,
val=repr(entities[col]))
query.append(q)
query = ' and '.join(query)
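        # Example of the resulting pandas query string (hypothetical input):
        # entities = {'subject': ['01', '02'], 'task': 'rest'} produces
        # "subject in ['01', '02'] and task == 'rest'".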
rows = self.index.query(query)
if rows.empty:
return []
# Sort and return
sort_cols = ['subject', 'session', 'task', 'run', 'node_index',
'suffix', 'level', 'datatype']
sort_cols = [sc for sc in sort_cols if sc in set(rows.columns)]
rows = rows.sort_values(sort_cols)
inds = rows['node_index'].astype(int)
return [self.nodes[i] for i in inds]
def create_node(self, level, entities, *args, **kwargs):
"""Creates a new child Node.
Parameters
----------
level : str
The level of analysis of the new Node.
entities : dict
Dictionary of entities belonging to Node.
args, kwargs : dict
Optional positional or named arguments to pass on to
class-specific initializers. These arguments are only used if
a Node that matches the passed entities doesn't already exist,
and a new one must be created.
Returns
-------
A Node instance.
"""
if level == 'run':
node = RunNode(entities, *args, **kwargs)
else:
node = Node(level, entities)
entities = dict(entities, node_index=len(self.nodes), level=level)
self.nodes.append(node)
# Because "entities" may have non-scalar values (such as `SliceTiming`)
# we need to first create a Series to avoid expansion
# From here we can concatenate
node_row = pd.DataFrame(pd.Series(entities)).T
self.index = | pd.concat([self.index, node_row], ignore_index=True) | pandas.concat |
"""Functions to calculate two-point correlations.
"""
import numpy as np
import pandas as pd
from scipy.fftpack import fft, ifft
from scipy.linalg import toeplitz
try:
from progress import getLogger
except ImportError:
from logging import getLogger
from .helpers import is_number_like, is_string_like, get_nfft
# Helpers
# ===========================================================================
def corr_mat(x, maxlag=None):
"""Return correlation matrix from correlation array.
Parameters:
===========
x: array-like
Correlation array in the form returned by e.g. acorr, xcorr.
NOT centered!
maxlag: int
Maximum lag to consider (should be < len(x) / 2).
"""
# | c_0 c_1 ... c_L |
# | c_-1 c_0 ... |
# | ... |
# | c_-L ... c_0 |
if maxlag:
        # toeplitz(
# first_column(l=0,-1,-2,...,-maxlag), first_row(l=0,1,2,...,+maxlag)
# )
return toeplitz(np.concatenate([[x[0]], x[:-maxlag:-1]]), x[:maxlag])
else:
return toeplitz(np.concatenate([[x[0]], x[:0:-1]]), x)
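# Illustrative layout (assumes x stores the negative lags at the end of the
# array, as returned by FFT-based correlation routines):
#   >>> corr_mat(np.array([1.0, 0.5, 0.2]))
#   array([[1. , 0.5, 0.2],
#          [0.2, 1. , 0.5],
#          [0.5, 0.2, 1. ]])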
def xcorrshift(x, maxlag=None, as_pandas=False):
"""Return shifted (cross- / auto) correlation to center lag zero."""
if not maxlag:
maxlag = len(x) // 2
# force pandas output?
if as_pandas and not hasattr(x, 'iloc'):
if len(np.shape(x)) > 1:
x = pd.DataFrame(x)
else:
x = | pd.Series(x) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Zerodha Kite Connect - candlestick pattern scanner
@author: <NAME> (http://rasuquant.com/wp/)
"""
from kiteconnect import KiteConnect
import pandas as pd
import datetime as dt
import os
import time
import numpy as np
from technicalta import *
#cwd = os.chdir("D:\\Udemy\\Zerodha KiteConnect API\\1_account_authorization")
apikey = '<KEY>'
#generate trading session
'''access_token = open("access_token.txt",'r').read()
key_secret = open("api_key.txt",'r').read().split()
kite = KiteConnect(api_key=key_secret[0])
kite.set_access_token(access_token)
#get dump of all NSE instruments
instrument_dump = kite.instruments("NSE")
instrument_df = pd.DataFrame(instrument_dump)
'''
def instrumentLookup(instrument_df,symbol):
"""Looks up instrument token for a given script from instrument dump"""
try:
return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]
except:
return -1
def fetchOHLC(ticker,interval,duration):
"""extracts historical data and outputs in the form of dataframe"""
instrument = instrumentLookup(instrument_df,ticker)
data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))
data.set_index("date",inplace=True)
return data
def doji(ohlc_df):
"""returns dataframe with doji candle column"""
df = ohlc_df.copy()
    avg_candle_size = abs(df["close"] - df["open"]).median()
df["doji"] = abs(df["close"] - df["open"]) <= (0.05 * avg_candle_size)
return df
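# Example of the rule above: if the median candle body is 2.0 points, any bar
# whose |close - open| is at most 0.1 (5% of the median) is flagged as a doji.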
def maru_bozu(ohlc_df):
"""returns dataframe with maru bozu candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["h-c"] = df["high"]-df["close"]
df["l-o"] = df["low"]-df["open"]
df["h-o"] = df["high"]-df["open"]
df["l-c"] = df["low"]-df["close"]
df["maru_bozu"] = np.where((df["close"] - df["open"] > 2*avg_candle_size) & \
(df[["h-c","l-o"]].max(axis=1) < 0.005*avg_candle_size),"maru_bozu_green",
np.where((df["open"] - df["close"] > 2*avg_candle_size) & \
(abs(df[["h-o","l-c"]]).max(axis=1) < 0.005*avg_candle_size),"maru_bozu_red",False))
df.drop(["h-c","l-o","h-o","l-c"],axis=1,inplace=True)
return df
def hammer(ohlc_df):
"""returns dataframe with hammer candle column"""
df = ohlc_df.copy()
df["hammer"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["close"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["open"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def shooting_star(ohlc_df):
"""returns dataframe with shooting star candle column"""
df = ohlc_df.copy()
df["sstar"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["high"] - df["close"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["high"] - df["open"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def levels(ohlc_day):
"""returns pivot point and support/resistance levels"""
high = round(ohlc_day["high"][-1],2)
low = round(ohlc_day["low"][-1],2)
close = round(ohlc_day["close"][-1],2)
pivot = round((high + low + close)/3,2)
r1 = round((2*pivot - low),2)
r2 = round((pivot + (high - low)),2)
r3 = round((high + 2*(pivot - low)),2)
s1 = round((2*pivot - high),2)
s2 = round((pivot - (high - low)),2)
s3 = round((low - 2*(high - pivot)),2)
return (pivot,r1,r2,r3,s1,s2,s3)
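# Worked example of the floor-trader pivots computed above: with the previous
# session's high=110, low=100, close=105 the pivot is 105, giving resistances
# r1=110, r2=115, r3=120 and supports s1=100, s2=95, s3=90.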
def trend(ohlc_df,n):
"function to assess the trend by analyzing each candle"
df = ohlc_df.copy()
df["up"] = np.where(df["low"]>=df["low"].shift(1),1,0)
df["dn"] = np.where(df["high"]<=df["high"].shift(1),1,0)
if df["close"][-1] > df["open"][-1]:
if df["up"][-1*n:].sum() >= 0.7*n:
return "uptrend"
elif df["open"][-1] > df["close"][-1]:
if df["dn"][-1*n:].sum() >= 0.7*n:
return "downtrend"
else:
return None
def res_sup(ohlc_df,ohlc_day):
"""calculates closest resistance and support levels for a given candle"""
level = ((ohlc_df["close"][-1] + ohlc_df["open"][-1])/2 + (ohlc_df["high"][-1] + ohlc_df["low"][-1])/2)/2
p,r1,r2,r3,s1,s2,s3 = levels(ohlc_day)
l_r1=level-r1
l_r2=level-r2
l_r3=level-r3
l_p=level-p
l_s1=level-s1
l_s2=level-s2
l_s3=level-s3
lev_ser = pd.Series([l_p,l_r1,l_r2,l_r3,l_s1,l_s2,l_s3],index=["p","r1","r2","r3","s1","s2","s3"])
sup = lev_ser[lev_ser>0].idxmin()
    res = lev_ser[lev_ser<0].idxmax()   # nearest level above the current price
return (eval('{}'.format(res)), eval('{}'.format(sup)))
def candle_type(ohlc_df):
"""returns the candle type of the last candle of an OHLC DF"""
'''ohlc_df['open']=int(ohlc_df['open'])
ohlc_df['close']=int(ohlc_df['close'])
ohlc_df['high']=int(ohlc_df['high'])
ohlc_df['low']=int(ohlc_df['low'])'''
candle = None
if doji(ohlc_df)["doji"][-1] == True:
candle = "doji"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_green":
candle = "maru_bozu_green"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_red":
candle = "maru_bozu_red"
if shooting_star(ohlc_df)["sstar"][-1] == True:
candle = "shooting_star"
if hammer(ohlc_df)["hammer"][-1] == True:
candle = "hammer"
return candle
def candle_pattern(ohlc_df,ohlc_day):
"""returns the candle pattern identified"""
pattern = None
signi = "low"
avg_candle_size = abs(ohlc_df["close"] - ohlc_df["open"]).median()
    res, sup = res_sup(ohlc_df,ohlc_day)
if (sup - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (sup + 1.5*avg_candle_size):
signi = "HIGH"
if (res - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (res + 1.5*avg_candle_size):
signi = "HIGH"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] > ohlc_df["close"][-2] \
and ohlc_df["close"][-1] > ohlc_df["open"][-1]:
pattern = "doji_bullish"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] < ohlc_df["close"][-2] \
and ohlc_df["close"][-1] < ohlc_df["open"][-1]:
pattern = "doji_bearish"
if candle_type(ohlc_df) == "maru_bozu_green":
pattern = "maru_bozu_bullish"
if candle_type(ohlc_df) == "maru_bozu_red":
pattern = "maru_bozu_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "hammer":
pattern = "hanging_man_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" and candle_type(ohlc_df) == "hammer":
pattern = "hammer_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "shooting_star":
pattern = "shooting_star_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["close"][-2] \
and ohlc_df["low"][-1] > ohlc_df["open"][-2]:
pattern = "harami_cross_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["open"][-2] \
and ohlc_df["low"][-1] > ohlc_df["close"][-2]:
pattern = "harami_cross_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["open"][-1] > ohlc_df["high"][-2] \
and ohlc_df["close"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["close"][-1] > ohlc_df["high"][-2] \
and ohlc_df["open"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bullish"
return "Significance - {}, Pattern - {}".format(signi,pattern)
##############################################################################################
tickers = ["ZEEL","WIPRO","VEDL","ULTRACEMCO","UPL","TITAN","TECHM","TATASTEEL",
"TATAMOTORS","TCS","SUNPHARMA","SBIN","SHREECEM","RELIANCE","POWERGRID",
"ONGC","NESTLEIND","NTPC","MARUTI","M&M","LT","KOTAKBANK","JSWSTEEL","INFY",
"INDUSINDBK","IOC","ITC","ICICIBANK","HDFC","HINDUNILVR","HINDALCO",
"HEROMOTOCO","HDFCBANK","HCLTECH","GRASIM","GAIL","EICHERMOT","DRREDDY",
"COALINDIA","CIPLA","BRITANNIA","INFRATEL","BHARTIARTL","BPCL","BAJAJFINSV",
"BAJFINANCE","BAJAJ-AUTO","AXISBANK","ASIANPAINT","ADANIPORTS","IDEA",
"MCDOWELL-N","UBL","NIACL","SIEMENS","SRTRANSFIN","SBILIFE","PNB",
"PGHH","PFC","PEL","PIDILITIND","PETRONET","PAGEIND","OFSS","NMDC","NHPC",
"MOTHERSUMI","MARICO","LUPIN","L&TFH","INDIGO","IBULHSGFIN","ICICIPRULI",
"ICICIGI","HINDZINC","HINDPETRO","HAVELLS","HDFCLIFE","HDFCAMC","GODREJCP",
"GICRE","DIVISLAB","DABUR","DLF","CONCOR","COLPAL","CADILAHC","BOSCHLTD",
"BIOCON","BERGEPAINT","BANKBARODA","BANDHANBNK","BAJAJHLDNG","DMART",
"AUROPHARMA","ASHOKLEY","AMBUJACEM","ADANITRANS","ACC",
"WHIRLPOOL","WABCOINDIA","VOLTAS","VINATIORGA","VBL","VARROC","VGUARD",
"UNIONBANK","UCOBANK","TRENT","TORNTPOWER","TORNTPHARM","THERMAX","RAMCOCEM",
"TATAPOWER","TATACONSUM","TVSMOTOR","TTKPRESTIG","SYNGENE","SYMPHONY",
"SUPREMEIND","SUNDRMFAST","SUNDARMFIN","SUNTV","STRTECH","SAIL","SOLARINDS",
"SHRIRAMCIT","SCHAEFFLER","SANOFI","SRF","SKFINDIA","SJVN","RELAXO",
"RAJESHEXPO","RECLTD","RBLBANK","QUESS","PRESTIGE","POLYCAB","PHOENIXLTD",
"PFIZER","PNBHOUSING","PIIND","OIL","OBEROIRLTY","NAM-INDIA","NATIONALUM",
"NLCINDIA","NBCC","NATCOPHARM","MUTHOOTFIN","MPHASIS","MOTILALOFS","MINDTREE",
"MFSL","MRPL","MANAPPURAM","MAHINDCIE","M&MFIN","MGL","MRF","LTI","LICHSGFIN",
"LTTS","KANSAINER","KRBL","JUBILANT","JUBLFOOD","JINDALSTEL","JSWENERGY",
"IPCALAB","NAUKRI","IGL","IOB","INDHOTEL","INDIANB","IBVENTURES","IDFCFIRSTB",
"IDBI","ISEC","HUDCO","HONAUT","HAL","HEXAWARE","HATSUN","HEG","GSPL",
"GUJGASLTD","GRAPHITE","GODREJPROP","GODREJIND","GODREJAGRO","GLENMARK",
"GLAXO","GILLETTE","GMRINFRA","FRETAIL","FCONSUMER","FORTIS","FEDERALBNK",
"EXIDEIND","ESCORTS","ERIS","ENGINERSIN","ENDURANCE","EMAMILTD","EDELWEISS",
"EIHOTEL","LALPATHLAB","DALBHARAT","CUMMINSIND","CROMPTON","COROMANDEL","CUB",
"CHOLAFIN","CHOLAHLDNG","CENTRALBK","CASTROLIND","CANBK","CRISIL","CESC",
"BBTC","BLUEDART","BHEL","BHARATFORG","BEL","BAYERCROP","BATAINDIA",
"BANKINDIA","BALKRISIND","ATUL","ASTRAL","APOLLOTYRE","APOLLOHOSP",
"AMARAJABAT","ALKEM","APLLTD","AJANTPHARM","ABFRL","ABCAPITAL","ADANIPOWER",
"ADANIGREEN","ADANIGAS","ABBOTINDIA","AAVAS","AARTIIND","AUBANK","AIAENG","3MINDIA"]
def main():
for ticker in tickers:
try:
ohlc = fetchOHLC(ticker, '5minute',5)
ohlc_day = fetchOHLC(ticker, 'day',30)
ohlc_day = ohlc_day.iloc[:-1,:]
cp = candle_pattern(ohlc,ohlc_day)
print(ticker, ": ",cp)
except:
print("skipping for ",ticker)
'''
# Continuous execution
starttime=time.time()
timeout = time.time() + 60*60*1 # 60 seconds times 60 meaning the script will run for 1 hr
while time.time() <= timeout:
try:
print("passthrough at ",time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
main()
time.sleep(300 - ((time.time() - starttime) % 300.0)) # 300 second interval between each new execution
except KeyboardInterrupt:
print('\n\nKeyboard exception received. Exiting.')
exit()'''
from pprint import pprint
def AlphaData_fxintraday(frombase,to,interval):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series FX ({}min)'.format(interval)]
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df=df.drop(['1. open','2. high','3. low', '4. close'], axis=1)
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
frombase=['EUR','USD','GBP','AUD','EUR']
to=['USD','JPY','CAD','CNY','CHF','HKD','GBP','KRW']
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
data=AlphaData_intraday(i,j,60)
pprint('{}/{} Done'.format(i,j))
time.sleep(30)
'''
def AlphaData_fxdaily(frombase,to):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=FX_DAILY&from_symbol={}&to_symbol={}&apikey={}".format(frombase,to,apikey)
#url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series FX (Daily)']
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df=df.drop(['1. open','2. high','3. low', '4. close'], axis=1)
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
dataintra=AlphaData_intraday(i,j,5)
datadaily=AlphaData_daily(i,j)
pprint(dataintra)
if len(dataintra) > 0:
if len(datadaily) > 0 :
pprint(candle_type(dataintra))
#cp = candle_pattern(dataintra,datadaily)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)'''
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
data=AlphaData_daily(i,j)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)
'''
def AlphaData_intraday(symbol,interval):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol={}&interval={}min&apikey={}".format(symbol,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series ({}min)'.format(interval)]
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
            df['volume']=df['5. volume']
df=df.drop(['1. open','2. high','3. low', '4. close','5. volume'], axis=1)
df['open']=pd.to_numeric(df['open'])
df['high']=pd.to_numeric(df['high'])
df['low']=pd.to_numeric(df['low'])
df['close']=pd.to_numeric(df['close'])
df['volume']=pd.to_numeric(df['volume'])
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
def AlphaData_daily(symbol):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&apikey={}".format(symbol,apikey)
#url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series (Daily)']
df= | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import os
import re
import sys
from datetime import datetime
from random import randint
from time import sleep
import numpy as np
import pandas.util.testing as tm
import pytest
import pytz
from pandas import DataFrame, NaT, compat
from pandas.compat import range, u
from pandas.compat.numpy import np_datetime64_compat
from pandas_gbq import gbq
try:
import mock
except ImportError:
from unittest import mock
TABLE_ID = 'new_test'
def _skip_local_auth_if_in_travis_env():
if _in_travis_environment():
pytest.skip("Cannot run local auth in travis environment")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _skip_if_no_private_key_contents():
if not _get_private_key_contents():
raise pytest.skip("Cannot run integration tests without a "
"private key json contents")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_dataset_prefix_random():
return ''.join(['pandas_gbq_', str(randint(1, 100000))])
def _get_project_id():
project = os.environ.get('GBQ_PROJECT_ID')
if not project:
pytest.skip(
"Cannot run integration tests without a project id")
return project
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
else:
return os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
def _get_private_key_contents():
key_path = _get_private_key_path()
if key_path is None:
return None
with open(key_path) as f:
return f.read()
@pytest.fixture(autouse=True, scope='module')
def _test_imports():
try:
import pkg_resources # noqa
except ImportError:
raise ImportError('Could not import pkg_resources (setuptools).')
gbq._test_google_api_imports()
@pytest.fixture
def project():
return _get_project_id()
def _check_if_can_get_correct_default_credentials():
# Checks if "Application Default Credentials" can be fetched
# from the environment the tests are running in.
# See https://github.com/pandas-dev/pandas/issues/13577
import google.auth
from google.auth.exceptions import DefaultCredentialsError
try:
credentials, _ = google.auth.default(scopes=[gbq.GbqConnector.scope])
except (DefaultCredentialsError, IOError):
return False
return gbq._try_credentials(_get_project_id(), credentials) is not None
def clean_gbq_environment(dataset_prefix, private_key=None):
dataset = gbq._Dataset(_get_project_id(), private_key=private_key)
all_datasets = dataset.datasets()
retry = 3
while retry > 0:
try:
retry = retry - 1
for i in range(1, 10):
dataset_id = dataset_prefix + str(i)
if dataset_id in all_datasets:
table = gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
# Table listing is eventually consistent, so loop until
# all tables no longer appear (max 30 seconds).
table_retry = 30
all_tables = dataset.tables(dataset_id)
while all_tables and table_retry > 0:
for table_id in all_tables:
try:
table.delete(table_id)
except gbq.NotFoundException:
pass
sleep(1)
table_retry = table_retry - 1
all_tables = dataset.tables(dataset_id)
dataset.delete(dataset_id)
retry = 0
except gbq.GenericGBQException as ex:
# Build in retry logic to work around the following errors :
# An internal error occurred and the request could not be...
# Dataset ... is still in use
error_message = str(ex).lower()
if ('an internal error occurred' in error_message or
'still in use' in error_message) and retry > 0:
sleep(30)
else:
raise ex
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
def test_generate_bq_schema_deprecated():
# 11121 Deprecation of generate_bq_schema
with pytest.warns(FutureWarning):
df = make_mixed_dataframe_v2(10)
gbq.generate_bq_schema(df)
@pytest.fixture(params=['local', 'service_path', 'service_creds'])
def auth_type(request):
auth = request.param
if auth == 'local':
if _in_travis_environment():
pytest.skip("Cannot run local auth in travis environment")
elif auth == 'service_path':
if _in_travis_environment():
pytest.skip("Only run one auth type in Travis to save time")
_skip_if_no_private_key_path()
elif auth == 'service_creds':
_skip_if_no_private_key_contents()
else:
raise ValueError
return auth
@pytest.fixture()
def credentials(auth_type):
if auth_type == 'local':
return None
elif auth_type == 'service_path':
return _get_private_key_path()
elif auth_type == 'service_creds':
return _get_private_key_contents()
else:
raise ValueError
@pytest.fixture()
def gbq_connector(project, credentials):
return gbq.GbqConnector(project, private_key=credentials)
class TestGBQConnectorIntegration(object):
def test_should_be_able_to_make_a_connector(self, gbq_connector):
assert gbq_connector is not None, 'Could not create a GbqConnector'
def test_should_be_able_to_get_valid_credentials(self, gbq_connector):
credentials = gbq_connector.get_credentials()
assert credentials.valid
def test_should_be_able_to_get_a_bigquery_client(self, gbq_connector):
bigquery_client = gbq_connector.get_client()
assert bigquery_client is not None
def test_should_be_able_to_get_schema_from_query(self, gbq_connector):
schema, pages = gbq_connector.run_query('SELECT 1')
assert schema is not None
def test_should_be_able_to_get_results_from_query(self, gbq_connector):
schema, pages = gbq_connector.run_query('SELECT 1')
assert pages is not None
class TestGBQConnectorIntegrationWithLocalUserAccountAuth(object):
@pytest.fixture(autouse=True)
def setup(self, project):
_skip_local_auth_if_in_travis_env()
self.sut = gbq.GbqConnector(project, auth_local_webserver=True)
def test_get_application_default_credentials_does_not_throw_error(self):
if _check_if_can_get_correct_default_credentials():
# Can get real credentials, so mock it out to fail.
from google.auth.exceptions import DefaultCredentialsError
with mock.patch('google.auth.default',
side_effect=DefaultCredentialsError()):
credentials = self.sut.get_application_default_credentials()
else:
credentials = self.sut.get_application_default_credentials()
assert credentials is None
def test_get_application_default_credentials_returns_credentials(self):
if not _check_if_can_get_correct_default_credentials():
pytest.skip("Cannot get default_credentials "
"from the environment!")
from google.auth.credentials import Credentials
credentials = self.sut.get_application_default_credentials()
assert isinstance(credentials, Credentials)
def test_get_user_account_credentials_bad_file_returns_credentials(self):
from google.auth.credentials import Credentials
with mock.patch('__main__.open', side_effect=IOError()):
credentials = self.sut.get_user_account_credentials()
assert isinstance(credentials, Credentials)
def test_get_user_account_credentials_returns_credentials(self):
from google.auth.credentials import Credentials
credentials = self.sut.get_user_account_credentials()
assert isinstance(credentials, Credentials)
class TestGBQUnit(object):
def test_should_return_credentials_path_set_by_env_var(self):
env = {'PANDAS_GBQ_CREDENTIALS_FILE': '/tmp/dummy.dat'}
with mock.patch.dict('os.environ', env):
assert gbq._get_credentials_file() == '/tmp/dummy.dat'
@pytest.mark.parametrize(
('input', 'type_', 'expected'), [
(1, 'INTEGER', int(1)),
(1, 'FLOAT', float(1)),
pytest.param('false', 'BOOLEAN', False, marks=pytest.mark.xfail),
pytest.param(
'0e9', 'TIMESTAMP',
np_datetime64_compat('1970-01-01T00:00:00Z'),
marks=pytest.mark.xfail),
('STRING', 'STRING', 'STRING'),
])
def test_should_return_bigquery_correctly_typed(
self, input, type_, expected):
result = gbq._parse_data(
dict(fields=[dict(name='x', type=type_, mode='NULLABLE')]),
rows=[[input]]).iloc[0, 0]
assert result == expected
def test_to_gbq_should_fail_if_invalid_table_name_passed(self):
with pytest.raises(gbq.NotFoundException):
gbq.to_gbq(DataFrame(), 'invalid_table_name', project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(self):
with pytest.raises(TypeError):
gbq.to_gbq(DataFrame(), 'dataset.tablename')
def test_read_gbq_with_no_project_id_given_should_fail(self):
with pytest.raises(TypeError):
gbq.read_gbq('SELECT 1')
def test_that_parse_data_works_properly(self):
from google.cloud.bigquery.table import Row
test_schema = {'fields': [
{'mode': 'NULLABLE', 'name': 'column_x', 'type': 'STRING'}]}
field_to_index = {'column_x': 0}
values = ('row_value',)
test_page = [Row(values, field_to_index)]
test_output = gbq._parse_data(test_schema, test_page)
correct_output = DataFrame({'column_x': ['row_value']})
tm.assert_frame_equal(test_output, correct_output)
def test_read_gbq_with_invalid_private_key_json_should_fail(self):
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='y')
def test_read_gbq_with_empty_private_key_json_should_fail(self):
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='{}')
def test_read_gbq_with_private_key_json_wrong_types_should_fail(self):
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key='{ "client_email" : 1, "private_key" : True }')
def test_read_gbq_with_empty_private_key_file_should_fail(self):
with tm.ensure_clean() as empty_file_path:
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x',
private_key=empty_file_path)
def test_read_gbq_with_corrupted_private_key_json_should_fail(self):
_skip_if_no_private_key_contents()
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key=re.sub('[a-z]', '9', _get_private_key_contents()))
def test_should_read(project, credentials):
query = 'SELECT "PI" AS valid_string'
df = gbq.read_gbq(query, project_id=project, private_key=credentials)
tm.assert_frame_equal(df, DataFrame({'valid_string': ['PI']}))
class TestReadGBQIntegration(object):
@pytest.fixture(autouse=True)
def setup(self, project, credentials):
# - PER-TEST FIXTURES -
# put here any instruction you want to be run *BEFORE* *EVERY* test is
# executed.
self.gbq_connector = gbq.GbqConnector(
project, private_key=credentials)
self.credentials = credentials
def test_should_properly_handle_valid_strings(self):
query = 'SELECT "PI" AS valid_string'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'valid_string': ['PI']}))
def test_should_properly_handle_empty_strings(self):
query = 'SELECT "" AS empty_string'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'empty_string': [""]}))
def test_should_properly_handle_null_strings(self):
query = 'SELECT STRING(NULL) AS null_string'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_string': [None]}))
def test_should_properly_handle_valid_integers(self):
query = 'SELECT INTEGER(3) AS valid_integer'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'valid_integer': [3]}))
def test_should_properly_handle_nullable_integers(self):
query = '''SELECT * FROM
(SELECT 1 AS nullable_integer),
(SELECT NULL AS nullable_integer)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_integer': [1, None]}).astype(object))
def test_should_properly_handle_valid_longs(self):
query = 'SELECT 1 << 62 AS valid_long'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'valid_long': [1 << 62]}))
def test_should_properly_handle_nullable_longs(self):
query = '''SELECT * FROM
(SELECT 1 << 62 AS nullable_long),
(SELECT NULL AS nullable_long)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_long': [1 << 62, None]}).astype(object))
def test_should_properly_handle_null_integers(self):
query = 'SELECT INTEGER(NULL) AS null_integer'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_integer': [None]}))
def test_should_properly_handle_valid_floats(self):
from math import pi
query = 'SELECT PI() AS valid_float'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame(
{'valid_float': [pi]}))
def test_should_properly_handle_nullable_floats(self):
from math import pi
query = '''SELECT * FROM
(SELECT PI() AS nullable_float),
(SELECT NULL AS nullable_float)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_float': [pi, None]}))
def test_should_properly_handle_valid_doubles(self):
from math import pi
query = 'SELECT PI() * POW(10, 307) AS valid_double'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame(
{'valid_double': [pi * 10 ** 307]}))
def test_should_properly_handle_nullable_doubles(self):
from math import pi
query = '''SELECT * FROM
(SELECT PI() * POW(10, 307) AS nullable_double),
(SELECT NULL AS nullable_double)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_double': [pi * 10 ** 307, None]}))
def test_should_properly_handle_null_floats(self):
query = 'SELECT FLOAT(NULL) AS null_float'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_float': [np.nan]}))
def test_should_properly_handle_timestamp_unix_epoch(self):
query = 'SELECT TIMESTAMP("1970-01-01 00:00:00") AS unix_epoch'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame(
{'unix_epoch': [np.datetime64('1970-01-01T00:00:00.000000Z')]}))
def test_should_properly_handle_arbitrary_timestamp(self):
query = 'SELECT TIMESTAMP("2004-09-15 05:00:00") AS valid_timestamp'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({
'valid_timestamp': [np.datetime64('2004-09-15T05:00:00.000000Z')]
}))
def test_should_properly_handle_null_timestamp(self):
query = 'SELECT TIMESTAMP(NULL) AS null_timestamp'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_timestamp': [NaT]}))
def test_should_properly_handle_true_boolean(self):
query = 'SELECT BOOLEAN(TRUE) AS true_boolean'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'true_boolean': [True]}))
def test_should_properly_handle_false_boolean(self):
query = 'SELECT BOOLEAN(FALSE) AS false_boolean'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'false_boolean': [False]}))
def test_should_properly_handle_null_boolean(self):
query = 'SELECT BOOLEAN(NULL) AS null_boolean'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_boolean': [None]}))
def test_should_properly_handle_nullable_booleans(self):
query = '''SELECT * FROM
(SELECT BOOLEAN(TRUE) AS nullable_boolean),
(SELECT NULL AS nullable_boolean)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_boolean': [True, None]}).astype(object))
def test_unicode_string_conversion_and_normalization(self):
correct_test_datatype = DataFrame(
{'unicode_string': [u("\xe9\xfc")]}
)
unicode_string = "\xc3\xa9\xc3\xbc"
if compat.PY3:
unicode_string = unicode_string.encode('latin-1').decode('utf8')
query = 'SELECT "{0}" AS unicode_string'.format(unicode_string)
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, correct_test_datatype)
def test_index_column(self):
query = "SELECT 'a' AS string_1, 'b' AS string_2"
result_frame = gbq.read_gbq(query, project_id=_get_project_id(),
index_col="string_1",
private_key=self.credentials)
correct_frame = DataFrame(
{'string_1': ['a'], 'string_2': ['b']}).set_index("string_1")
assert result_frame.index.name == correct_frame.index.name
def test_column_order(self):
query = "SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3"
col_order = ['string_3', 'string_1', 'string_2']
result_frame = gbq.read_gbq(query, project_id=_get_project_id(),
col_order=col_order,
private_key=self.credentials)
correct_frame = DataFrame({'string_1': ['a'], 'string_2': [
'b'], 'string_3': ['c']})[col_order]
tm.assert_frame_equal(result_frame, correct_frame)
def test_read_gbq_raises_invalid_column_order(self):
query = "SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3"
col_order = ['string_aaa', 'string_1', 'string_2']
# Column string_aaa does not exist. Should raise InvalidColumnOrder
with pytest.raises(gbq.InvalidColumnOrder):
gbq.read_gbq(query, project_id=_get_project_id(),
col_order=col_order,
private_key=self.credentials)
def test_column_order_plus_index(self):
query = "SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3"
col_order = ['string_3', 'string_2']
result_frame = gbq.read_gbq(query, project_id=_get_project_id(),
index_col='string_1', col_order=col_order,
private_key=self.credentials)
correct_frame = DataFrame(
{'string_1': ['a'], 'string_2': ['b'], 'string_3': ['c']})
correct_frame.set_index('string_1', inplace=True)
correct_frame = correct_frame[col_order]
tm.assert_frame_equal(result_frame, correct_frame)
def test_read_gbq_raises_invalid_index_column(self):
query = "SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3"
col_order = ['string_3', 'string_2']
# Column string_bbb does not exist. Should raise InvalidIndexColumn
with pytest.raises(gbq.InvalidIndexColumn):
gbq.read_gbq(query, project_id=_get_project_id(),
index_col='string_bbb', col_order=col_order,
private_key=self.credentials)
def test_malformed_query(self):
with pytest.raises(gbq.GenericGBQException):
gbq.read_gbq("SELCET * FORM [publicdata:samples.shakespeare]",
project_id=_get_project_id(),
private_key=self.credentials)
def test_bad_project_id(self):
with pytest.raises(gbq.GenericGBQException):
gbq.read_gbq("SELECT 1", project_id='001',
private_key=self.credentials)
def test_bad_table_name(self):
with pytest.raises(gbq.GenericGBQException):
gbq.read_gbq("SELECT * FROM [publicdata:samples.nope]",
project_id=_get_project_id(),
private_key=self.credentials)
def test_download_dataset_larger_than_200k_rows(self):
test_size = 200005
# Test for known BigQuery bug in datasets larger than 100k rows
# http://stackoverflow.com/questions/19145587/bq-py-not-paging-results
df = gbq.read_gbq("SELECT id FROM [publicdata:samples.wikipedia] "
"GROUP EACH BY id ORDER BY id ASC LIMIT {0}"
.format(test_size),
project_id=_get_project_id(),
private_key=self.credentials)
assert len(df.drop_duplicates()) == test_size
def test_zero_rows(self):
# Bug fix for https://github.com/pandas-dev/pandas/issues/10273
df = gbq.read_gbq("SELECT title, id, is_bot, "
"SEC_TO_TIMESTAMP(timestamp) ts "
"FROM [publicdata:samples.wikipedia] "
"WHERE timestamp=-9999999",
project_id=_get_project_id(),
private_key=self.credentials)
page_array = np.zeros(
(0,), dtype=[('title', object), ('id', np.dtype(int)),
('is_bot', np.dtype(bool)), ('ts', 'M8[ns]')])
expected_result = DataFrame(
page_array, columns=['title', 'id', 'is_bot', 'ts'])
tm.assert_frame_equal(df, expected_result)
def test_legacy_sql(self):
legacy_sql = "SELECT id FROM [publicdata.samples.wikipedia] LIMIT 10"
# Test that a legacy sql statement fails when
# setting dialect='standard'
with pytest.raises(gbq.GenericGBQException):
gbq.read_gbq(legacy_sql, project_id=_get_project_id(),
dialect='standard',
private_key=self.credentials)
# Test that a legacy sql statement succeeds when
# setting dialect='legacy'
df = gbq.read_gbq(legacy_sql, project_id=_get_project_id(),
dialect='legacy',
private_key=self.credentials)
assert len(df.drop_duplicates()) == 10
def test_standard_sql(self):
standard_sql = "SELECT DISTINCT id FROM " \
"`publicdata.samples.wikipedia` LIMIT 10"
# Test that a standard sql statement fails when using
# the legacy SQL dialect (default value)
with pytest.raises(gbq.GenericGBQException):
gbq.read_gbq(standard_sql, project_id=_get_project_id(),
private_key=self.credentials)
# Test that a standard sql statement succeeds when
# setting dialect='standard'
df = gbq.read_gbq(standard_sql, project_id=_get_project_id(),
dialect='standard',
private_key=self.credentials)
assert len(df.drop_duplicates()) == 10
def test_invalid_option_for_sql_dialect(self):
sql_statement = "SELECT DISTINCT id FROM " \
"`publicdata.samples.wikipedia` LIMIT 10"
# Test that an invalid option for `dialect` raises ValueError
with pytest.raises(ValueError):
gbq.read_gbq(sql_statement, project_id=_get_project_id(),
dialect='invalid',
private_key=self.credentials)
# Test that a correct option for dialect succeeds
# to make sure ValueError was due to invalid dialect
gbq.read_gbq(sql_statement, project_id=_get_project_id(),
dialect='standard', private_key=self.credentials)
def test_query_with_parameters(self):
sql_statement = "SELECT @param1 + @param2 AS valid_result"
config = {
'query': {
"useLegacySql": False,
"parameterMode": "named",
"queryParameters": [
{
"name": "param1",
"parameterType": {
"type": "INTEGER"
},
"parameterValue": {
"value": 1
}
},
{
"name": "param2",
"parameterType": {
"type": "INTEGER"
},
"parameterValue": {
"value": 2
}
}
]
}
}
# Test that a query that relies on parameters fails
# when parameters are not supplied via configuration
with pytest.raises(ValueError):
gbq.read_gbq(sql_statement, project_id=_get_project_id(),
private_key=self.credentials)
# Test that the query is successful because we have supplied
# the correct query parameters via the 'config' option
df = gbq.read_gbq(sql_statement, project_id=_get_project_id(),
private_key=self.credentials,
configuration=config)
tm.assert_frame_equal(df, DataFrame({'valid_result': [3]}))
def test_query_inside_configuration(self):
query_no_use = 'SELECT "PI_WRONG" AS valid_string'
query = 'SELECT "PI" AS valid_string'
config = {
'query': {
"query": query,
"useQueryCache": False,
}
}
# Test that it can't pass query both
# inside config and as parameter
with pytest.raises(ValueError):
gbq.read_gbq(query_no_use, project_id=_get_project_id(),
private_key=self.credentials,
configuration=config)
df = gbq.read_gbq(None, project_id=_get_project_id(),
private_key=self.credentials,
configuration=config)
tm.assert_frame_equal(df, | DataFrame({'valid_string': ['PI']}) | pandas.DataFrame |
# This script classifies the data in (near) real time
import argparse
import pandas as pd
import numpy as np
import os
import random
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from scipy import stats
import pathlib
import DS_pkt_near_realtime_run_ml as run_ml
import settings
def read_data(dataset_dir):
df_list = []
video_list = os.listdir(dataset_dir)
for video in video_list:
if video == '.DS_Store':
continue
video_path = pathlib.Path(str(dataset_dir) + '/' + video)
t_df = | pd.read_csv(video_path) | pandas.read_csv |
from wf_core_data_dashboard import core
import fastbridge_utils
import wf_core_data
import pandas as pd
import inflection
import urllib.parse
import os
def generate_fastbridge_table_data(
test_events_path,
student_info_path,
student_assignments_path
):
test_events = pd.read_pickle(test_events_path)
student_info = pd.read_pickle(student_info_path)
student_assignments = pd.read_pickle(student_assignments_path)
students = fastbridge_utils.summarize_by_student(
test_events=test_events,
student_info=student_info,
student_assignments=student_assignments
)
groups = fastbridge_utils.summarize_by_group(
students=students
)
return students, groups
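# Illustrative usage sketch (paths and filter values below are placeholders, not part of this module):
# >>> students, groups = generate_fastbridge_table_data(
# ...     test_events_path='data/test_events.pkl',
# ...     student_info_path='data/student_info.pkl',
# ...     student_assignments_path='data/student_assignments.pkl'
# ... )
# >>> html = groups_page_html(groups, school_year='2021-2022', test='FAST', subtest='aReading')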
def groups_page_html(
groups,
school_year=None,
school=None,
test=None,
subtest=None,
title=None,
subtitle=None,
include_details_link=True
):
if title is None:
title = 'FastBridge results'
if subtitle is None:
subtitle = ':'.join(filter(
lambda x: x is not None,
[
school_year,
school,
test,
subtest
]
))
table_html = groups_table_html(
groups,
school_year=school_year,
school=school,
test=test,
subtest=subtest,
include_details_link=include_details_link
)
template = core.get_template("groups_table.html")
return template.render(
title=title,
subtitle=subtitle,
table_html=table_html
)
def students_page_html(
students,
school_year=None,
school=None,
test=None,
subtest=None,
title=None,
subtitle=None
):
if title is None:
title = 'FastBridge results'
if subtitle is None:
subtitle = ':'.join(filter(
lambda x: x is not None,
[
school_year,
school,
test,
subtest
]
))
table_html = students_table_html(
students=students,
school_year=school_year,
school=school,
test=test,
subtest=subtest
)
template = core.get_template("students_table.html")
return template.render(
title=title,
subtitle=subtitle,
table_html=table_html
)
def groups_table_html(
groups,
school_year=None,
school=None,
test=None,
subtest=None,
include_details_link=True
):
groups = groups.copy()
groups['mean_ending_percentile_sem_range'] = groups.apply(
lambda row: '{:.1f} – {:.1f}'.format(
row['mean_ending_percentile'] - row['mean_ending_percentile_sem'],
row['mean_ending_percentile'] + row['mean_ending_percentile_sem'],
) if not | pd.isna(row['mean_ending_percentile']) | pandas.isna |
"""
dlc2kinematics
© <NAME>
https://github.com/AdaptiveMotorControlLab/dlc2kinematics/
"""
import pandas as pd
import numpy as np
from skinematics import quat, vector
import matplotlib.pyplot as plt
import os
import scipy as sc
from pathlib import Path
from sklearn.decomposition import PCA
from dlc2kinematics.utils import auxiliaryfunctions
def compute_joint_quaternions(
df,
joints_dict,
save=True,
destfolder=None,
output_filename=None,
dropnan=False,
smooth=False,
filter_window=3,
order=1,
use4d=True,
):
"""
Computes the joint quaternions for the bodyparts.
Parameters
----------
df: Pandas multiindex dataframe. Assumes the dataframe is already smoothed. If not, adjust the filter_window and order to smooth the dataframe.
joints_dict: Dictionary
Keys of the dictionary specify the joint angle and the corresponding values specify the bodyparts, e.g.
joint_dict = {'R-Elbow': ['R_shoulder', 'Right_elbow', 'Right_wrist']}
save: Bool
Optional. Saves the joint angles as a pandas dataframe if set to True.
destfolder: string
Optional. Saves the joint angles in the specified destination folder. If it is set to None, the joint angles are saved in the current working directory.
output_filename: string
Optional. Name of the output file. If it is set to None, the file is saved as joint_angles_<scorer_name>.h5, <scorer_name> is the name of the scorer in the input df.
dropnan: boolean
Optional. If you want to drop any NaN values, this is useful for some downstream analysis (like PCA).
smooth: boolean
Optional. If you want to smooth the data with a Savitzky-Golay (savgol) filter, you can set this to True, and then also add filter_window and order.
filter_window: int
Optional. If smooth=True, window is set here, which needs to be a positive odd integer.
order: int
Optional. Only used if the optional argument `smooth` is set to True. Order of the polynomial to fit the data. The order must be less than the filter_window
use4d: boolean
Optional. Determines whether all 4 components of the quaternion are returned or just the quaternion vector (which uniquely determines the rotation quaternion due to the constraint mag = 1)
Outputs
-------
joint_quaternions: dataframe of joint angles.
Rows are time points, columns are multiindex with joint names ['R-Elbow', ...] and quaternion components ['a', 'b', 'c', 'd']
Example
-------
>>> joint_quaternions = dlc2kinematics.compute_joint_quaternions(df,joint_dict)
"""
scorer = df.columns.get_level_values(0)[0]
if use4d:
comps = ["a", "b", "c", "d"]
else:
comps = ["b", "c", "d"]
joints = list(joints_dict.keys())
quaternion_columns = pd.MultiIndex.from_product(
(joints, comps), names=["joint name", "comp"]
)
quaternions = pd.DataFrame(index=df.index, columns=quaternion_columns)
destfolder, output_filename = _get_filenames(destfolder, output_filename, scorer)
if os.path.isfile(os.path.join(destfolder, output_filename + ".h5")):
return _load_quaternions(destfolder, output_filename)
else:
for keys, vals in joints_dict.items():
a, b, c = vals[0], vals[1], vals[2]
jointname = keys
"""
if not use4d:
quatcompnames = [jointname + '-quat' + compname for compname in ['b', 'c', 'd']]
else:
quatcompnames = [jointname + '-quat' + compname for compname in ['a', 'b', 'c', 'd']]
"""
print("Computing joint quaternions for %s" % jointname)
# print(scorer, a, b, c)
# print(df[scorer].shape)
# print(df[scorer][[a,b,c]].shape)
tmpquat = np.squeeze(
np.stack(
df[scorer][[a, b, c]]
.apply(
auxiliaryfunctions.jointquat_calc,
axis=1,
result_type="reduce",
args=tuple([use4d]),
)
.values
)
)
quaternions[jointname] = tmpquat
"""
for compname, comp in zip(quatcompnames, tmpquat.transpose()):
print(comp)
quaternions[compname] = comp
"""
if smooth:
for col in list(quaternions.columns):
quaternions[col] = auxiliaryfunctions.smoothen_angles(
quaternions, col, filter_window, order
)
if dropnan:
quaternions = quaternions.dropna()
if save:
print(
"Saving the joint quaternions as a pandas array in %s "
% os.path.join(destfolder, output_filename + ".h5")
)
quaternions.to_hdf(
os.path.join(destfolder, output_filename + ".h5"),
"df_with_missing",
format="table",
mode="w",
)
print("saved")
return quaternions
def compute_joint_doubleangles(
df,
joints_dict,
save=True,
destfolder=None,
output_filename=None,
dropnan=False,
smooth=False,
filter_window=3,
order=1,
use4d=True,
):
"""
Computes the joint double angles for the bodyparts.
https://stackoverflow.com/questions/15101103/euler-angles-between-two-3d-vectors
Parameters
----------
df: Pandas multiindex dataframe. Assumes the dataframe is already smoothed. If not, adjust the filter_window and order to smooth the dataframe.
joints_dict: Dictionary
Keys of the dictionary specify the joint angle and the corresponding values specify the bodyparts, e.g.
joint_dict = {'R-Elbow': ['R_shoulder', 'Right_elbow', 'Right_wrist']}
save: Bool
Optional. Saves the joint angles as a pandas dataframe if set to True.
destfolder: string
Optional. Saves the joint angles in the specified destination folder. If it is set to None, the joint angles are saved in the current working directory.
output_filename: string
Optional. Name of the output file. If it is set to None, the file is saved as joint_angles_<scorer_name>.h5, <scorer_name> is the name of the scorer in the input df.
dropnan: boolean
Optional. If you want to drop any NaN values, this is useful for some downstream analysis (like PCA).
smooth: boolean
Optional. If you want to smooth the data with a Savitzky-Golay (savgol) filter, you can set this to True, and then also add filter_window and order.
filter_window: int
Optional. If smooth=True, window is set here, which needs to be a positive odd integer.
order: int
Optional. Only used if the optional argument `smooth` is set to True. Order of the polynomial to fit the data. The order must be less than the filter_window
use4d: boolean
Optional. Determines whether all 4 components of the quaternion are returned or just the quaternion vector (which uniquely determines the rotation quaternion due to the constraint mag = 1)
Outputs
-------
doubleangles: dataframe of joint angles
Rows are time points, columns are multiindex with joint names ['R-Elbow', ...] and double angle components ['pitch', 'yaw']
Example
-------
>>> doubleangles = dlc2kinematics.compute_joint_angles(df,joint_dict)
"""
scorer = df.columns.get_level_values(0)[0]
comps = ["pitch", "yaw"]
joints = list(joints_dict.keys())
doubleangle_columns = pd.MultiIndex.from_product((joints, comps))
doubleangles = pd.DataFrame(index=df.index, columns=doubleangle_columns)
destfolder, output_filename = _get_filenames(
destfolder, output_filename, scorer, datatype="doubleangles"
)
if os.path.isfile(os.path.join(destfolder, output_filename + ".h5")):
return _load_quaternions(destfolder, output_filename)
else:
for keys, vals in joints_dict.items():
a, b, c = vals[0], vals[1], vals[2]
jointname = keys
"""
if not use4d:
quatcompnames = [jointname + '-quat' + compname for compname in ['b', 'c', 'd']]
else:
quatcompnames = [jointname + '-quat' + compname for compname in ['a', 'b', 'c', 'd']]
"""
print("Computing joint doubleangles for %s" % jointname)
# print(scorer, a, b, c)
# print(df[scorer].shape)
# print(df[scorer][[a,b,c]].shape)
tmpda = np.squeeze(
np.stack(
df[scorer][[a, b, c]]
.apply(
auxiliaryfunctions.doubleangle_calc,
axis=1,
result_type="reduce",
)
.values
)
)
doubleangles[jointname] = tmpda
"""
for compname, comp in zip(quatcompnames, tmpquat.transpose()):
print(comp)
quaternions[compname] = comp
"""
if smooth:
for col in list(doubleangles.columns):
doubleangles[col] = auxiliaryfunctions.smoothen_angles(
doubleangles, col, filter_window, order
)
if dropnan:
doubleangles = doubleangles.dropna()
if save:
print(
"Saving the joint quaternions as a pandas array in %s "
% os.path.join(destfolder, output_filename + ".h5")
)
doubleangles.to_hdf(
os.path.join(destfolder, output_filename + ".h5"),
"df_with_missing",
format="table",
mode="w",
)
print("saved")
return doubleangles
def plot_joint_quaternions(joint_quaternion, quats=[None], start=None, end=None):
"""
Plots the joint quaternions (or velocity, or acceleration)
Parameters
----------
joint_quaternion: Pandas dataframe of joint quaternions, matching output of compute_joint_quaternions ()
Rows are time points, columns are multiindex with joint names ['R-Elbow', ...] and quaternion components ['a', 'b', 'c', 'd']
quats: list
Optional. List of quats to plot, e.g. ['R-Elbow a', 'R-Elbow b', ... ] containing both the name of the joint and the component
start: int
Optional. Integer specifying the start of frame index to select. Default is set to 0.
end: int
Optional. Integer specifying the end of frame index to select. Default is set to length of dataframe.
Example
-------
>>> dlc2kinematics.plot_joint_quaternions(joint_quaternion)
"""
"""
try:
joint_quaternion = pd.read_hdf(joint_quaternion, "df_with_missing")
except:
pass
if start == None:
start = 0
if end == None:
end = len(joint_quaternion)
if quats[0] == None:
quats = list(joint_quaternion.columns.get_level_values(0))
ax = joint_quaternion[quats][start:end].plot(kind="line")
# plt.tight_layout()
plt.ylim([0, 180])
plt.xlabel("Frame numbers")
plt.ylabel("joint quaternions")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
plt.title("Joint Quaternion", loc="left")
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.show()
"""
try:
joint_quaternion = pd.read_hdf(joint_quaternion, "df_with_missing")
except:
pass
joint_quaternion = joint_quaternion.copy()
joint_quaternion.columns = [
" ".join(col).strip() for col in joint_quaternion.columns.values
]
if start == None:
start = 0
if end == None:
end = len(joint_quaternion)
if quats[0] == None:
angles = list(joint_quaternion.columns.get_level_values(0))
ax = joint_quaternion[angles][start:end].plot(kind="line")
# plt.tight_layout()
plt.xlabel("Frame numbers")
plt.ylabel("Quaternion Component Magnitude")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
plt.title("Joint Quaternions", loc="left")
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.show()
def compute_joint_quaternion_velocity(
joint_quaternion,
filter_window=3,
order=1,
):
"""
Computes the first derivative of the joint quaternions in each component
Parameters
----------
joint_quaternion: Pandas dataframe of joint quaternions, matching output of compute_joint_quaternions
Rows are time points, columns are multiindex with joint names ['R-Elbow', ...] and quaternion components ['a', 'b', 'c', 'd']
filter_window: int
Optional. The length of filter window which needs to be a positive odd integer
order: int
Optional. Order of the polynomial to fit the data. The order must be less than the filter_window
Outputs
-------
quaternion_vel: dataframe of joint angular velocity
Rows are time points, columns are multiindex with joint names ['R-Elbow', ...] and quaternion components ['a', 'b', 'c', 'd']
Example
-------
>>> joint_quaternion_vel = dlc2kinematics.compute_joint_quaternion_velocity(joint_quaternion)
"""
try:
joint_quaternion = pd.read_hdf(joint_quaternion, "df_with_missing")
except:
pass
"""
numCols = joint_quaternion.shape[2]
if numCols == 3 and use4d:
joint_quaternion = quat.unit_q(joint_quaternion.to_array())
else:
assert use4d, "cannot convert to 3d (either change input manually or set use4d==True)"
"""
quaternion_vel = pd.DataFrame(
columns=joint_quaternion.columns, index=joint_quaternion.index
)
for i in list(joint_quaternion.columns.values):
quaternion_vel[i] = sc.signal.savgol_filter(
joint_quaternion[i],
window_length=filter_window,
polyorder=order,
axis=0,
deriv=1,
)
return quaternion_vel
def compute_joint_quaternion_acceleration(joint_quaternion, filter_window=3, order=2):
"""
Computes the joint angular acceleration.
Parameters
----------
joint_quaternion: Pandas dataframe of joint quaternions, matching output of compute_joint_quaternions
filter_window: int
Optional. The length of filter window which needs to be a positive odd integer.
order: int
Optional. Order of the polynomial to fit the data. The order must be less than the filter_window.
Outputs
-------
joint_acc: dataframe of joint angular acceleration.
Rows are time points, columns are multiindex with joint names ['R-Elbow', ...] and quaternion components ['a', 'b', 'c', 'd']
Example
-------
>>> joint_acc = dlc2kinematics.compute_joint_acceleration(joint_angle)
"""
try:
joint_quaternion = | pd.read_hdf(joint_quaternion, "df_with_missing") | pandas.read_hdf |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
import pandas as pd
from scipy import stats
from datetime import datetime
import h5py
import logging
import global_settings
import data_loader
from futures_tools import get_futures_chain, get_futures_actual_ticker, get_generic_futures_hist_data
def construct_inter_commodity_spreads() -> None:
#read_file = os.path.join(global_settings.root_path, 'data/futures_meta.xlsx')
#df_config = pd.read_excel(read_file, keep_default_na=False, sheet_name='Spread')
#df_contracts = pd.read_excel(read_file, keep_default_na=False, sheet_name='Contracts')
df_config = pd.read_csv(os.path.join(global_settings.root_path, 'data/config/inter_comdty_spread_meta.csv'), keep_default_na=False)
df_futures_meta = pd.read_csv(os.path.join(global_settings.root_path, 'data/config/futures_meta.csv'), index_col=0, keep_default_na=False)
df_futures_contracts_meta = pd.read_csv(os.path.join(global_settings.root_path, 'data/config/futures_contract_meta.csv'), index_col=0, keep_default_na=False)
df_futures_contracts_meta['Last_Trade_Date'] = pd.to_datetime(df_futures_contracts_meta['Last_Trade_Date'])
df_futures_contracts_meta = df_futures_contracts_meta.groupby('Root')
start_year = 2000
end_year = datetime.today().year + 30 # 30 year, to be conservative
futures_data_dict = dict()
if os.path.isfile(os.path.join(global_settings.root_path, 'data/futures_historical_prices.h5')):
with h5py.File(os.path.join(global_settings.root_path, 'data/futures_historical_prices.h5'), 'r') as f:
for k in f.keys():
futures_data_dict[k] = None
for k in futures_data_dict.keys():
futures_data_dict[k] = pd.read_hdf(os.path.join(global_settings.root_path, 'data/futures_historical_prices.h5'), key=k)
inter_comdty_spread_hist_data_dict = {}
meta_data_spread = pd.DataFrame(columns=['First_Trade_Date', 'Last_Trade_Date'])
for _, row in df_config.iterrows():
Leg1 = row['Leg1'] + ' ' if len(row['Leg1']) == 1 else row['Leg1']
Leg2 = row['Leg2'] + ' ' if len(row['Leg2']) == 1 else row['Leg2']
Leg3 = row['Leg3'] + ' ' if len(row['Leg3']) == 1 else row['Leg3']
weight1 = float(row['Weight1'])
weight2 = float(row['Weight2'])
gen_month1 = df_futures_meta.loc[Leg1, 'FUT_GEN_MONTH']
gen_month2 = df_futures_meta.loc[Leg2, 'FUT_GEN_MONTH']
common_months = list(set(gen_month1).intersection(set(gen_month2)))
sym_root = '{}:{}:{}:{}:'.format(Leg1, Leg2, round(weight1, 4), round(weight2, 4))
hist_data_spread = pd.DataFrame()
if Leg3: #if isinstance(row['Leg3'], str):
weight3 = float(row['Weight3'])
sym_root = '{}:{}:{}:{}:{}:{}:'.format(Leg1, Leg2, Leg3, round(weight1, 4), round(weight2, 4), round(weight3, 4))
gen_month3 = df_futures_meta.loc[Leg3, 'FUT_GEN_MONTH']
common_months = list(set(common_months).intersection(set(gen_month3)))
# assemble common month, meta_data is the earliest last_trading_day
# name is A:B:C:w1:w2:w3:J06
for yr in range(start_year, end_year + 1):
for mth in common_months:
exist = (Leg1 + mth + str(yr) in df_futures_contracts_meta.get_group(Leg1).index) and (
Leg2 + mth + str(yr) in df_futures_contracts_meta.get_group(Leg2).index)
if Leg3:
exist = exist and (Leg3 + mth + str(yr) in df_futures_contracts_meta.get_group(Leg3).index)
if not exist:
continue
try:
row_dict = {}
s = futures_data_dict[Leg1][Leg1 + mth + str(yr)] * weight1 \
+ futures_data_dict[Leg2][Leg2 + mth + str(yr)] * weight2
if Leg3:
s = s + futures_data_dict[Leg3][Leg3 + mth + str(yr)] * weight3
# row_dict['First_Trade_Date'] = max(
# df_futures_contracts_meta.get_group(Leg1).loc[Leg1 + mth + str(yr), 'First_Trade_Date'],
# df_futures_contracts_meta.get_group(Leg2).loc[Leg2 + mth + str(yr), 'First_Trade_Date'],
# df_futures_contracts_meta.get_group(Leg3).loc[Leg3 + mth + str(yr), 'First_Trade_Date'])
row_dict['Last_Trade_Date'] = min(
df_futures_contracts_meta.get_group(Leg1).loc[Leg1 + mth + str(yr), 'Last_Trade_Date'],
df_futures_contracts_meta.get_group(Leg2).loc[Leg2 + mth + str(yr), 'Last_Trade_Date'],
df_futures_contracts_meta.get_group(Leg3).loc[Leg3 + mth + str(yr), 'Last_Trade_Date'])
else:
# row_dict['First_Trade_Date'] = max(
# df_futures_contracts_meta.get_group(Leg1).loc[Leg1 + mth + str(yr), 'First_Trade_Date'],
# df_futures_contracts_meta.get_group(Leg2).loc[Leg2 + mth + str(yr), 'First_Trade_Date'])
row_dict['Last_Trade_Date'] = min(
df_futures_contracts_meta.get_group(Leg1).loc[Leg1 + mth + str(yr), 'Last_Trade_Date'],
df_futures_contracts_meta.get_group(Leg2).loc[Leg2 + mth + str(yr), 'Last_Trade_Date'])
row_dict['Root'] = sym_root
s.name = sym_root + mth + str(yr)
hist_data_spread = pd.concat([hist_data_spread, s], axis=1, sort=True)
df_2 = pd.DataFrame(row_dict, index=[s.name])
meta_data_spread = meta_data_spread.append(df_2)
logging.debug('{} {} {} constructed'.format(sym_root, yr, mth))
except:
logging.debug('{} {} {} passed'.format(sym_root, yr, mth))
inter_comdty_spread_hist_data_dict[sym_root] = hist_data_spread
inter_comdty_spread_hist_data_dict[sym_root].to_hdf(os.path.join(global_settings.root_path, 'data/inter_comdty_spread_historical_prices.h5'), key=sym_root)
logging.info('{} is constructed'.format(sym_root))
meta_data_spread.sort_values(by='Last_Trade_Date', inplace=True, axis=0, ascending=True)
meta_data_spread.to_csv(os.path.join(global_settings.root_path, 'data/config/inter_comdty_spread_contract_meta.csv'), index=True)
logging.info('commodity_inter_spread saved')
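# Hedged read-back sketch: each spread's price history is written to the HDF5 file above under its
# sym_root key built in this function, so it can be reloaded with pandas (the key value shown is illustrative):
# >>> spreads = pd.read_hdf(
# ...     os.path.join(global_settings.root_path, 'data/inter_comdty_spread_historical_prices.h5'),
# ...     key='CL:HO:1.0:-1.0:')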
def construct_comdty_generic_hist_prices() -> None:
"""
construct generic price series on the fly
:return:
"""
generic_futures_hist_prices_dict = {}
df_futures_meta = pd.read_csv(os.path.join(global_settings.root_path, 'data/config/futures_meta.csv'), index_col=0)
df_futures_meta = df_futures_meta[~np.isnan(df_futures_meta['QuandlMultiplier'])]
df_futures_contracts_meta = pd.read_csv(os.path.join(global_settings.root_path, 'data/config/futures_contract_meta.csv'), index_col=0, keep_default_na=False)
df_futures_contracts_meta['Last_Trade_Date'] = pd.to_datetime(df_futures_contracts_meta['Last_Trade_Date'])
df_futures_contracts_meta = df_futures_contracts_meta.groupby('Root')
futures_data_dict = dict()
if os.path.isfile(os.path.join(global_settings.root_path, 'data/futures_historical_prices.h5')):
with h5py.File(os.path.join(global_settings.root_path, 'data/futures_historical_prices.h5'), 'r') as f:
for k in f.keys():
futures_data_dict[k] = None
for k in futures_data_dict.keys():
futures_data_dict[k] = pd.read_hdf(os.path.join(global_settings.root_path, 'data/futures_historical_prices.h5'), key=k)
for idx, _ in df_futures_meta.iterrows():
root_sym = idx
try:
gen = get_generic_futures_hist_data(futures_data_dict[root_sym], df_futures_contracts_meta.get_group(root_sym))
generic_futures_hist_prices_dict[root_sym] = gen
generic_futures_hist_prices_dict[root_sym].to_hdf(os.path.join(global_settings.root_path, 'data/futures_generic_historical_prices.h5'), key=root_sym)
logging.info('{} generic prices generated'.format(root_sym))
except:
logging.error('{} failed to generate generic prices'.format(root_sym))
def construct_inter_comdty_generic_hist_prices() -> None:
"""
construct generic price series on the fly
:return:
"""
generic_inter_comdty_hist_prices_dict = {}
df_futures_contracts_meta = pd.read_csv(os.path.join(global_settings.root_path, 'data/config/inter_comdty_spread_contract_meta.csv'), index_col=0, keep_default_na=False)
df_futures_contracts_meta['Last_Trade_Date'] = pd.to_datetime(df_futures_contracts_meta['Last_Trade_Date'])
df_futures_contracts_meta = df_futures_contracts_meta.groupby('Root')
inter_comdty_spread_hist_data_dict = dict()
if os.path.isfile(os.path.join(global_settings.root_path, 'data/inter_comdty_spread_historical_prices.h5')):
with h5py.File(os.path.join(global_settings.root_path, 'data/inter_comdty_spread_historical_prices.h5'), 'r') as f:
for k in f.keys():
inter_comdty_spread_hist_data_dict[k] = None
for k in inter_comdty_spread_hist_data_dict.keys():
inter_comdty_spread_hist_data_dict[k] = pd.read_hdf(os.path.join(global_settings.root_path, 'data/inter_comdty_spread_historical_prices.h5'), key=k)
for root_sym, group in df_futures_contracts_meta:
try:
# gen = get_generic_futures_hist_data(inter_comdty_spread_hist_data_dict[root_sym], df_futures_contracts_meta.get_group(root_sym))
gen = get_generic_futures_hist_data(inter_comdty_spread_hist_data_dict[root_sym], group)
generic_inter_comdty_hist_prices_dict[root_sym] = gen
generic_inter_comdty_hist_prices_dict[root_sym].to_hdf(os.path.join(global_settings.root_path, 'data/inter_comdty_spread_generic_historical_prices.h5'), key=root_sym)
logging.info('{} generic prices generated'.format(root_sym))
except:
logging.error('{} failed to generate generic prices'.format(root_sym))
def construct_curve_spread_fly():
# cache_dir = os.path.dirname(os.path.realpath(__file__))
_, futures_contracts_meta_df, _, inter_comdty_spread_contracts_meta_df = data_loader.load_futures_meta_data()
futures_hist_prices_dict, _ = data_loader.load_futures_hist_prices()
generic_futures_hist_prices_dict = data_loader.load_comdty_generic_hist_prices()
inter_comdty_spread_hist_data_dict = data_loader.load_inter_comdty_spread_hist_prices()
generic_inter_comdty_hist_prices_dict = data_loader.load_inter_comdty_generic_hist_prices()
combined_root_syms = list(generic_futures_hist_prices_dict.keys())
combined_root_syms.extend(list(generic_inter_comdty_hist_prices_dict.keys()))
# get spread/fly for outright and inter-comdty-spread
for sym_root in combined_root_syms:
if ':' in sym_root:
hist_data = inter_comdty_spread_hist_data_dict[sym_root]
meta_data = inter_comdty_spread_contracts_meta_df[inter_comdty_spread_contracts_meta_df['Root'] == sym_root]
meta_data.sort_values('Last_Trade_Date', inplace=True)
generic_data = generic_inter_comdty_hist_prices_dict[sym_root]
else:
hist_data = futures_hist_prices_dict[sym_root]
meta_data = futures_contracts_meta_df[futures_contracts_meta_df['Root'] == sym_root]
meta_data.sort_values('Last_Trade_Date', inplace=True)
generic_data = generic_futures_hist_prices_dict[sym_root]
try:
asofdate = hist_data.index[-1]
except: # probably no data
continue
meta_data = get_futures_chain(meta_data, asofdate)
# get spread combos
spread_combos = []
tenors = range(1, generic_data.shape[1] + 1)
for i in tenors:
for j in tenors:
spread = j - i
if i <= 24 and j > i and spread <= 12:
spread_combos.append((i, j))
fly_combos = []
tenors = range(1, generic_data.shape[1] + 1)
for i in tenors:
for j in tenors:
spread1 = j - i
for k in tenors:
spread2 = k - j
if i <= 24 and j > i and k > j and spread1 <= 12 and spread2 <= 12 and spread1 == spread2:
fly_combos.append((i, j, k,))
cols_spread = ['Name', 'Leg1', 'Leg2', 'Leg1 Actual', 'Leg2 Actual', 'Spread', 'Spread Prcnt', 'RD Prcnt', 'Spread Z-Score', 'RD Z-Score']
df_spread_stats = | pd.DataFrame(columns=cols_spread) | pandas.DataFrame |
import pandas as pd
def get_sets(instance, varname):
"""Get sets that belong to a pyomo Variable or Param
:param instance: Pyomo Instance
:param varname: Name of the Pyomo Variable (string)
:return: A list with the sets that belong to this Param
"""
var = getattr(instance, varname)
if var.dim() > 1:
sets = [pset.getname() for pset in var._index.subsets()]
else:
sets = [var._index.name]
return sets
def get_set_members(instance, sets):
"""Get set members relative to a list of sets
:param instance: Pyomo Instance
:param sets: List of strings with the set names
:return: A list with the set members
"""
sm = []
for s in sets:
sm.append([v for v in getattr(instance, s).data()])
return sm
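# Minimal sketch of how these helpers fit together (model, set and variable names are hypothetical;
# assumes a Pyomo version where Var._index behaves as used above):
# >>> from pyomo.environ import ConcreteModel, Set, Var
# >>> m = ConcreteModel()
# >>> m.T = Set(initialize=['t1', 't2'])
# >>> m.x = Var(m.T)
# >>> sets = get_sets(m, 'x')              # e.g. ['T']
# >>> members = get_set_members(m, sets)   # e.g. [['t1', 't2']]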
def pyomo_to_pandas(instance, varname, dates=None):
"""
Function converting a pyomo variable or parameter into a pandas dataframe.
The variable must have one or two dimensions and the sets must be provided as a list of lists
:param instance: Pyomo model instance
:param varname: Name of the Pyomo Variable (string)
:param dates: List of datetimes or pandas DatetimeIndex
"""
setnames = get_sets(instance, varname)
sets = get_set_members(instance, setnames)
var = getattr(instance, varname) # Previous script used model.var instead of var
####
if len(sets) != var.dim():
raise ValueError(
"The number of provided set lists ("
+ str(len(sets))
+ ") does not match the dimensions of the variable ("
+ str(var.dim())
+ ")"
)
if var.dim() == 1:
[SecondSet] = sets
out = | pd.DataFrame(columns=[var.name], index=SecondSet) | pandas.DataFrame |
r"""
Created on 21/9/2021 5:52 PM
@author: jiahuei
streamlit run streamlit_plotly.py
"""
import pandas as pd
import plotly.express as px
import streamlit as st
# from numpy.polynomial import Polynomial as T
def main():
st.title("Plotly Demo: COVID-19 Vaccination Data Visualisation")
# CSV data file
upload_help = "Provide the OWID CSV file"
uploaded_file = st.sidebar.file_uploader(upload_help)
if uploaded_file is None:
st.info(f"{upload_help}, by uploading it in the sidebar")
return
# Convert to Pandas DF
df_raw = pd.read_csv(uploaded_file)
df = df_raw.copy()
df["date"] = pd.to_datetime(df["date"])
# Filter by date
key = "people_fully_vaccinated_per_hundred"
df = df.loc[df[key].dropna().index]
date = st.slider(
"Select a date",
min_value=df["date"].min().to_pydatetime(),
max_value=df["date"].max().to_pydatetime(),
value=pd.to_datetime("today").normalize().to_pydatetime(),
step=None,
)
df = df.loc[pd.to_datetime(date) - df["date"] <= | pd.Timedelta(14, unit="d") | pandas.Timedelta |
import numpy as np
import pandas as pd
import pylab as plt
file_path = 'files/'
import pickle
from scipy.stats import binom
from sklearn.cluster import KMeans
import pathlib
path = pathlib.Path.cwd()
if path.stem == 'depth_norm':
cwd = path
else:
cwd = list(path.parents)[::-1][path.parts.index('depth_norm')]
import sys
sys.path.append(str(cwd))
##TCGA data used
sample_df = pickle.load(open(cwd / 'files' / 'tcga_public_sample_table.pkl', 'rb'))
##https://gdc.cancer.gov/about-data/publications/pancanatlas
tcga_master_calls = pd.read_csv(cwd / 'files' / 'TCGA_mastercalls.abs_tables_JSedit.fixed.txt', sep='\t')
tcga_master_calls['sample_id'] = tcga_master_calls['sample'].apply(lambda x: x[:16])
tcga_master_calls = tcga_master_calls.loc[tcga_master_calls['call status'] == 'called']
tcga_master_calls = tcga_master_calls.groupby('sample_id')['purity'].mean().to_frame().reset_index()
sample_df = pd.merge(sample_df, tcga_master_calls, left_on='bcr_sample_barcode', right_on='sample_id', how='inner')
tcga_maf = pickle.load(open(cwd / 'files' / 'tcga_public_maf.pkl', 'rb'))
tcga_maf['vaf'] = tcga_maf['t_alt_count'].values / (tcga_maf['t_alt_count'].values + tcga_maf['t_ref_count'].values)
tcga_maf['depth'] = tcga_maf['t_ref_count'] + tcga_maf['t_alt_count']
tcga_maf['sample_id'] = tcga_maf['Tumor_Sample_Barcode'].apply(lambda x: x[:16])
cols = ['sample', 'subclonal.ix']
##https://gdc.cancer.gov/about-data/publications/pancan-aneuploidy
clonality_maf = pd.read_csv(cwd / 'files' / 'TCGA_consolidated.abs_mafs_truncated.fixed.txt', sep='\t', usecols=cols, low_memory=False)
result = clonality_maf.groupby('sample')['subclonal.ix'].apply(lambda x: sum(x) / len(x)).to_frame().reset_index()
del clonality_maf
sample_df = | pd.merge(sample_df, result, left_on='Tumor_Sample_Barcode', right_on='sample', how='inner') | pandas.merge |
# -*- coding: utf-8 -*-
"""This script generates a sklearn random forest
model that predict values that were created using the
formula (a + b) / 2
The two arrays a and b must have values with range [0, 256].
"""
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
import pandas as pd
from sklearn.externals import joblib
def compute_efficiency(model_result, measurement):
diff = model_result - measurement
eff = 1 - sum(diff * diff) / (measurement.var() * len(measurement))
return eff
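# Quick sanity check of compute_efficiency (illustrative): with a perfect prediction the
# numerator is zero, so the Nash-Sutcliffe-style efficiency above evaluates to 1.
# >>> m = np.array([1.0, 2.0, 3.0, 4.0])
# >>> compute_efficiency(m.copy(), m)
# 1.0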
print("Create training data")
# Train a value adder that represents the formula (a + b) / 2
a = np.random.randint(-1, 257, 1000)
b = np.random.randint(-1, 257, 1000)
# Create the predicting data that is used for training
c = (a + b)/2
# Cast to integer
y = c.astype(int)
print("Train random forest model")
model = RandomForestRegressor(n_estimators=100, max_depth=12, max_features="log2", n_jobs=16,
min_samples_split=2, min_samples_leaf=1, verbose=0)
# This is the training data with two arrays
X = | pd.DataFrame() | pandas.DataFrame |
# %%
import json
import requests
import pandas as pd
import matplotlib.pyplot as plt
from Exceptions import APIException
from Exceptions import APIRequestException
class DefiLlama:
data = None
API_URL = "https://api.llama.fi"
DEFAULT_TIMEOUT = 10
def __init__(self, proxies = None):
self._session = self._init_session(proxies)
@staticmethod
def _init_session(proxies):
session = requests.session()
if proxies is not None:
session.proxies.update(proxies)
return session
def close(self):
self._session.close()
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
def _request(self, method, path, **kwargs):
uri = "{}/{}".format(self.API_URL, path)
kwargs["timeout"] = kwargs.get("timeout", self.DEFAULT_TIMEOUT)
kwargs["params"] = self._format_params(kwargs.get("params", {}))
response = getattr(self._session, method)(uri, **kwargs)
return self._handle_response(response)
def _handle_response(self, response):
if not response.ok:
raise APIException(response)
try:
content_type = response.headers.get('Content-Type', '')
if 'application/json' in content_type:
return response.json()
if 'text/csv' in content_type:
return response.text
if 'text/plain' in content_type:
return response.text
raise APIRequestException("Invalid Response: {}".format(response.text))
except ValueError:
raise APIRequestException("Invalid Response: {}".format(response.text))
@staticmethod
def _format_params(params):
return {k: json.dumps(v) if isinstance(v, bool) else v for k, v in params.items()}
def _get(self, path, **kwargs):
return self._request("get", path, **kwargs)
def _post(self, path, **kwargs):
return self._request("post", path, **kwargs)
#GET GENERIC FUNCTION
def get_generic(self, endpoint = "", **params):
return self._get(endpoint, params=params)
#TVL
##Get list all DeFi protocols across all blockchains
def protocols(self, output_df = True):
data = self._get("/protocols")
if output_df == True:
df = pd.DataFrame(data)
df.set_index('name', inplace=True)
self.data = df
else:
self.data = data
return self.data
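# Illustrative usage (requires network access; the exact columns come from the live
# DefiLlama /protocols endpoint and may change):
# >>> llama = DefiLlama()
# >>> df = llama.protocols(output_df=True)   # DataFrame indexed by protocol name
# >>> df.head()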
##Get metrics and historic TVL for one DeFi dApp
def protocol(self, protocol, output_df = True):
data = self._get("/protocol/" + protocol)
if output_df == True:
tvl = pd.DataFrame(data["tvl"])
tvl.date = pd.to_datetime(tvl.date, unit='s')
tvl = tvl.set_index('date')
del data['tvl']
chain_tvls = {}
for k, v in data["chainTvls"].items():
chain_tvls[k] = | pd.DataFrame(v["tvl"]) | pandas.DataFrame |
import pandas as pd
from tkinter.filedialog import asksaveasfilename
import tkinter as tk
def saveDialog(self):
'''Save data from the add page as an Excel file'''
values = []
for child in self.display.get_children():
values.append(self.display.item(child)["values"])  # append the treeview row values; ["values"] returns them without datatype information
print(values)
df = | pd.DataFrame(values) | pandas.DataFrame |
"""Python调用天软的封装"""
import sys
sys.path.append(r"D:\programs\Analyse.NET")
import pandas as pd
import TSLPy3 as tsl
import os
from FactorLib.utils.tool_funcs import tradecode_to_tslcode, tslcode_to_tradecode
from FactorLib.utils.datetime_func import DateRange2Dates
from FactorLib.utils.TSDataParser import *
from functools import reduce, partial
_ashare = "'上证A股;深证A股;创业板;中小企业板;科创板;暂停上市;终止上市'"
_fund = "'上证基金;深证基金;开放式基金'"
_condition = 'firstday()<=getsysparam(pn_date())'
def _gstr_from_func(func_name, func_args):
func_str = "data := {func_name}({args}); return data;".format(func_name=func_name, args=",".join(func_args))
return func_str
def encode_datetime(dt):
return tsl.EncodeDateTime(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second, 0)
def decode_date(dt):
dt_decode = tsl.DecodeDate(dt)
return pd.Timestamp(*dt_decode)
def run_script(script, sysparams):
data = tsl.RemoteExecute(script, sysparams)
return data
def run_function(func_name, *args, **kwargs):
"""
Call an arbitrary TinySoft (TSL) function.
Parameters:
func_name: str
Function name
args:
List of function arguments.
String-typed arguments need their own single quotes, e.g. "'SH600001'"
kwargs:
TinySoft system parameters
"""
script_str = _gstr_from_func(func_name, args)
return run_script(script_str, kwargs)
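# Hedged usage sketch: "StockName" below stands in for any TinySoft function name and is only an
# illustration; string arguments carry their own single quotes, as noted in the docstring.
# >>> data = run_function("StockName", "'SH600001'")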
def CsQuery(field_dict, end_date, bk_name=_ashare, stock_list=None, condition="1",
code_transfer=True, **kwargs):
"""对天软Query函数的封装
Parameters:
===========
field_dict: dict
计算字段{字段名称: 函数名称},字段名称需自加单引号。
例:{"'IDs'": 'DefaultStockID()'}
end_daet: str
截面日期
bk_name: str
天软板块名称,以分号分割。
stock_list: list
股票列表,默认的格式是6位代码
condition: str
天软Csquery参数
code_transfer: bool
是否要将股票列表的代码转为天软格式。如果stock_list中代码格式
没有后缀,那code_transfer需为True。
kwargs: dict
天软系统参数
"""
field_dict.update({"'IDs'": 'DefaultStockID()'})
if stock_list is None:
stock_list = "''"
else:
if code_transfer:
stock_list = "'%s'" % ";".join(map(tradecode_to_tslcode, stock_list))
else:
stock_list = "'%s'" % ";".join(stock_list)
if (end_date.hour == 0) and (end_date.minute == 0) and (end_date.second == 0):
encode_date = tsl.EncodeDate(end_date.year, end_date.month, end_date.day)
else:
encode_date = tsl.EncodeDateTime(end_date.year, end_date.month, end_date.day,
end_date.hour, end_date.minute, end_date.second, 0)
func_name = "Query"
func_args = [bk_name, stock_list, condition, "''"] + list(reduce(lambda x, y: x+y, field_dict.items()))
script_str = _gstr_from_func(func_name, func_args)
sysparams = {'CurrentDate': encode_date}
sysparams.update(kwargs)
data = tsl.RemoteExecute(script_str, sysparams)
df = parse2DArray(data, column_decode=['IDs'])
df['IDs'] = df['IDs'].apply(tslcode_to_tradecode)
df['date'] = end_date
return df.set_index(['date', 'IDs'])
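# Minimal CsQuery sketch (the 'close()' field expression is an assumed TinySoft function name,
# shown only to illustrate the quoting convention from the docstring):
# >>> fields = {"'close'": 'close()'}
# >>> df = CsQuery(fields, pd.to_datetime('2018-01-05'))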
def TsQuery(field_dict, dates, stock, code_transfer=True, **kwargs):
"""
TinySoft time-series function
"""
field_dict.update({"'date'": 'DateTimeToStr(sp_time())', "'IDs'": 'DefaultStockID()'})
if code_transfer:
stock = tradecode_to_tslcode(stock)
N = len(dates)
func_args = [str(N)] + list(reduce(lambda x, y: x+y, field_dict.items()))
func_name = "Nday"
script_str = _gstr_from_func(func_name, func_args)
end_date = max(dates)
if (end_date.hour == 0) and (end_date.minute == 0) and (end_date.second == 0):
encode_date = tsl.EncodeDate(end_date.year, end_date.month, end_date.day)
else:
encode_date = tsl.EncodeDateTime(end_date.year, end_date.month, end_date.day,
end_date.hour, end_date.minute, end_date.second, 0)
sysparams = {'CurrentDate': encode_date, 'StockID': stock}
sysparams.update(kwargs)
data = tsl.RemoteExecute(script_str, sysparams)
df = parse2DArray(data, column_decode=['IDs', 'date'])
df['IDs'] = df['IDs'].apply(tslcode_to_tradecode)
df['date'] = pd.DatetimeIndex(df['date'])
return df.set_index(['date', 'IDs'])
def CsQueryMultiFields(field_dict, end_date, bk_name=_ashare, stock_list=None,
condition="1", code_transfer=True, **kwargs):
"""天软Query函数封装
与CsQuery()的不同是,此函数对每只股票提取的字段数量大于1。
"""
field_dict.update({"'IDs'": 'DefaultStockID()'})
if stock_list is None:
stock_list = "''"
else:
if code_transfer:
stock_list = "'%s'" % ";".join(map(tradecode_to_tslcode, stock_list))
else:
stock_list = "'%s'" % ";".join(stock_list)
if (end_date.hour == 0) and (end_date.minute == 0) and (end_date.second == 0):
encode_date = tsl.EncodeDate(end_date.year, end_date.month, end_date.day)
else:
encode_date = tsl.EncodeDateTime(end_date.year, end_date.month, end_date.day,
end_date.hour, end_date.minute, end_date.second, 0)
func_name = "Query"
func_args = [bk_name, stock_list, condition, "''"] + list(reduce(lambda x, y: x + y, field_dict.items()))
script_str = _gstr_from_func(func_name, func_args)
sysparams = {'CurrentDate': encode_date}
sysparams.update(kwargs)
data = tsl.RemoteExecute(script_str, sysparams)
df = parseByStock(data)
return df
@DateRange2Dates
def PanelQuery(field_dict, start_date=None, end_date=None, dates=None,
bk_name=_ashare, stock_list=None, condition="1",
code_transfer=True, **kwargs):
"""对天软Query函数的封装
Parameters:
===========
field_dict:
"""
data = [None] * len(dates)
for i, date in enumerate(dates):
print("天软函数执行日期: %s" % date.strftime("%Y%m%d"))
idata = CsQuery(field_dict, date, bk_name=bk_name, stock_list=stock_list, condition=condition,
code_transfer=code_transfer, **kwargs)
data[i] = idata
return pd.concat(data).sort_index().reindex(dates, level='date')
@DateRange2Dates
def PanelQueryByStocks(field_dict, stocks, start_date=None, end_date=None, dates=None,
code_transfer=True, **kwargs):
data = [None] * len(stocks)
for i, s in enumerate(stocks):
print("天软函数执行代码: %s" % s)
idata = TsQuery(field_dict, dates, s, code_transfer, **kwargs)
data[i] = idata
return pd.concat(data).sort_index()
def partialCsQueryFunc(*args, **kwargs):
"""CsQuery的偏函数"""
return partial(CsQuery, *args, **kwargs)
def _read_factors():
file_pth = os.path.abspath(os.path.dirname(__file__)+'/..')
file_pth = os.path.join(file_pth, 'resource', 'tsl_tableinfo.xlsx')
factorIDs = | pd.read_excel(file_pth, index_col=0, header=0) | pandas.read_excel |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import numbers
import warnings
from copy import deepcopy
import re
from bokeh.plotting import figure
import pandas as pd
import numpy as np
from bokeh.models import (
HoverTool,
ColumnDataSource,
DatetimeTickFormatter,
LinearColorMapper,
LogColorMapper,
CategoricalColorMapper,
ColorBar,
FuncTickFormatter,
WheelZoomTool,
)
from bokeh.models.tickers import FixedTicker
from bokeh.palettes import all_palettes, Inferno256
from bokeh.models.ranges import FactorRange
from bokeh.transform import dodge, cumsum
from bokeh.core.properties import value as _value
from bokeh.models.glyphs import Text
from bokeh.models.callbacks import CustomJS
from bokeh.events import Tap
from pandas.plotting._core import BasePlotMethods
from .base import show, embedded_html
from .geoplot import geoplot
def check_type(data):
"""Checks type of provided data array."""
if isinstance(data[0], numbers.Number):
return "numeric"
elif isinstance(data[0], (np.datetime64, datetime.datetime, datetime.date)):
return "datetime"
else:
return "object"
def get_colormap(colormap, N_cols):
"""Returns a colormap with <N_cols> colors. <colormap> can be either None,
a string with the name of a Bokeh color palette or a list/tuple of colors."""
if colormap is None:
if N_cols <= 10:
colormap = all_palettes["Category10"][10][:N_cols]
elif N_cols <= 20:
colormap = all_palettes["Category20"][N_cols]
else:
colormap = all_palettes["Category20"][20] * int(N_cols / 20 + 1)
colormap = colormap[:N_cols]
elif isinstance(colormap, str):
if colormap in all_palettes:
colormap = all_palettes[colormap]
max_key = max(colormap.keys())
if N_cols <= max_key:
colormap = colormap[N_cols]
else:
colormap = colormap[max_key]
colormap = colormap * int(N_cols / len(colormap) + 1)
colormap = colormap[:N_cols]
else:
raise ValueError(
"Could not find <colormap> with name %s. The following predefined colormaps are supported (see also https://bokeh.pydata.org/en/latest/docs/reference/palettes.html ): %s"
% (colormap, list(all_palettes.keys()))
)
elif isinstance(colormap, (list, tuple)):
colormap = colormap * int(N_cols / len(colormap) + 1)
colormap = colormap[:N_cols]
else:
raise ValueError(
"<colormap> can onyl be None, a name of a colorpalette as string( see https://bokeh.pydata.org/en/latest/docs/reference/palettes.html ) or a list/tuple of colors."
)
return colormap
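# Examples of the resolution logic above (palette names come from bokeh.palettes):
# >>> get_colormap(None, 3)                     # first 3 colors of Category10
# >>> get_colormap("Viridis", 7)                # 7 colors from the Viridis palette
# >>> get_colormap(["#000000", "#ff0000"], 5)   # the 2 given colors cycled up to 5 entries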
def _times_to_string(times):
types = []
for t in times:
t = pd.to_datetime(t)
if t.microsecond > 0:
types.append("microsecond")
elif t.second > 0:
types.append("second")
elif t.hour > 0:
types.append("hour")
else:
types.append("date")
if "microsecond" in types:
return [pd.to_datetime(t).strftime("%Y/%m/%d %H:%M:%S.%f") for t in times]
elif "second" in types:
return [pd.to_datetime(t).strftime("%Y/%m/%d %H:%M:%S") for t in times]
elif "hour" in types:
return [pd.to_datetime(t).strftime("%Y/%m/%d %H:%M") for t in times]
elif "date" in types:
return [pd.to_datetime(t).strftime("%Y/%m/%d") for t in times]
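# Example of the granularity detection above (illustrative): if any timestamp has sub-second
# precision the whole list is formatted with microseconds, otherwise the coarsest sufficient format is used.
# >>> _times_to_string(pd.to_datetime(["2020-01-01", "2020-06-15"]))
# ['2020/01/01', '2020/06/15']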
def plot(
df_in,
x=None,
y=None,
kind="line",
figsize=None,
use_index=True,
title="",
grid=None, # TODO:
legend="top_right",
logx=False,
logy=False,
xlabel=None,
ylabel=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
fontsize=None, # TODO:
color=None,
colormap=None,
category=None,
histogram_type=None,
stacked=False,
weights=None,
bins=None,
normed=False,
cumulative=False,
show_average=False,
plot_data_points=False,
plot_data_points_size=5,
number_format=None,
disable_scientific_axes = None,
show_figure=True,
return_html=False,
panning=True,
zooming=True,
toolbar_location="right",
hovertool=True,
hovertool_string=None,
vertical_xlabel=False,
webgl=True,
**kwargs
):
"""Method for creating a interactive with 'Bokeh' as plotting backend. Available
plot kinds are:
* line
* step
* point
* scatter
* bar / barh
* hist
* area
* pie
* map
Examples
--------
>>> df.plot_bokeh.line()
>>> df.plot_bokeh.scatter(x='x',y='y')
These plotting methods can also be accessed by calling the accessor as a
method with the ``kind`` argument (except for the "map" plot):
``df.plot_bokeh(kind='line')`` is equivalent to ``df.plot_bokeh.line()``
For more information about the individual plot kind implementations, have a
look at the underlying method accessors (like df.plot_bokeh.line) or visit
https://github.com/PatrikHlobil/Pandas-Bokeh.
"""
if kind == "map":
raise ValueError("Mapplots can only be plotted using the accessor methods. Please use df.plot_bokeh.map(...) instead of df.plot_bokeh(kind='map', ...).")
# Make a local copy of the DataFrame:
df = df_in.copy()
if isinstance(df, pd.Series):
df = pd.DataFrame(df)
# Get and check options for base figure:
figure_options = {
"title": title,
"toolbar_location": toolbar_location,
"active_scroll": "wheel_zoom",
"plot_width": 600,
"plot_height": 400,
"output_backend": "webgl",
}
if not figsize is None:
width, height = figsize
figure_options["plot_width"] = width
figure_options["plot_height"] = height
if logx:
figure_options["x_axis_type"] = "log"
if logy:
figure_options["y_axis_type"] = "log"
if not xlabel is None:
figure_options["x_axis_label"] = xlabel
if not ylabel is None:
figure_options["y_axis_label"] = ylabel
if not xlim is None:
if not isinstance(xlim, (tuple, list)):
raise ValueError("<xlim> must be a list/tuple of form (x_min, x_max).")
elif len(xlim) != 2:
raise ValueError("<xlim> must be a list/tuple of form (x_min, x_max).")
else:
figure_options["x_range"] = xlim
if not ylim is None:
if not isinstance(ylim, (tuple, list)):
raise ValueError("<ylim> must be a list/tuple of form (y_min, y_max).")
elif len(ylim) != 2:
raise ValueError("<ylim> must be a list/tuple of form (y_min, y_max).")
else:
figure_options["y_range"] = ylim
if webgl:
figure_options["output_backend"] = "webgl"
if number_format is None:
number_format = ""
else:
number_format = "{%s}"%number_format
# Check plot kind input:
allowed_kinds = [
"line",
"step",
"point",
"scatter",
"bar",
"barh",
"hist",
"area",
"pie",
"map",
]
if kind not in allowed_kinds:
allowed_kinds = "', '".join(allowed_kinds)
raise ValueError("Allowed plot kinds are '%s'." % allowed_kinds)
# Check hovertool_string and define additional columns to keep in source:
additional_columns = []
if hovertool_string is not None:
if not isinstance(hovertool_string, str):
raise ValueError("<hovertool_string> can only be None or a string.")
# Search for hovertool_string columns in DataFrame:
for s in re.findall("@[^\s\{]+", hovertool_string):
s = s[1:]
if s in df.columns:
additional_columns.append(s)
for s in re.findall("@\{.+\}", hovertool_string):
s = s[2:-1]
if s in df.columns:
additional_columns.append(s)
# Set standard linewidth:
if "line_width" not in kwargs:
kwargs["line_width"] = 2
# Get x-axis Name and Values:
delete_in_y = None
if not x is None:
if issubclass(x.__class__, pd.Index) or issubclass(x.__class__, pd.Series):
if x.name is not None:
name = str(x.name)
else:
name = ""
x = x.values
elif x in df.columns:
delete_in_y = x
name = str(x)
x = df[x].values
elif isinstance(x, (tuple, list, type(np.array))):
if len(x) == len(df):
x = x
name = ""
else:
raise Exception(
"Length of provided <x> argument does not fit length of DataFrame or Series."
)
else:
raise Exception(
"Please provide for the <x> parameter either a column name of the DataFrame/Series or an array of the same length."
)
else:
if use_index:
x = df.index.values
if not df.index.name is None:
name = str(df.index.name)
else:
name = ""
else:
x = np.linspace(0, len(df) - 1, len(df))
name = ""
# Define name of axis of x-values (for horizontal plots like barh, this corresponds
# to y-axis):
if kind == "barh":
if "y_axis_label" not in figure_options:
figure_options["y_axis_label"] = name
else:
if "x_axis_label" not in figure_options:
figure_options["x_axis_label"] = name
# Check type of x-axis:
if check_type(x) == "datetime":
figure_options["x_axis_type"] = "datetime"
xaxis_type = "datetime"
if not xlim is None:
starttime, endtime = xlim
try:
starttime = pd.to_datetime(starttime)
except:
raise ValueError("Could not parse x_min input of <xlim> as datetime.")
try:
endtime = pd.to_datetime(endtime)
except:
raise ValueError("Could not parse x_max input of <xlim> as datetime.")
figure_options["x_range"] = (starttime, endtime)
elif check_type(x) == "numeric":
xaxis_type = "numerical"
else:
xaxis_type = "categorical"
if kind in ["bar", "barh", "pie"]:
xaxis_type = "categorical"
x_old = x
x_labels_dict = None
if xaxis_type == "categorical":
if check_type(x) == "datetime":
x = _times_to_string(x)
else:
x = [str(el) for el in x]
if kind != "hist":
x_labels_dict = dict(zip(range(len(x)), x))
x = list(range(len(x)))
if "x_axis_type" in figure_options:
del figure_options["x_axis_type"]
# Determine data cols to plot (only plot numeric data):
if y is None:
cols = df.columns
elif not isinstance(y, (list, tuple)):
cols = [y]
else:
cols = y
data_cols = []
for i, col in enumerate(cols):
if col not in df.columns:
raise Exception(
"Could not find '%s' in the columns of the provided DataFrame/Series. Please provide for the <y> parameter either a column name of the DataFrame/Series or an array of the same length."
% col
)
if np.issubdtype(df[col].dtype, np.number):
data_cols.append(col)
if len(data_cols) == 0:
raise Exception("No numeric data columns found for plotting.")
# Convert y-column names into string representation:
df.rename(columns={col: str(col) for col in data_cols}, inplace=True)
data_cols = [str(col) for col in data_cols]
# Delete x column if it appears in y columns:
if not delete_in_y is None:
if delete_in_y in data_cols:
data_cols.remove(delete_in_y)
N_cols = len(data_cols)
if len(data_cols) == 0:
raise Exception(
"The only numeric column is the column %s that is already used on the x-axis."
% delete_in_y
)
# Autodetect y-label if no y-label is provided by user and only one y-column exists:
if N_cols == 1:
if kind == "barh":
if "x_axis_label" not in figure_options:
figure_options["x_axis_label"] = data_cols[0]
else:
if "y_axis_label" not in figure_options:
figure_options["y_axis_label"] = data_cols[0]
# Get Name of x-axis data:
if kind == "barh":
xlabelname = (
figure_options["y_axis_label"]
if figure_options.get("y_axis_label", "") != ""
else "x"
)
else:
xlabelname = (
figure_options["x_axis_label"]
if figure_options.get("x_axis_label", "") != ""
else "x"
)
# Create Figure for plotting:
p = figure(**figure_options)
if "x_axis_type" not in figure_options:
figure_options["x_axis_type"] = None
# For categorical plots, set the xticks:
if x_labels_dict is not None:
p.xaxis.formatter = FuncTickFormatter(
code="""
var labels = %s;
return labels[tick];
"""
% x_labels_dict
)
# Define ColumnDataSource for Plot if kind != "hist":
if kind != "hist":
source = {col: df[col].values for col in data_cols}
source["__x__values"] = x
source["__x__values_original"] = x_old
for kwarg, value in kwargs.items():
if value in df.columns:
source[value] = df[value].values
for add_col in additional_columns:
source[add_col] = df[add_col].values
# Define colormap
if kind not in ["scatter", "pie"]:
colormap = get_colormap(colormap, N_cols)
if not color is None:
colormap = get_colormap([color], N_cols)
# Add Glyphs to Plot:
if kind == "line":
p = lineplot(
p,
source,
data_cols,
colormap,
hovertool,
xlabelname,
figure_options["x_axis_type"],
plot_data_points,
plot_data_points_size,
hovertool_string,
number_format,
**kwargs
)
if kind == "step":
p = stepplot(
p,
source,
data_cols,
colormap,
hovertool,
xlabelname,
figure_options["x_axis_type"],
plot_data_points,
plot_data_points_size,
hovertool_string,
number_format,
**kwargs
)
if kind == "point":
p = pointplot(
p,
source,
data_cols,
colormap,
hovertool,
hovertool_string,
xlabelname,
figure_options["x_axis_type"],
number_format,
**kwargs
)
if kind == "scatter":
if N_cols > 2:
raise Exception(
"For scatterplots <x> and <y> values can only be a single column of the DataFrame, not a list of columns. Please specify both <x> and <y> columns for a scatterplot uniquely."
)
# Get and set y-labelname:
y_column = data_cols[0]
if "y_axis_label" not in figure_options:
p.yaxis.axis_label = y_column
# Get values for y-axis:
y = df[y_column].values
# Get values for categorical colormap:
category_values = None
if category in df.columns:
category_values = df[category].values
elif not category is None:
raise Exception(
"<category> parameter has to be either None or the name of a single column of the DataFrame"
)
scatterplot(
p,
df,
x,
x_old,
y,
category,
category_values,
colormap,
hovertool,
hovertool_string,
additional_columns,
x_axis_type=figure_options["x_axis_type"],
xlabelname=xlabelname,
ylabelname=y_column,
**kwargs
)
if kind == "bar" or kind == "barh":
# Define data source for barplot:
data = {col: df[col].values for col in data_cols}
data["__x__values"] = x
data["__x__values_original"] = x_old
source = ColumnDataSource(data)
for kwarg, value in kwargs.items():
if value in df.columns:
source.data[value] = df[value].values
for add_col in additional_columns:
source.data[add_col] = df[add_col].values
# Create Figure (just for categorical barplots):
del figure_options["x_axis_type"]
if "y_axis_label" not in figure_options and kind == "barh":
figure_options["y_axis_label"] = xlabelname
p = figure(**figure_options)
figure_options["x_axis_type"] = None
# Set xticks:
if kind == "bar":
p.xaxis.formatter = FuncTickFormatter(
code="""
var labels = %s;
return labels[tick];
"""
% x_labels_dict
)
elif kind == "barh":
p.yaxis.formatter = FuncTickFormatter(
code="""
var labels = %s;
return labels[tick];
"""
% x_labels_dict
)
if not stacked:
if N_cols >= 3:
base_width = 0.5
else:
base_width = 0.35
width = base_width / (N_cols - 0.5)
if N_cols == 1:
shifts = [0]
else:
delta_shift = base_width / (N_cols - 1)
shifts = [-base_width / 2 + i * delta_shift for i in range(N_cols)]
for i, name, color, shift in zip(
range(N_cols), data_cols, colormap, shifts
):
if kind == "bar":
glyph = p.vbar(
x=dodge("__x__values", shift, range=p.x_range),
top=name,
width=width,
source=source,
color=color,
legend=" " + name,
**kwargs
)
hovermode = "vline"
elif kind == "barh":
glyph = p.hbar(
y=dodge("__x__values", shift, range=p.y_range),
right=name,
height=width,
source=source,
color=color,
legend=" " + name,
**kwargs
)
hovermode = "hline"
if hovertool:
my_hover = HoverTool(mode=hovermode, renderers=[glyph])
if hovertool_string is None:
my_hover.tooltips = [
(xlabelname, "@__x__values_original"),
(name, "@{%s}" % name),
]
else:
my_hover.tooltips = hovertool_string
p.add_tools(my_hover)
if stacked:
legend_ref = [_value(col) for col in data_cols]
if kind == "bar":
glyph = p.vbar_stack(
data_cols,
x="__x__values",
width=0.8,
source=source,
color=colormap,
legend=legend_ref,
**kwargs
)
hovermode = "vline"
elif kind == "barh":
glyph = p.hbar_stack(
data_cols,
y="__x__values",
height=0.8,
source=source,
color=colormap,
legend=legend_ref,
**kwargs
)
hovermode = "hline"
if hovertool:
my_hover = HoverTool(mode=hovermode, renderers=[glyph[-1]])
if hovertool_string is None:
my_hover.tooltips = [(xlabelname, "@__x__values_original")] + [
(col, "@{%s}" % col) for col in data_cols
]
else:
my_hover.tooltips = hovertool_string
p.add_tools(my_hover)
if kind == "hist":
# Disable line_color (for borders of histogram bins) per default:
if not "line_color" in kwargs:
kwargs["line_color"] = None
elif kwargs["line_color"] == True:
del kwargs["line_color"]
# Check for stacked keyword:
if stacked and histogram_type not in [None, "stacked"]:
warnings.warn(
"<histogram_type> was set to '%s', but was overriden by <stacked>=True parameter."
% histogram_type
)
histogram_type = "stacked"
elif stacked and histogram_type is None:
histogram_type = "stacked"
# Set xlabel if only one y-column is given and user does not override this via
# xlabel parameter:
if len(data_cols) == 1 and xlabel is None:
p.xaxis.axis_label = data_cols[0]
# If Histogram should be plotted, calculate bins, aggregates and
# averages:
# Autocalculate bins if bins are not specified:
if bins is None:
values = df[data_cols].values
values = values[~np.isnan(values)]
data, bins = np.histogram(values)
# Calculate bins if number of bins is given:
elif isinstance(bins, int):
if bins < 1:
raise ValueError(
"<bins> can only be an integer>0, a list or a range of numbers."
)
values = df[data_cols].values
values = values[~np.isnan(values)]
v_min, v_max = values.min(), values.max()
bins = np.linspace(v_min, v_max, bins + 1)
bins = list(bins)
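            # np.linspace returns the bin *edges*, i.e. <bins> + 1 values; e.g. for
            # bins=3 over data spanning [0, 6] the edges are [0.0, 2.0, 4.0, 6.0].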
        if weights is not None:
            if weights not in df.columns:
                raise ValueError(
                    "Column '%s' for <weights> is not in the provided DataFrame." % weights
                )
else:
weights = df[weights].values
aggregates = []
averages = []
for col in data_cols:
values = df[col].values
if not weights is None:
not_nan = ~(np.isnan(values) | np.isnan(weights))
values_not_nan = values[not_nan]
weights_not_nan = weights[not_nan]
if sum(not_nan) < len(not_nan):
warnings.warn(
"There are NaN values in column '%s' or in the <weights> column. For the histogram, these rows have been neglected."
% col,
Warning,
)
else:
not_nan = ~np.isnan(values)
values_not_nan = values[not_nan]
weights_not_nan = None
if sum(not_nan) < len(not_nan):
warnings.warn(
"There are NaN values in column '%s'. For the histogram, these rows have been neglected."
% col,
Warning,
)
average = np.average(values_not_nan, weights=weights_not_nan)
averages.append(average)
data, bins = np.histogram(
values_not_nan, bins=bins, weights=weights_not_nan
)
if normed:
data = data / np.sum(data) * normed
if cumulative:
data = np.cumsum(data)
aggregates.append(data)
p = histogram(
p,
df,
data_cols,
colormap,
aggregates,
bins,
averages,
hovertool,
hovertool_string,
additional_columns,
normed,
cumulative,
show_average,
histogram_type,
logy,
**kwargs
)
if kind == "area":
p = areaplot(
p,
source,
data_cols,
colormap,
hovertool,
hovertool_string,
xlabelname,
figure_options["x_axis_type"],
stacked,
normed,
**kwargs
)
if kind == "pie":
source["__x__values"] = x_old
p = pieplot(
source,
data_cols,
colormap,
hovertool,
hovertool_string,
figure_options,
xlabelname,
**kwargs
)
# Set xticks:
if not xticks is None:
p.xaxis[0].ticker = list(xticks)
elif (xaxis_type == "numerical" and kind not in ["hist", "scatter"]) or (
x_labels_dict is not None and kind != "barh"
):
p.xaxis.ticker = x
elif kind == "barh":
p.yaxis.ticker = x
if not yticks is None:
p.yaxis.ticker = yticks
# Format datetime ticks correctly:
if figure_options["x_axis_type"] == "datetime":
p.xaxis.formatter = DatetimeTickFormatter(
milliseconds=["%H:%M:%S.%f"],
seconds=["%H:%M:%S"],
minutes=["%H:%M:%S"],
hours=["%H:%M:%S"],
days=["%d %B %Y"],
months=["%d %B %Y"],
years=["%d %B %Y"],
)
# Rotate xlabel if wanted:
if vertical_xlabel:
p.xaxis.major_label_orientation = np.pi / 2
# Make mapplot:
if kind == "map":
if xlabel is None:
figure_options["x_axis_label"] = "Longitude"
if ylabel is None:
figure_options["y_axis_label"] = "Latitude"
figure_options["x_axis_type"] = "mercator"
figure_options["y_axis_type"] = "mercator"
if len(data_cols) > 1:
raise ValueError(
"For map plots, only one <y>-column representing the latitude of the coordinate can be passed."
)
source["latitude"] = source[data_cols[0]]
source["longitude"] = source["__x__values"]
# p = mapplot(
# source,
# hovertool,
# hovertool_string,
# figure_options,
# colormap,
# tile_provider,
# tile_provider_url,
# tile_attribution,
# tile_alpha,
# **kwargs
# )
gdf = pd.DataFrame(source)
gdf["x"] = gdf["longitude"]
gdf["y"] = gdf["latitude"]
p = geoplot(gdf)#, colormap, hovertool)
# Set panning option:
if panning is False:
p.toolbar.active_drag = None
# Set zooming option:
if zooming is False:
p.toolbar.active_scroll = None
# Set click policy for legend:
if not stacked and kind != "pie":
p.legend.click_policy = "hide"
# Hide legend if wanted:
if not legend:
p.legend.visible = False
# Modify legend position:
else:
if legend is True:
p.legend.location = "top_right"
elif legend in [
"top_left",
"top_center",
"top_right",
"center_left",
"center",
"center_right",
"bottom_left",
"bottom_center",
"bottom_right",
]:
p.legend.location = legend
else:
raise ValueError(
"Legend can only be True/False or one of 'top_left', 'top_center', 'top_right', 'center_left', 'center', 'center_right', 'bottom_left', 'bottom_center', 'bottom_right'"
)
#Scientific formatting for axes:
if disable_scientific_axes is None:
pass
elif disable_scientific_axes == "x":
p.xaxis[0].formatter.use_scientific = False
elif disable_scientific_axes == "y":
p.yaxis[0].formatter.use_scientific = False
elif disable_scientific_axes in ["xy", True]:
p.xaxis[0].formatter.use_scientific = False
p.yaxis[0].formatter.use_scientific = False
else:
raise ValueError("""Keyword parameter <disable_scientific_axes> only accepts "xy", True, "x", "y" or None.""")
# Display plot if wanted
if show_figure:
show(p)
# Return as (embeddable) HTML if wanted:
if return_html:
return embedded_html(p)
# Return plot:
return p
def _base_lineplot(
linetype,
p,
source,
data_cols,
colormap,
hovertool,
xlabelname,
x_axis_type,
plot_data_points,
plot_data_points_size,
hovertool_string,
number_format,
**kwargs
):
"""Adds lineplot to figure p for each data_col."""
if "marker" in kwargs:
marker = kwargs["marker"]
del kwargs["marker"]
else:
marker = "circle"
# Add line (and optional scatter glyphs) to figure:
linetype = getattr(p, linetype.lower())
for name, color in zip(data_cols, colormap):
glyph = linetype(
x="__x__values",
y=name,
legend=" " + name,
source=source,
color=color,
**kwargs
)
if plot_data_points:
p.scatter(
x="__x__values",
y=name,
legend=" " + name,
source=source,
color=color,
marker=marker,
size=plot_data_points_size,
)
if hovertool:
my_hover = HoverTool(mode="vline", renderers=[glyph])
if hovertool_string is None:
if x_axis_type == "datetime":
my_hover.tooltips = [
(xlabelname, "@__x__values_original{%F}"),
(name, "@{%s}%s" % (name, number_format)),
]
my_hover.formatters = {"__x__values_original": "datetime"}
else:
my_hover.tooltips = [
(xlabelname, "@__x__values_original"),
(name, "@{%s}%s" % (name, number_format)),
]
else:
my_hover.tooltips = hovertool_string
p.add_tools(my_hover)
return p
def lineplot(
p,
source,
data_cols,
colormap,
hovertool,
xlabelname,
x_axis_type,
plot_data_points,
plot_data_points_size,
hovertool_string,
number_format,
**kwargs
):
return _base_lineplot(
linetype="line",
p=p,
source=source,
data_cols=data_cols,
colormap=colormap,
hovertool=hovertool,
xlabelname=xlabelname,
x_axis_type=x_axis_type,
plot_data_points=plot_data_points,
plot_data_points_size=plot_data_points_size,
hovertool_string=hovertool_string,
number_format=number_format,
**kwargs
)
def stepplot(
p,
source,
data_cols,
colormap,
hovertool,
xlabelname,
x_axis_type,
plot_data_points,
plot_data_points_size,
hovertool_string,
number_format,
**kwargs
):
return _base_lineplot(
linetype="step",
p=p,
source=source,
data_cols=data_cols,
colormap=colormap,
hovertool=hovertool,
xlabelname=xlabelname,
x_axis_type=x_axis_type,
plot_data_points=plot_data_points,
plot_data_points_size=plot_data_points_size,
hovertool_string=hovertool_string,
number_format=number_format,
**kwargs
)
def pointplot(
p,
source,
data_cols,
colormap,
hovertool,
hovertool_string,
xlabelname,
x_axis_type,
number_format,
**kwargs
):
"""Adds pointplot to figure p for each data_col."""
N_cols = len(data_cols)
# Define marker for pointplot:
if "marker" in kwargs:
markers = [kwargs["marker"]] * N_cols
del kwargs["marker"]
else:
marker = [
"circle",
"square",
"triangle",
"asterisk",
"circle_x",
"square_x",
"inverted_triangle",
"x",
"circle_cross",
"square_cross",
"diamond",
"cross",
]
        markers = marker * int(N_cols / len(marker) + 1)
markers = markers[:N_cols]
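        # The default marker list is cycled so every y-column gets a marker shape,
        # wrapping around when there are more columns than available shapes.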
# Add scatter/point glyphs to figure:
for name, color, marker in zip(data_cols, colormap, markers):
glyph = p.scatter(
x="__x__values",
y=name,
legend=" " + name,
source=source,
color=color,
marker=marker,
**kwargs
)
if hovertool:
my_hover = HoverTool(mode="vline", renderers=[glyph])
if hovertool_string is None:
if x_axis_type == "datetime":
my_hover.tooltips = [
(xlabelname, "@__x__values_original{%F}"),
(name, "@{%s}%s" % (name, number_format)),
]
my_hover.formatters = {"__x__values_original": "datetime"}
else:
my_hover.tooltips = [
(xlabelname, "@__x__values_original"),
(name, "@{%s}%s" % (name, number_format)),
]
else:
my_hover.tooltips = hovertool_string
p.add_tools(my_hover)
return p
def scatterplot(
p,
df,
x,
x_old,
y,
category,
category_values,
colormap,
hovertool,
hovertool_string,
additional_columns,
x_axis_type,
xlabelname,
ylabelname,
**kwargs
):
"""Adds a scatterplot to figure p for each data_col."""
# Set standard size and linecolor of markers:
if "size" not in kwargs:
kwargs["size"] = 10
if "line_color" not in kwargs:
kwargs["line_color"] = "black"
# Define source:
source = ColumnDataSource({"__x__values": x, "__x__values_original": x_old, "y": y})
for kwarg, value in kwargs.items():
if value in df.columns:
source.data[value] = df[value].values
for add_col in additional_columns:
source.data[add_col] = df[add_col].values
# Define Colormapper for categorical scatterplot:
if category is not None:
category = str(category)
source.data[category] = category_values
# Make numerical categorical scatterplot:
if check_type(category_values) == "numeric":
kwargs["legend"] = category + " "
# Define colormapper for numerical scatterplot:
if colormap == None:
colormap = Inferno256
elif isinstance(colormap, str):
if colormap in all_palettes:
colormap = all_palettes[colormap]
max_key = max(colormap.keys())
colormap = colormap[max_key]
else:
raise ValueError(
"Could not find <colormap> with name %s. The following predefined colormaps are supported (see also https://bokeh.pydata.org/en/latest/docs/reference/palettes.html ): %s"
% (colormap, list(all_palettes.keys()))
)
elif isinstance(colormap, (list, tuple)):
pass
else:
                raise ValueError(
                    "<colormap> can only be None, the name of a color palette as string (see https://bokeh.pydata.org/en/latest/docs/reference/palettes.html ) or a list/tuple of colors."
                )
colormapper = LinearColorMapper(palette=colormap)
# Set fill-color to colormapper:
kwargs["fill_color"] = {"field": category, "transform": colormapper}
# Define Colorbar:
colorbar_options = {
"color_mapper": colormapper,
"label_standoff": 0,
"border_line_color": None,
"location": (0, 0),
}
colorbar = ColorBar(**colorbar_options)
p.add_layout(colorbar, "right")
# Draw glyph:
glyph = p.scatter(x="__x__values", y="y", source=source, **kwargs)
# Add Hovertool
if hovertool:
my_hover = HoverTool(renderers=[glyph])
if hovertool_string is None:
if x_axis_type == "datetime":
my_hover.tooltips = [
(xlabelname, "@__x__values_original{%F}"),
(ylabelname, "@y"),
]
my_hover.formatters = {"__x__values_original": "datetime"}
else:
my_hover.tooltips = [
(xlabelname, "@__x__values_original"),
(ylabelname, "@y"),
]
my_hover.tooltips.append((str(category), "@{%s}" % category))
else:
my_hover.tooltips = hovertool_string
p.add_tools(my_hover)
# Make categorical scatterplot:
elif check_type(category_values) == "object":
# Define colormapper for categorical scatterplot:
labels, categories = pd.factorize(category_values)
colormap = get_colormap(colormap, len(categories))
# Draw each category as separate glyph:
x, y = source.data["__x__values"], source.data["y"]
for cat, color in zip(categories, colormap):
                # Define reduced source for this category:
x_cat = x[category_values == cat]
x_old_cat = x_old[category_values == cat]
y_cat = y[category_values == cat]
cat_cat = category_values[category_values == cat]
source = ColumnDataSource(
{
"__x__values": x_cat,
"__x__values_original": x_old_cat,
"y": y_cat,
"category": cat_cat,
}
)
for kwarg, value in kwargs.items():
if value in df.columns:
source.data[value] = df[value].values[category_values == cat]
for add_col in additional_columns:
source.data[add_col] = df[add_col].values[category_values == cat]
# Draw glyph:
glyph = p.scatter(
x="__x__values",
y="y",
legend=str(cat) + " ",
source=source,
color=color,
**kwargs
)
# Add Hovertool
if hovertool:
my_hover = HoverTool(renderers=[glyph])
if hovertool_string is None:
if x_axis_type == "datetime":
my_hover.tooltips = [
(xlabelname, "@__x__values_original{%F}"),
(ylabelname, "@y"),
]
my_hover.formatters = {"__x__values_original": "datetime"}
else:
my_hover.tooltips = [
(xlabelname, "@__x__values_original"),
(ylabelname, "@y"),
]
my_hover.tooltips.append((str(category), "@category"))
else:
my_hover.tooltips = hovertool_string
p.add_tools(my_hover)
if len(categories) > 5:
                warnings.warn(
                    "There are more than 5 categories in the scatterplot. The legend might be crowded; to hide the legend you can pass 'legend=False' as an optional argument."
                )
else:
raise ValueError(
"<category> is not supported with datetime objects. Consider casting the datetime objects to strings, which can be used as <category> values."
)
# Draw non-categorical plot:
else:
# Draw glyph:
glyph = p.scatter(
x="__x__values", y="y", source=source, legend="Hide/Show", **kwargs
)
# Add Hovertool:
if hovertool:
my_hover = HoverTool(renderers=[glyph])
if hovertool_string is None:
if x_axis_type == "datetime":
my_hover.tooltips = [
(xlabelname, "@__x__values_original{%F}"),
(ylabelname, "@y"),
]
my_hover.formatters = {"__x__values_original": "datetime"}
else:
my_hover.tooltips = [
(xlabelname, "@__x__values_original"),
(ylabelname, "@y"),
]
else:
my_hover.tooltips = hovertool_string
p.add_tools(my_hover)
return p
def histogram(
p,
df,
data_cols,
colormap,
aggregates,
bins,
averages,
hovertool,
hovertool_string,
additional_columns,
normed,
cumulative,
show_average,
histogram_type,
logy,
**kwargs
):
"Adds histogram to figure p for each data_col."
bottom = None
N_cols = len(data_cols)
if logy:
bottomvalue = 0.000000001
else:
bottomvalue = 0
for i, name, color, aggregate, average in zip(
range(len(data_cols)), data_cols, colormap, aggregates, averages
):
if histogram_type is None:
histogram_type = "topontop"
if histogram_type not in ["sidebyside", "topontop", "stacked"]:
raise ValueError(
'<histogram_type> can only be one of ["sidebyside", "topontop", "stacked"].'
)
# Get bar edges to plot for side-by-side display
if histogram_type == "sidebyside":
left = [
bins[index] + float(i) / N_cols * (bins[index + 1] - bins[index])
for index in range(len(bins) - 1)
]
right = [
bins[index] + float(i + 1) / N_cols * (bins[index + 1] - bins[index])
for index in range(len(bins) - 1)
]
bottom = [bottomvalue] * len(left)
top = aggregate
# Get bar edges for top-on-top display:
elif histogram_type == "topontop":
left = bins[:-1]
right = bins[1:]
bottom = [bottomvalue] * len(left)
top = aggregate
if "alpha" not in kwargs:
kwargs["alpha"] = 0.5
# Get bar edges for stacked display:
elif histogram_type == "stacked":
left = bins[:-1]
right = bins[1:]
if bottom is None:
bottom = [bottomvalue] * len(left)
top = [0] * len(left)
else:
bottom = top
top = top + aggregate
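            # For stacked histograms each column's counts are added on top of the
            # running total: the previous <top> becomes the new <bottom>, so the
            # quads of column i sit directly above those of columns 0..i-1.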
# Define DataSource for plotting:
source = ColumnDataSource(
dict(
bins=[
"%s-%s" % (bins[index], bins[index + 1])
for index in range(len(bins) - 1)
],
left=left,
right=right,
top=top,
bottom=bottom,
)
)
# Add histogram glyphs to plot:
g1 = p.quad(
left="left",
right="right",
bottom="bottom",
top="top",
source=source,
color=color,
legend=name,
**kwargs
)
if hovertool:
my_hover = HoverTool(mode="vline", renderers=[g1])
if hovertool_string is None:
my_hover.tooltips = (
"""<h3> %s: </h3> <h4>bin=@bins</h4> <h4>value=@top </h4>"""
% (name)
)
else:
warnings.warn(
"For histograms, <hovertool_string> is not a supported keyword argument."
)
p.add_tools(my_hover)
# Plot average line if wanted:
if show_average:
for sign in [1, -1]:
g1 = p.ray(
x=[average],
y=[0],
length=0,
angle=sign * np.pi / 2,
line_width=3,
color=color,
legend="<%s> = %f" % (name, average),
)
p.xaxis.ticker = bins
return p
def areaplot(
p,
source,
data_cols,
colormap,
hovertool,
hovertool_string,
xlabelname,
x_axis_type,
stacked,
normed,
**kwargs
):
"""Adds areaplot to figure p for each data_col."""
# Transform columns to be able to plot areas as patches:
if not stacked:
line_source = deepcopy(source)
for key in list(source.keys()):
if key == "__x__values":
source[key] = [source[key][0]] + list(source[key]) + [source[key][-1]]
else:
source[key] = np.array([0] + list(source[key]) + [0])
if "alpha" not in kwargs:
kwargs["alpha"] = 0.4
else:
if "alpha" not in kwargs:
kwargs["alpha"] = 0.8
if normed is not False:
data = []
for col in data_cols:
data.append(source[col])
data = np.array(data)
norm = np.sum(data, axis=0)
for col in data_cols:
source[col] = np.array(source[col]) / norm * normed
line_source = {
"__x__values": source["__x__values"],
"__x__values_original": source["__x__values_original"],
}
baseline = np.zeros(len(source["__x__values"]))
del source["__x__values_original"]
source["__x__values"] = (
list(source["__x__values"]) + list(source["__x__values"])[::-1]
)
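        # The x-values are mirrored (forward, then reversed) so that together with
        # the stacked y-values below each column traces a closed polygon: along its
        # upper boundary left-to-right and back along its lower boundary, which is
        # the vertex order p.patch() expects.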
for j, col in enumerate(data_cols):
# Stack lines:
line_source[col + "_plot"] = baseline + np.array(source[col])
line_source[col] = np.array(source[col])
# Stack patches:
source[col] = baseline + np.array(source[col])
new_baseline = source[col]
source[col] = list(source[col]) + list(baseline)[::-1]
baseline = new_baseline
# Add area patches to figure:
for j, name, color in list(zip(range(len(data_cols)), data_cols, colormap))[::-1]:
p.patch(
x="__x__values",
y=name,
legend=" " + name,
source=source,
color=color,
**kwargs
)
# Add hovertool:
if hovertool and int(len(data_cols) / 2) == j + 1:
# Add single line for displaying hovertool:
if stacked:
y = name + "_plot"
else:
y = name
glyph = p.line(
x="__x__values",
y=y,
legend=" " + name,
source=line_source,
color=color,
alpha=0,
)
# Define hovertool and add to line:
my_hover = HoverTool(mode="vline", renderers=[glyph])
if hovertool_string is None:
if x_axis_type == "datetime":
my_hover.tooltips = [(xlabelname, "@__x__values_original{%F}")] + [
(name, "@{%s}" % name) for name in data_cols[::-1]
]
my_hover.formatters = {"__x__values_original": "datetime"}
else:
my_hover.tooltips = [(xlabelname, "@__x__values_original")] + [
(name, "@{%s}" % name) for name in data_cols[::-1]
]
else:
my_hover.tooltips = hovertool_string
p.add_tools(my_hover)
return p
def pieplot(
source,
data_cols,
colormap,
hovertool,
hovertool_string,
figure_options,
xlabelname,
**kwargs
):
"""Creates a Pieplot from the provided data."""
# Determine Colormap for Pieplot:
colormap = get_colormap(colormap, len(source["__x__values"]))
source["color"] = colormap
max_col_stringlength = max([len(col) for col in data_cols])
# Create Figure for Pieplot:
plot_width = figure_options["plot_width"]
plot_height = figure_options["plot_height"]
title = figure_options["title"]
toolbar_location = None
x_range = (-1.4 - 0.05 * max_col_stringlength, 2)
y_range = (-1.2, 1.2)
p = figure(
plot_width=plot_width,
plot_height=plot_height,
title=title,
toolbar_location=toolbar_location,
x_range=x_range,
y_range=y_range,
)
p.axis.axis_label = None
p.axis.visible = False
p.grid.grid_line_color = None
# Calculate angles for Pieplot:
for col in data_cols:
source[col + "_angle"] = source[col] / source[col].sum() * 2 * np.pi
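        # Each value becomes a wedge angle proportional to its share of the column
        # total, e.g. values [1, 1, 2] map to angles [pi/2, pi/2, pi].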
# Make Pieplots:
for i, col in list(enumerate(data_cols))[::-1]:
inner_radius = float(i) / len(data_cols)
outer_radius = float(i + 0.9) / len(data_cols)
source["inner_radius"] = [inner_radius] * len(source["__x__values"])
source["outer_radius"] = [outer_radius] * len(source["__x__values"])
if i == 0:
legend = "__x__values"
else:
legend = False
if "line_color" not in kwargs:
kwargs["line_color"] = "white"
glyph = p.annular_wedge(
x=0,
y=0,
inner_radius="inner_radius",
outer_radius="outer_radius",
start_angle=cumsum(col + "_angle", include_zero=True),
end_angle=cumsum(col + "_angle"),
fill_color="color",
legend=legend,
source=source,
**kwargs
)
# Add annotation:
if len(data_cols) > 1:
text_source = {
"__x__values": [-1.3 - 0.05 * max_col_stringlength],
"y": [0.5 - 0.3 * i],
"text": [col],
}
ann = p.text(
x="__x__values",
y="y",
text="text",
text_font_style="bold",
source=text_source,
)
p.line(
x=[-1.3 - 0.04 * (max_col_stringlength - len(col)), 0],
y=[0.5 - 0.3 * i, -(inner_radius + outer_radius) / 2],
line_color="black",
)
# Define hovertool and add to Pieplot:
if hovertool:
my_hover = HoverTool(renderers=[glyph])
if hovertool_string is None:
my_hover.tooltips = [
(xlabelname, "@__x__values_original"),
(col, "@{%s}" % col),
]
else:
my_hover.tooltips = hovertool_string
p.add_tools(my_hover)
return p
def mapplot(
source,
hovertool,
hovertool_string,
figure_options,
colormap,
tile_provider,
tile_provider_url,
tile_attribution,
tile_alpha,
**kwargs
):
"""Creates Point on a Map from the provided data. Provided x,y coordinates
have to be longitude, latitude in WGS84 projection."""
latitude = source["latitude"]
longitude = source["longitude"]
# Check values of longitude and latitude:
if not (check_type(latitude) == "numeric" and check_type(longitude) == "numeric"):
raise ValueError(
"<x> and <y> have to be numeric columns of the DataFrame. Further they correspond to longitude, latitude in WGS84 projection."
)
if not (np.min(latitude) > -90 and np.max(latitude) < 90):
raise ValueError(
"All values of the y-column have to be restricted to (-90, 90). The <y> value corresponds to the latitude in WGS84 projection."
)
if not (np.min(longitude) > -180 and np.max(longitude) < 180):
raise ValueError(
"All values of the x-column have to be restricted to (-180, 180). The <x> value corresponds to the longitude in WGS84 projection."
)
# Convert longitude, latitude coordinates to Web Mercator projection:
RADIUS = 6378137.0
source["y"] = np.log(np.tan(np.pi / 4 + np.radians(latitude) / 2)) * RADIUS
source["x"] = np.radians(longitude) * RADIUS
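    # Web Mercator maps longitude linearly and latitude via the inverse Gudermannian
    # function, e.g. (lon=0, lat=0) -> (x=0, y=0) and lat=45 deg ->
    # y = ln(tan(pi/4 + pi/8)) * 6378137 m ~= 5.62e6 m.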
# Create map figure to plot:
p = figure(**figure_options)
    # Get rid of zoom on axes:
for t in p.tools:
if type(t) == WheelZoomTool:
t.zoom_on_axis = False
# Add Background Tile:
from .geoplot import _add_backgroundtile
p = _add_backgroundtile(
p, tile_provider, tile_provider_url, tile_attribution, tile_alpha
)
# Plot geocoordinates on map:
glyph = p.scatter(
x="x",
y="y",
source=source,
legend="Show/Hide Layer",
color=colormap[0],
**kwargs
)
# Add hovertool:
if hovertool:
if hovertool_string is not None:
my_hover = HoverTool(renderers=[glyph])
my_hover.tooltips = hovertool_string
p.add_tools(my_hover)
return p
##############################################################################
###########Class to add Bokeh plotting methods to Pandas DataFrame
##############################################################################
class FramePlotMethods(BasePlotMethods):
"""DataFrame plotting accessor and method
Examples
--------
>>> df.plot_bokeh.line()
>>> df.plot_bokeh.scatter('x', 'y')
>>> df.plot_bokeh.hexbin()
These plotting methods can also be accessed by calling the accessor as a
method with the ``kind`` argument:
``df.plot_bokeh(kind='line')`` is equivalent to ``df.plot_bokeh.line()``
"""
def __call__(self, *args, **kwargs):
return plot(self.df, *args, **kwargs)
@property
def df(self):
if pd.__version__ >= "0.24":
dataframe = self._parent
else:
dataframe = self._data
# Convert PySpark Dataframe to Pandas Dataframe:
if hasattr(dataframe, "toPandas"):
dataframe = dataframe.toPandas()
return dataframe
__call__.__doc__ = plot.__doc__
def line(self, x=None, y=None, **kwargs):
"""
Plot DataFrame columns as lines.
This function is useful to plot lines using DataFrame's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwds
Keyword arguments to pass on to :meth:`pandas.DataFrame.plot_bokeh`.
Returns
-------
Bokeh.plotting.figure or Bokeh.layouts.row
Examples
--------
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot_bokeh.line()
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot_bokeh.line(x='pig', y='horse')
"""
return self(kind="line", x=x, y=y, **kwargs)
def step(self, x=None, y=None, **kwargs):
"""
Plot DataFrame columns as step lines.
This function is useful to plot step lines using DataFrame's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwds
Keyword arguments to pass on to :meth:`pandas.DataFrame.plot_bokeh`.
Returns
-------
Bokeh.plotting.figure or Bokeh.layouts.row
Examples
--------
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> steps = df.plot_bokeh.step()
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> steps = df.plot_bokeh.step(x='pig', y='horse')
"""
return self(kind="step", x=x, y=y, **kwargs)
def point(self, x=None, y=None, **kwargs):
"""
Plot DataFrame columns as points.
This function is useful to plot lines using DataFrame's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwds
Keyword arguments to pass on to :meth:`pandas.DataFrame.plot_bokeh`.
Returns
-------
Bokeh.plotting.figure or Bokeh.layouts.row
Examples
--------
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot_bokeh.point()
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot_bokeh.point(x='pig', y='horse')
"""
return self(kind="point", x=x, y=y, **kwargs)
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
**kwds
Additional keyword arguments are documented in
:meth:`pandas.DataFrame.plot_bokeh`.
Returns
-------
Bokeh.plotting.figure
See Also
--------
pandas.DataFrame.plot_bokeh.barh : Horizontal bar plot.
pandas.DataFrame.plot_bokeh : Make interactive plots of a DataFrame.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
            >>> ax = df.plot_bokeh.bar(x='lab', y='val')
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
            >>> ax = df.plot_bokeh.bar()
Plot a single column.
.. plot::
:context: close-figs
            >>> ax = df.plot_bokeh.bar(y='speed')
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
            >>> ax = df.plot_bokeh.bar(x='lifespan')
"""
return self(kind="bar", x=x, y=y, **kwds)
def barh(self, x=None, y=None, **kwds):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds
Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`.
Returns
-------
Bokeh.plotting.figure
See Also
--------
pandas.DataFrame.plot_bokeh.bar: Vertical bar plot.
        pandas.DataFrame.plot_bokeh : Make interactive plots of a DataFrame.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot_bokeh.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot_bokeh.barh()
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot_bokeh.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot_bokeh.barh(x='lifespan')
"""
return self(kind="barh", x=x, y=y, **kwds)
def box(self, by=None, **kwds):
r"""
Make a box plot of the DataFrame columns.
A box plot is a method for graphically depicting groups of numerical
data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. The position of the whiskers
is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the
box. Outlier points are those past the end of the whiskers.
For further details see Wikipedia's
entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.
A consideration when using this chart is that the box and the whiskers
can overlap, which is very common when plotting small sets of data.
Parameters
----------
by : string or sequence
Column in the DataFrame to group by.
**kwds : optional
Additional keywords are documented in
:meth:`pandas.DataFrame.plot_bokeh`.
Returns
-------
Bokeh.plotting.figure
See Also
--------
pandas.Series.plot_bokeh.box: Draw a box plot from a Series object.
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = pd.DataFrame(data, columns=list('ABCD'))
            >>> ax = df.plot_bokeh.box()
"""
return self(kind="box", by=by, **kwds)
def hist(self, **kwds):
"""
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one figure.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
...
bins : int, default 10
Number of histogram bins to be used.
**kwds
Additional keyword arguments are documented in
:meth:`pandas.DataFrame.plot_bokeh`.
Returns
-------
Bokeh.plotting.figure
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
When we draw a dice 6000 times, we expect to get each value around 1000
times. But when we draw two dices and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns = ['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> ax = df.plot_bokeh.hist(bins=12, alpha=0.5)
"""
return self(kind="hist", **kwds)
def area(self, x=None, y=None, **kwds):
"""
Area plot
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
`**kwds` : optional
Additional keyword arguments are documented in
:meth:`pandas.DataFrame.plot_bokeh`.
Returns
-------
Bokeh.plotting.figure
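        Examples
        --------
        .. plot::
            :context: close-figs
            >>> df = pd.DataFrame({
            ...     'pig': [20, 18, 489, 675, 1776],
            ...     'horse': [4, 25, 281, 600, 1900]
            ... }, index=[1990, 1997, 2003, 2009, 2014])
            >>> areas = df.plot_bokeh.area()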
"""
return self(kind="area", x=x, y=y, **kwds)
def pie(self, y=None, **kwds):
"""
Generate a pie plot.
        A pie plot is a proportional representation of the numerical data in a
        column. This function draws an interactive Bokeh pie plot for the
        specified column. If no column reference is passed, a nested pie plot
        is drawn for each numerical column.
Parameters
----------
y : int or label, optional
Label or position of the column(s) to plot.
**kwds
Keyword arguments to pass on to :meth:`pandas.DataFrame.plot_bokeh`.
Returns
-------
Bokeh.plotting.figure
See Also
--------
Series.plot.pie : Generate a pie plot for a Series.
DataFrame.plot : Make plots of a DataFrame.
Examples
--------
In the example below we have a DataFrame with the information about
        planet's mass and radius. We pass the 'mass' column to the
pie function to get a pie plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.plot_bokeh.pie(y='mass')
When you pass multiple y-columns, the plot contains several nested
pieplots:
.. plot::
:context: close-figs
            >>> plot = df.plot_bokeh.pie()
"""
return self(kind="pie", y=y, **kwds)
def scatter(self, x, y, category=None, **kwds):
"""
Create a scatter plot with varying marker color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
category : str or object
A column name whose values will be used to color the
marker points according to a colormap.
**kwds
Keyword arguments to pass on to :meth:`pandas.DataFrame.plot_bokeh`.
Returns
-------
Bokeh.plotting.figure or Bokeh.layouts.row
See Also
--------
bokeh.plotting.figure.scatter : scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> ax1 = df.plot_bokeh.scatter(x='length',
... y='width')
And now with the color and size determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot_bokeh.scatter(x='length',
... y='width',
... category='species',
... size="species",
... colormap='viridis')
"""
return self(kind="scatter", x=x, y=y, category=category, **kwds)
def map(self, x, y, **kwds):
"""
Create a plot of geographic points stored in a Pandas DataFrame on an
interactive map.
The coordinates (latitude/longitude) of each point are defined by two
dataframe columns.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates (longitude) for each point.
y : int or str
The column name or column position to be used as vertical
coordinates (latitude) for each point.
hovertool_string : str
If specified, this string will be used for the hovertool (@{column}
will be replaced by the value of the column for the element the
mouse hovers over, see also Bokeh documentation). This can be
used to display additional information on the map.
tile_provider : None or str (default: 'CARTODBPOSITRON_RETINA')
Define build-in tile provider for background maps. Possible
values: None, 'CARTODBPOSITRON', 'CARTODBPOSITRON_RETINA',
'STAMEN_TERRAIN', 'STAMEN_TERRAIN_RETINA', 'STAMEN_TONER',
'STAMEN_TONER_BACKGROUND', 'STAMEN_TONER_LABELS'.
tile_provider_url : str
            An arbitrary tile_provider_url of the form '/{Z}/{X}/{Y}*.png'
can be passed to be used as background map.
tile_attribution : str
String (also HTML accepted) for showing attribution
for tile source in the lower right corner.
tile_alpha : float (Default: 1)
Sets the alpha value of the background tile between [0, 1].
**kwds
Keyword arguments to pass on to :meth:`pandas.DataFrame.plot_bokeh`.
Returns
-------
Bokeh.plotting.figure
See Also
--------
bokeh.plotting.figure.scatter : scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns. Below an example of plotting all cities
for more than 1 million inhabitants:
.. plot::
:context: close-figs
>>> df_mapplot = pd.read_csv(r"https://raw.githubusercontent.com\
... /PatrikHlobil/Pandas-Bokeh/master/Documentation/Testdata\
... /populated%20places/populated_places.csv")
>>> df_mapplot["size"] = df_mapplot["pop_max"] / 1000000
>>> df_mapplot.plot_bokeh.map(
... x="longitude",
... y="latitude",
... hovertool_string="<h2> @{name} </h2> \n\n \
... <h3> Population: @{pop_max} </h3>",
... tile_provider='STAMEN_TERRAIN_RETINA',
... size="size",
... figsize=(900, 600),
... title="World cities with more than 1.000.000 inhabitants")
"""
#return self(kind="map", x=x, y=y, **kwds)
#Get data of x and y columns:
df = self.df.copy()
if not x in df.columns:
raise ValueError("<x> parameter has to be a column name of the provided dataframe.")
if not y in df.columns:
raise ValueError("<y> parameter has to be a column name of the provided dataframe.")
latitude = df[y]
longitude = df[x]
#Check if NaN values are in x & y columns:
        if (pd.isnull(latitude)
import pickle
from io import BytesIO
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
from rdt.transformers import (
CategoricalTransformer, LabelEncodingTransformer, OneHotEncodingTransformer)
def test_categorical_numerical_nans():
"""Ensure CategoricalTransformer works on numerical + nan only columns."""
data = pd.DataFrame([1, 2, float('nan'), np.nan], columns=['column_name'])
transformer = CategoricalTransformer()
transformer.fit(data, list(data.columns))
transformed = transformer.transform(data)
reverse = transformer.reverse_transform(transformed)
pd.testing.assert_frame_equal(reverse, data)
def test_categoricaltransformer_pickle_nans():
"""Ensure that CategoricalTransformer can be pickled and loaded with nan value."""
# setup
data = pd.DataFrame([1, 2, float('nan'), np.nan], columns=['column_name'])
transformer = CategoricalTransformer()
transformer.fit(data, list(data.columns))
transformed = transformer.transform(data)
# create pickle file on memory
bytes_io = BytesIO()
pickle.dump(transformer, bytes_io)
# rewind
bytes_io.seek(0)
# run
pickled_transformer = pickle.load(bytes_io)
# assert
pickle_transformed = pickled_transformer.transform(data)
pd.testing.assert_frame_equal(pickle_transformed, transformed)
def test_categoricaltransformer_strings():
"""Test the CategoricalTransformer on string data.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on string data. Expect that the reverse transformed data
is the same as the input.
Input:
- 4 rows of string data
Output:
- The reverse transformed data
"""
# setup
data = pd.DataFrame(['a', 'b', 'a', 'c'], columns=['column_name'])
transformer = CategoricalTransformer()
# run
transformer.fit(data, list(data.columns))
reverse = transformer.reverse_transform(transformer.transform(data))
# assert
pd.testing.assert_frame_equal(data, reverse)
def test_categoricaltransformer_strings_2_categories():
"""Test the CategoricalTransformer on string data.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on string data, when there are 2 categories of strings with
the same value counts. Expect that the reverse transformed data is the
same as the input.
Input:
- 4 rows of string data
Output:
- The reverse transformed data
"""
# setup
data = pd.DataFrame(['a', 'b', 'a', 'b'], columns=['column_name'])
transformer = CategoricalTransformer()
transformer.fit(data, list(data.columns))
reverse = transformer.reverse_transform(transformer.transform(data))
# assert
pd.testing.assert_frame_equal(data, reverse)
def test_categoricaltransformer_integers():
"""Test the CategoricalTransformer on integer data.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on integer data. Expect that the reverse transformed data is the
same as the input.
Input:
- 4 rows of int data
Output:
- The reverse transformed data
"""
# setup
data = pd.DataFrame([1, 2, 3, 2], columns=['column_name'])
transformer = CategoricalTransformer()
# run
transformer.fit(data, list(data.columns))
reverse = transformer.reverse_transform(transformer.transform(data))
# assert
pd.testing.assert_frame_equal(data, reverse)
def test_categoricaltransformer_bool():
"""Test the CategoricalTransformer on boolean data.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on boolean data. Expect that the reverse transformed data is the
same as the input.
Input:
- 4 rows of bool data
Output:
- The reverse transformed data
"""
# setup
data = pd.DataFrame([True, False, True, False], columns=['column_name'])
transformer = CategoricalTransformer()
# run
transformer.fit(data, list(data.columns))
reverse = transformer.reverse_transform(transformer.transform(data))
# assert
pd.testing.assert_frame_equal(data, reverse)
def test_categoricaltransformer_mixed():
"""Test the CategoricalTransformer on mixed type data.
Ensure that the CategoricalTransformer can fit, transform, and reverse
transform on mixed type data. Expect that the reverse transformed data is
the same as the input.
Input:
- 4 rows of mixed data
Output:
- The reverse transformed data
"""
# setup
    data = pd.DataFrame([True, 'a', 1, None], columns=['column_name'])
#!/usr/local/var/pyenv/shims/python3
# !python3
import calendar
import pandas as pd
import plotly.express as px
mapbox_public_token = '<KEY>'
px.set_mapbox_access_token(mapbox_public_token)
def abnyc_query(conn):
while True:
        print('1. NYC Airbnb statistics by minimum nights on map.\n'
              '2. NYC Airbnb statistics by availability on map.\n'
              '3. NYC Airbnb statistics by reviews on map.\n'
              'q. Quit')
choice = input('Input Here: ')
size_indicator = ""
if choice == '1':
query = "SELECT compound.id, compound.latitude, compound.longitude, nbhd.neighbourhood_group, compound.minimum_nights " \
"FROM (" \
"SELECT geo.id, geo.latitude, geo.longitude, main.neighbourhood, main.minimum_nights " \
"FROM abnyc_geo AS geo " \
"INNER JOIN abnyc AS main " \
"ON geo.id = main.id) AS compound " \
"INNER JOIN (" \
"SELECT * FROM abnyc_nbhd) AS nbhd " \
"ON nbhd.neighbourhood = compound.neighbourhood;"
size_indicator = "minimum_nights"
elif choice == '2':
query = "SELECT compound.id, compound.latitude, compound.longitude, nbhd.neighbourhood_group, compound.availability_365 " \
"FROM (" \
"SELECT geo.id, geo.latitude, geo.longitude, main.neighbourhood, main.availability_365 " \
"FROM abnyc_geo AS geo " \
"INNER JOIN abnyc AS main " \
"ON geo.id = main.id) AS compound " \
"INNER JOIN (" \
"SELECT * FROM abnyc_nbhd) AS nbhd " \
"ON nbhd.neighbourhood = compound.neighbourhood;"
size_indicator = "availability_365"
elif choice == '3':
query = "SELECT compound.id, compound.latitude, compound.longitude, nbhd.neighbourhood_group, compound.number_of_reviews " \
"FROM (" \
"SELECT geo.id, geo.latitude, geo.longitude, main.neighbourhood, main.number_of_reviews " \
"FROM abnyc_geo AS geo " \
"INNER JOIN abnyc AS main " \
"ON geo.id = main.id) AS compound " \
"INNER JOIN (" \
"SELECT * FROM abnyc_nbhd) AS nbhd " \
"ON nbhd.neighbourhood = compound.neighbourhood;"
size_indicator = "number_of_reviews"
else:
break
df = pd.read_sql_query(query, conn)
fig = px.scatter_mapbox(df, lat='latitude', lon='longitude',
color='neighbourhood_group',
size=size_indicator,
opacity=0.8,
color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10)
fig.update_layout(
mapbox_style="dark",
showlegend=False,
mapbox_layers=[
{
"below": 'traces',
"sourcetype": "raster",
},
]
)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 4})
fig.show()
def liquor_query(conn):
while True:
        print('1. NYC liquor statistics by month on map.\n'
              '2. NYC liquor statistics by year on map.\n'
              '3. NYC liquor statistics overall on map.\n'
              'q. Quit')
choice = input('Input Here: ')
# data_per = int(input('How many data you want to see? (Enter a integer less than 100000)\n Enter here: ')
if choice == '1':
year_month = input('Which [YEAR-MONTH] would you like to check?\nEnter here: ')
query = "SELECT compound.license_serial_number, compound.latitude, compound.longitude, compound.license_effective_date, type.license_type_name " \
"FROM (" \
"SELECT geo.license_serial_number, geo.latitude, geo.longitude, main.license_class_code, main.license_effective_date " \
"FROM liquor_geo AS geo " \
"INNER JOIN ( " \
"SELECT * " \
"FROM liquor " \
"WHERE license_effective_date >= '%(year)s-%(month)s-01' AND license_effective_date < '%(year)s-%(month)s-%(end_day)s') AS main " \
"ON geo.license_serial_number = main.license_serial_number) AS compound " \
"INNER JOIN (" \
"SELECT * FROM liquor_type) AS type " \
"ON type.license_class_code = compound.license_class_code;"
year = year_month.split("-")[0]
month = year_month.split("-")[1]
month_range = calendar.monthrange(int(year), int(month))
end_day = month_range[1]
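            # calendar.monthrange returns (weekday_of_first_day, days_in_month),
            # e.g. calendar.monthrange(2019, 2) -> (4, 28), so end_day is the last
            # day of the selected month.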
df = pd.read_sql_query(query, conn, params={'year': int(year), 'month': int(month), 'end_day': end_day})
elif choice == '2':
year = int(input('Which [YEAR] would you like to check?\nEnter here: '))
query = "SELECT compound.license_serial_number, compound.latitude, compound.longitude, compound.license_effective_date, type.license_type_name " \
"FROM (" \
"SELECT geo.license_serial_number, geo.latitude, geo.longitude, main.license_class_code, main.license_effective_date " \
"FROM liquor_geo AS geo " \
"INNER JOIN ( " \
"SELECT * " \
"FROM liquor " \
"WHERE license_effective_date >= '%(year)s-01-01' AND license_effective_date <= '%(year)s-12-31') AS main " \
"ON geo.license_serial_number = main.license_serial_number) AS compound " \
"INNER JOIN (" \
"SELECT * FROM liquor_type) AS type " \
"ON type.license_class_code = compound.license_class_code;"
df = pd.read_sql_query(query, conn, params={'year': year})
elif choice == '3':
query = "SELECT compound.license_serial_number, compound.latitude, compound.longitude, compound.license_effective_date, type.license_type_name " \
"FROM (" \
"SELECT geo.license_serial_number, geo.latitude, geo.longitude, main.license_class_code, main.license_effective_date " \
"FROM liquor_geo AS geo " \
"INNER JOIN liquor AS main " \
"ON geo.license_serial_number = main.license_serial_number) AS compound " \
"INNER JOIN (" \
"SELECT * FROM liquor_type) AS type " \
"ON type.license_class_code = compound.license_class_code;"
# size_indicator = "number_of_reviews"
df = pd.read_sql_query(query, conn)
else:
break
# df = df.sample(data_per)
fig = px.scatter_mapbox(df, lat='latitude', lon='longitude',
# color='license_effective_date',
# size=10,
opacity=0.8,
color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10)
fig.update_layout(
mapbox_style="dark",
showlegend=False,
mapbox_layers=[
{
"below": 'traces',
"sourcetype": "raster",
},
]
)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 4})
fig.show()
def crime_query(conn):
while True:
        print('1. Crime statistics by crime type.\n'
              '2. Crime statistics by month.\n'
              '3. Crime statistics by hour.\n'
              '4. Crime statistics on map.\n'
              'q. Quit')
choice = input('Input Here: ')
if choice == '1':
query = "SELECT cd.ky_cd, ofns_desc, law_cat_cd, count(cmplnt_num) " \
"FROM crime c " \
"JOIN crime_desc cd " \
"ON (c.ky_cd=cd.ky_cd) " \
"GROUP BY cd.ky_cd, ofns_desc, law_cat_cd " \
"ORDER BY count desc;"
df = pd.read_sql_query(query,
conn)
print(df)
fig = px.bar(df, x='ofns_desc', y='count',
color='ofns_desc', barmode='relative',
hover_data=['law_cat_cd'],
labels={'pop': 'New York City Crime Data'})
fig.show()
elif choice == '2':
query = "select TO_CHAR(cmplnt_fr_dt, 'Month') as cmplnt_year, count(*) from crime group by cmplnt_year;"
df = pd.read_sql_query(query,
conn)
print(df)
fig = px.line(df, x='cmplnt_year', y='count')
fig.show()
elif choice == '3':
date_method = 'hour'
query = "select date_trunc(%(d_method)s, cmplnt_fr_tm) as cmplnt_hour, count(cmplnt_num) " \
"from crime " \
"group by cmplnt_hour;"
df = pd.read_sql_query(query,
conn,
params={'d_method': date_method})
df['cmplnt_hour'] = df['cmplnt_hour'].astype(str).str[-18:-10]
df['cmplnt_hour'] = pd.to_datetime(df['cmplnt_hour'], format='%H:%M:%S').dt.time
print(df)
fig = px.line(df, x='cmplnt_hour', y='count')
fig.show()
elif choice == '4':
law = ['MISDEMEANOR', 'VIOLATION', 'FELONY']
            law_num = int(
                input('Which crime type do you want to see?\n1.Misdemeanor\n2.Violation\n3.Felony\nEnter here: '))
            data_per = int(input('How many data points do you want to see? (Enter an integer less than 100000)\n Enter here: '))
query = "SELECT geo.cmplnt_num, region.boro_nm, geo.latitude, geo.longitude, cd.law_cat_cd " \
"FROM (SELECT geo.cmplnt_num, c.ky_cd, geo.latitude, geo.longitude " \
"FROM crime_geo AS geo " \
"JOIN crime AS c " \
"ON geo.cmplnt_num=c.cmplnt_num) AS geo " \
"JOIN (SELECT * " \
"FROM crime_desc " \
"WHERE law_cat_cd=%(type)s) AS cd " \
"ON geo.ky_cd=cd.ky_cd " \
"JOIN crime_region AS region " \
"ON geo.cmplnt_num=region.cmplnt_num;"
df = pd.read_sql_query(query, conn, params={'type': law[law_num - 1]})
df = df.sample(data_per)
fig = px.scatter_mapbox(df, lat='latitude', lon='longitude',
color='boro_nm',
opacity=0.8,
hover_data=['law_cat_cd'],
color_continuous_scale=px.colors.cyclical.IceFire,
zoom=10)
fig.update_layout(
mapbox_style="dark",
showlegend=False,
mapbox_layers=[
{
"below": 'traces',
"sourcetype": "raster",
},
]
)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 4})
fig.show()
else:
break
def airquality_query(conn):
acode = ['UHF42', 'Borough']
while True:
        print("Which area code do you want to see?\n1. UHF42\n2. Borough\nq. Quit")
area = input('Enter here: ')
if area != '1' and area != '2':
break
area = int(area)
query = "SELECT geo_entity_name, sum(data_valuemessage) FROM " \
"airquality aq JOIN airquality_geo geo " \
"ON (aq.indicator_data_id=geo.indicator_data_id) " \
"WHERE geo_type_name=%(code)s " \
"GROUP BY geo_entity_name " \
"ORDER BY sum DESC;"
df = pd.read_sql_query(query,
conn,
params={'code': acode[area - 1]})
fig = px.bar(df, x='geo_entity_name', y='sum',
color='geo_entity_name',
barmode='relative',
labels={'pop': 'New York City Air Quality Data'})
fig.show()
def crime_airbnb(conn, col, fav):
while True:
col.remove()
crime_bo = ['QUEENS', 'MANHATTAN', 'BRONX', 'BROOKLYN', 'STATEN ISLAND']
an_bo = ['Queens', 'Manhattan', 'Bronx', 'Brooklyn', 'Staten Island']
print('Enter the number to see the crime and airbnb data on map.\n1.Queens\n'
'2.Manhattan\n3.Bronx\n4.Brooklyn\n5.Staten Island')
boro_n = int(input('Enter here: '))
query1 = 'SELECT * FROM crime_geo g ' \
'JOIN crime_region r ' \
'ON (g.cmplnt_num=r.cmplnt_num) ' \
'WHERE boro_nm = %(boro)s;'
df1 = pd.read_sql_query(query1, conn, params={'boro': crime_bo[boro_n - 1]})
df1 = df1.sample(3000)
df1['name'] = df1.shape[0] * ['crime']
data1 = df1.to_dict(orient='records')
col.insert_many(data1)
query2 = 'SELECT a.id, g.latitude, g.longitude, n.neighbourhood_group ' \
'FROM abnyc a, abnyc_geo g, abnyc_nbhd n ' \
'WHERE a.id=g.id AND a.neighbourhood=n.neighbourhood ' \
'AND neighbourhood_group = %(boro)s;'
df2 = pd.read_sql_query(query2, conn, params={'boro': an_bo[boro_n - 1]})
df2['name'] = df2.shape[0] * ['airbnb']
data2 = df2.to_dict(orient='records')
col.insert_many(data2)
df = pd.DataFrame(list(col.find()))
print(df.shape[0])
fig = px.scatter_mapbox(df, lat='latitude', lon='longitude',
color='name',
opacity=0.8,
hover_data=['id'],
color_continuous_scale=px.colors.cyclical.IceFire,
zoom=10)
fig.update_layout(
mapbox_style="dark",
showlegend=False,
mapbox_layers=[
{
"below": 'traces',
"sourcetype": "raster",
},
]
)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 4})
fig.show()
        print('Do you have any preferred Airbnb listings?\nEnter Airbnb IDs separated by commas.')
pre = input('Enter here: ')
lst = [int(i) for i in pre.split(',')]
for i in lst:
print(col.find({'id':i}))
if fav.find({'id':i}).count() > 0:
print('ID:',i,'already added in your favorite list.')
else:
query = 'SELECT id, name, host_id, neighbourhood, room_type, price, minimum_nights, number_of_reviews, availability_365 from abnyc WHERE id=%(ids)s;'
df = pd.read_sql_query(query, conn, params={'ids': i})
data = df.to_dict(orient='records')
fav.insert_many(data)
df = pd.DataFrame(list(fav.find()))
print(df)
if input('Press q to QUIT, any other key to CONTINUE: ') == 'q':
break
def airbnb_liquor(conn, col, fav):
while True:
col.remove()
# crime_bo = ['QUEENS', 'MANHATTAN', 'BRONX', 'BROOKLYN', 'STATEN ISLAND']
# an_bo = ['Queens', 'Manhattan', 'Bronx', 'Brooklyn', 'Staten Island']
# print('Enter the number to see the crime and airbnb data on map.\n1.Queens\n'
# '2.Manhattan\n3.Bronx\n4.Brooklyn\n5.Staten Island')
# boro_n = int(input('Enter here: '))
query1 = 'SELECT * FROM abnyc_geo;'
        df1 = pd.read_sql_query(query1, conn)
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
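# Each yielded (window, min_periods, center) tuple is a valid rolling configuration,
# e.g. window=3 yields min_periods in {0, 1, 2, 3} paired with center False/True;
# combinations where min_periods exceeds the window are skipped.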
# binary moments
def test_rolling_cov(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
result = getattr(frame.rolling(window=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
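    # The pairwise result is indexed by (date, column); selecting (slice(None), 1)
    # and column 5 extracts column 1's rolling cov/corr against column 5, and
    # dropping the second index level makes it comparable to the Series result.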
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(method, frame):
series = frame[1]
res = getattr(series.rolling(window=10), method)(frame)
res2 = getattr(frame.rolling(window=10), method)(series)
exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_rolling_apply_consistency_sum_nans(
consistency_data, window, min_periods, center, f
):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
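# Presumably skipped because np.nansum maps an all-NaN window to 0.0, so the
# apply(np.nansum) result is not expected to match rolling(...).sum() when
# min_periods == 0 lets fully-NaN windows produce output.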
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: | Series(v) | pandas.Series |
# coding=utf-8
"""
Map a signal track from one species/assembly to another
"""
import os
import numpy as np
import pandas as pd
import collections as col
import multiprocessing as mp
import psutil as psu
from crplib.metadata.md_signal import gen_obj_and_md, MD_SIGNAL_COLDEFS
from crplib.auxiliary.file_ops import create_filepath
from crplib.auxiliary.hdf_ops import get_chrom_list,\
check_path_infos, get_default_group, extract_chromsizes_from_map
from crplib.auxiliary.constants import DIV_B_TO_GB, MAPIDX_BLOCKS
_shm_carr = dict()
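# Assumption about the intended setup: this module-level dict is filled in the parent
# process (via the carrays argument of assemble_worker_params below) before the worker
# pool starts, so that with a fork-based start method the workers inherit references to
# the shared-memory arrays instead of receiving them through pickled task arguments.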
def assemble_worker_params(inputfile, inputgroup, mapfile, numalloc, tchroms, qchroms, carrays):
"""
:param inputfile:
:param inputgroup:
:param mapfile:
:param numalloc:
:param tchroms:
:param qchroms:
:param carrays:
:return:
"""
this_run = list(qchroms.keys())[:numalloc]
if not inputgroup:
inputgroup = get_default_group(inputfile)
for qchrom in this_run:
# NB: lock=False is possible since all writes to the chromosome
# arrays happen to disjoint slices
carrays[qchrom] = mp.Array('d', np.zeros(qchroms[qchrom], dtype=np.float64), lock=False)
params = []
commons = {'inputfile': inputfile, 'inputgroup': inputgroup,
'mapfile': mapfile}
base_path = os.path.join('/qt', MAPIDX_BLOCKS, '')
trg_to_load = col.defaultdict(list)
with pd.HDFStore(mapfile, 'r') as hdf:
chrom_maps = list(hdf.keys())
for cm in chrom_maps:
if cm.startswith(base_path):
query, target = os.path.split(cm.replace(base_path, ''))
if query in this_run and target in tchroms:
trg_to_load[target].append((query, cm))
for target, to_load in trg_to_load.items():
tmp = dict(commons)
tmp['tchrom'] = target
tmp['blocks'] = to_load
params.append(tmp)
for qchrom in this_run:
del qchroms[qchrom]
assert params, 'No parameter list for workers created'
return params
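# Illustrative shape of one returned parameter set (paths and chromosome names are made
# up; MAPIDX_BLOCKS stands for the imported constant):
# {'inputfile': 'signal.h5', 'inputgroup': '/signal', 'mapfile': 'map.h5',
#  'tchrom': 'chrT', 'blocks': [('chrQ', '/qt/' + MAPIDX_BLOCKS + '/chrQ/chrT'), ...]}
# i.e. one dict per target chromosome, listing the (query chromosome, HDF node path)
# pairs whose map blocks point at that target.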
def map_row(row, target, query):
"""
Row contains: tstart - tend - qstart - qend - qstrand
:param row:
:param target:
:param query:
:return:
"""
query[row[2]:row[3]] = target[row[0]:row[1]][::row[4]]
# this was for debugging
# return np.sum(target[row[0]:row[1]] > 0)
return row[1] - row[0]
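# Worked example (not part of the original module; strand assumed to be encoded as
# 1 / -1, consistent with the slicing step above):
#   target = np.array([1., 2., 3., 4., 5.])
#   query = np.zeros(5)
#   covered = map_row(np.array([0, 3, 2, 5, -1]), target, query)
#   # query is now [0., 0., 3., 2., 1.] and covered == 3 (target bases consumed)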
def map_signal_data(params):
"""
:param params:
:return:
"""
results = []
tchrom = params['tchrom']
with pd.HDFStore(params['inputfile'], 'r') as hdf:
load_group = os.path.join(params['inputgroup'], params['tchrom'])
sig_to_map = hdf[load_group].values
global _shm_carr
for qchrom, bpath in params['blocks']:
assert qchrom in bpath and tchrom in bpath, \
'Wrong block path {}: q {} - t {}'.format(bpath, qchrom, tchrom)
with pd.HDFStore(params['mapfile'], 'r') as hdf:
# load Pandas DataFrame from map file that holds all blocks
# describing a mapping between query and target for one
# particular chromosome combination
mapblocks = hdf[bpath]
carr = _shm_carr[qchrom]
cov = mapblocks.apply(map_row, axis=1, raw=True, args=(sig_to_map, carr))
results.append((qchrom, tchrom, cov.sum()))
assert results, 'No data processed for parameter set: {}'.format(params)
return results
def run_map_signal(args):
"""
:param args:
:return:
"""
baseline_mem = round(psu.virtual_memory().active / DIV_B_TO_GB, 2)
logger = args.module_logger
setattr(args, 'selectchroms', args.selectchroms.strip('"'))
logger.debug('Chromosome select pattern for query [map to]: {}'.format(args.selectchroms))
_, ingroup, infile = check_path_infos(args.inputfile, args.inputgroup)
_, outgroup, outfile = check_path_infos(args.outputfile, args.outputgroup)
qchroms = extract_chromsizes_from_map(args.mapfile, 'query', args.selectchroms)
num_qchroms = len(qchroms)
tchroms = get_chrom_list(infile, verify=True)
logger.debug('Chromosomes in target data file [map from]: {}'.format(tchroms))
meminfo = round(psu.virtual_memory().active / DIV_B_TO_GB - baseline_mem, 2)
logger.debug('Occupied RAM: {}GB'.format(meminfo))
_ = create_filepath(args.outputfile, logger)
logger.debug('Processing {} query chromosomes at a time'.format(args.allocate))
meminfo = round(psu.virtual_memory().active / DIV_B_TO_GB - baseline_mem, 2)
logger.debug('Start processing - occupied RAM: {}GB'.format(meminfo))
global _shm_carr
with | pd.HDFStore(outfile, args.filemode, complib='blosc', complevel=9, encoding='utf-8') | pandas.HDFStore |