| prompt (string, 19 – 1.03M chars) | completion (string, 4 – 2.12k chars) | api (string, 8 – 90 chars) |
|---|---|---|
import pandas as pd
from neuralprophet import NeuralProphet, set_random_seed
from src.demand_prediction.events_models import save_events_model, load_events_model
from src.config import SEED
def NeuralProphetEvents(future_events, past_events, events_name, train, test, leaf_name, model_name,
start_pred_time, events_dates, use_cache=False):
test_name = leaf_name
test_df = test.pd_dataframe()
train_df = train.pd_dataframe()
train_df['ds'] = train_df.index
train_df = train_df.rename(columns={'Quantity': 'y'})
name_path_model = leaf_name + "_" + model_name + "_" + start_pred_time
model = load_events_model(name_path_model)
if model is None or not use_cache:
print("Training Event Neural Prophet")
set_random_seed(SEED)
model = NeuralProphet()
model = model.add_country_holidays("US", mode="additive", lower_window=-1, upper_window=1)
model.add_events(events_name)
history_df = model.create_df_with_events(train_df, past_events)
print("Event Neural Prophet Fitting")
metrics = model.fit(history_df, freq='D')
save_events_model(model, name_path_model)
save_events_model(history_df, name_path_model + "_history_df")
else:
print("Loaded Event Neural Prophet")
history_df = load_events_model(name_path_model + "_history_df")
if history_df is None:
print("Creating History df Neural Prophet")
history_df = model.create_df_with_events(train_df, past_events)
save_events_model(history_df, name_path_model + "_history_df")
print("Start Predicting:")
future = model.make_future_dataframe(df=history_df, events_df=future_events, periods=len(test))
forecast = model.predict(future)
preds = forecast[['ds', 'yhat1']]
predictions = pd.DataFrame(preds).rename(columns={'ds': "Date", 'yhat1': model_name})
predictions.index = predictions.Date
predictions = predictions.drop(columns=['Date'])
y_test_df = pd.DataFrame(test_df, index=test_df.index, columns=['Quantity'])
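# Illustrative follow-up (an assumption, not part of the original function): align the
# forecast with the held-out quantities and report a simple error metric.
eval_df = predictions.join(y_test_df, how="inner")
mae = (eval_df[model_name] - eval_df["Quantity"]).abs().mean()
print("Event Neural Prophet test MAE:", mae)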
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 10:16:42 2021
@author: tungbioinfo
"""
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import time
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import pickle
import os, sys
from joblib import Parallel, delayed
import PCA_Analysis as pca
import RF_Analysis_Multiclass as rfc
import RF_Analysis_Binary as rfb
from Auto_ML_Multiclass import AutoML_classification
###############################################################################
############################## Read data set ##################################
###############################################################################
rumi = pd.read_csv("rumi.csv")
rumi = rumi.drop(rumi[rumi["Depressiongroup"]==1].index, axis=0).reset_index(drop=True)
depre_gr = rumi["Depressiongroup"].apply(lambda x: "BPD"
if x == 2 else "H"
if x == 0 else "MDD")
sex = rumi["Gender_1_male"].apply(lambda x: 0 if x == 2 else 1)
rumi = rumi.drop(columns = ["Depressiongroup", "Gender_1_male"])
rumi = pd.concat([depre_gr, sex, rumi], axis = 1)
rumi = shuffle(rumi).reset_index(drop=True)
rumi_meta = rumi[['MRI_expID', 'MRI_ordID', 'CurrentDepression', 'Depressiongroup', 'TIV',
'Age', 'Gender_1_male', 'BDI_Total', 'RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination',
'RRS_Total', 'Dep_PastEpisodes', 'Dep_Duration']]
rumi_meta = rumi_meta.set_index('MRI_expID')
sns.pairplot(rumi_meta[['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination', 'RRS_Total', 'Depressiongroup']],
hue="Depressiongroup")
rumi_meta_bdp = rumi_meta.loc[rumi_meta['Depressiongroup'] == "BPD"]
rumi_meta_mdd = rumi_meta.loc[rumi_meta['Depressiongroup'] == 'MDD']
sns.pairplot(rumi_meta_bdp[['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination', 'RRS_Total', 'CurrentDepression']],
hue="CurrentDepression")
sns.pairplot(rumi_meta_mdd[['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination', 'RRS_Total', 'CurrentDepression']],
hue="CurrentDepression")
rumi_region = rumi.drop(columns = ['MRI_ordID', 'CurrentDepression', 'Depressiongroup', 'TIV',
'Age', 'Gender_1_male', 'BDI_Total', 'RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination',
'RRS_Total', 'Dep_PastEpisodes', 'Dep_Duration'])
rumi_region = rumi_region.set_index('MRI_expID')
rumi_region_T = rumi_region.T
rumi_region_bdp = rumi_region.loc[rumi_meta_bdp.index]
rumi_region_mdd = rumi_region.loc[rumi_meta_mdd.index]
y = rumi_meta["Depressiongroup"].apply(lambda x: 0
if x == "MDD" else 1
if x == "BPD" else 2)
class_name = ["MDD", "BPD", 'Healthy']
X_train, X_test, y_train, y_test = train_test_split(rumi_region, y, test_size=0.3, random_state=42)
###############################################################################
######################## Step 1 - Run Auto_ML #################################
###############################################################################
automl = AutoML_classification()
result = automl.fit(X_train, y_train, X_test, y_test)
###############################################################################
################### Step 2 - Run selected models ##############################
###############################################################################
log_best, _, _, _, _ = automl.LogisticRegression(X_train, y_train, X_test, y_test)
evaluate_dt = automl.evaluate_multiclass(log_best, X_train, y_train, X_test, y_test,
model = "Logistics_regression", num_class=3, class_name = class_name)
sgd_best, _, _, _, _ = automl.Stochastic_Gradient_Descent(X_train, y_train, X_test, y_test)
evaluate_dt = automl.evaluate_multiclass(sgd_best, X_train, y_train, X_test, y_test,
model = "Stochastic_Gradient_Descent", num_class=3, class_name = class_name)
rf_best, _, _, _, _ = automl.Random_Forest(X_train, y_train, X_test, y_test)
evaluate_rf = automl.evaluate_multiclass(rf_best, X_train, y_train, X_test, y_test,
model = "Random Forest", num_class=3, top_features=20, class_name = class_name)
###############################################################################
########## Step 3.1 - Run forward algorithm + Random Forest ###################
###############################################################################
import itertools
from scipy import interp
from itertools import cycle
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import accuracy_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold, RepeatedStratifiedKFold, RepeatedKFold
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV, RandomizedSearchCV
from xgboost import XGBClassifier
from datetime import datetime as dt
import warnings
warnings.filterwarnings("ignore")
st_t = dt.now()
n_samples, n_features = X_train.shape
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
max_depth = [5, 10, 25, 50, 75, 100]
min_samples_leaf = [1, 2, 4, 8, 10]
min_samples_split = [2, 4, 6, 8, 10]
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features,
}
base_model_rf = RandomForestClassifier(criterion = "gini", random_state=42)
n_iter_search = 30
scoring = "accuracy"
n_selected_features = 240
# selected feature set, initialized to be empty
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_model = []
start = time.time()
while count < n_selected_features:
max_acc = 0
time_loop = time.time()
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
#cv=cv_timeSeries,
cv=2,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if acc > max_acc:
max_acc = acc
idx = i
best_model = best_estimator
F.append(idx)
count += 1
print("The current number of features: {} - Accuracy: {}%".format(count, round(max_acc*100, 2)))
print("Time for computation: {}".format(time.time() - time_loop))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
all_info = pd.concat([c, a, f["All"]], axis=1)
all_info.columns = ['Num_feature', 'Accuracy', 'Feature']
all_info = all_info.sort_values(by='Accuracy', ascending=False).reset_index(drop=True)
all_info.to_csv("CDI_subset_accuracy.csv", index=False)
f.to_csv("CDI_subset.csv")
with open("CDI_models.txt", "wb") as fp:
pickle.dump(all_model, fp)
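# Illustrative follow-up (an assumption, not in the original script): reload the saved
# forward-selection results and recover the best-performing feature subset and model.
best_info = pd.read_csv("CDI_subset_accuracy.csv").iloc[0]
best_features = [s.strip() for s in best_info["Feature"].split(",")]
with open("CDI_models.txt", "rb") as fp:
    saved_models = pickle.load(fp)
best_rf = saved_models[int(best_info["Num_feature"]) - 1]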
###############################################################################
################# Step 3.2 - Run forward algorithm + SGD ######################
###############################################################################
from sklearn.linear_model import SGDClassifier
st_t = dt.now()
n_samples, n_features = X_train.shape
# Loss function
loss = ["hinge", "log", "modified_huber", "squared_hinge", "perceptron"]
penalty = ["l2", "l1", "elasticnet"]
# The higher the value, the stronger the regularization
alpha = np.logspace(-7, -1, 100)
# The Elastic Net mixing parameter
l1_ratio = np.linspace(0, 1, 100)
epsilon = np.logspace(-5, -1, 100)
learning_rate = ["constant", "optimal", "invscaling", "adaptive"]
eta0 = np.logspace(-7, -1, 100)
hyperparameter = {"loss": loss,
"penalty": penalty,
"alpha": alpha,
"l1_ratio": l1_ratio,
"epsilon": epsilon,
"learning_rate": learning_rate,
"eta0": eta0}
model = SGDClassifier(n_jobs = -1)
n_iter_search = 30
scoring = "accuracy"
n_selected_features = 240
# selected feature set, initialized to be empty
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_model = []
start = time.time()
while count < n_selected_features:
max_acc = 0
time_loop = time.time()
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator = model,
param_distributions = hyperparameter,
cv = 2,
scoring = scoring,
n_iter = n_iter_search,
n_jobs = -1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if acc > max_acc:
max_acc = acc
idx = i
best_model = best_estimator
F.append(idx)
count += 1
print("The current number of features: {} - Accuracy: {}%".format(count, round(max_acc*100, 2)))
print("Time for computation: {}".format(time.time() - time_loop))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
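# Illustrative sketch (an assumption mirroring the Random Forest block above; the file
# names below are placeholders, not from the original script): persist the SGD results.
all_info = pd.concat([c, a, f["All"]], axis=1)
all_info.columns = ['Num_feature', 'Accuracy', 'Feature']
all_info = all_info.sort_values(by='Accuracy', ascending=False).reset_index(drop=True)
all_info.to_csv("SGD_subset_accuracy.csv", index=False)
with open("SGD_models.txt", "wb") as fp:
    pickle.dump(all_model, fp)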
###############################################################################
######## Step 4.1 - Run forward algorithm + Random_Forest_regression ##########
###############################################################################
from Auto_ML_Regression import AutoML_Regression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_percentage_error
import math
y = rumi_meta["RRS_Brooding"]
rumi_region_plus = pd.concat([rumi_meta[['CurrentDepression', 'TIV', 'Age','Gender_1_male']],
rumi_region], axis=1)
#-------
y = rumi_meta_bdp["BDI_Total"]
rumi_region_bdp_plus = pd.concat([rumi_meta_bdp[['CurrentDepression', 'TIV', 'Age','Gender_1_male']],
rumi_region_bdp], axis=1)
X_train, X_test, y_train, y_test = train_test_split(rumi_region_bdp_plus, y, test_size=0.3, random_state=42)
# ------
y = rumi_meta_mdd["BDI_Total"]
rumi_region_mdd_plus = pd.concat([rumi_meta_mdd[['CurrentDepression', 'TIV', 'Age','Gender_1_male']],
rumi_region_mdd], axis=1)
X_train, X_test, y_train, y_test = train_test_split(rumi_region_mdd_plus, y, test_size=0.3, random_state=42)
# ------
ress_BPD_brain = pd.read_csv("BPD_brain.csv", header=None)
ress_BPD_brain.columns = rumi_region.columns
ress_BPD_meta = pd.read_csv("BPD_rrs.csv", header=None)
ress_BPD_meta.columns = ['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination','RRS_Total']
y = ress_BPD_meta["RRS_Brooding"]
X_train, X_test, y_train, y_test = train_test_split(ress_BPD_brain, y, test_size=0.3, random_state=42)
# ------
ress_MDD_brain = pd.read_csv("MDD_brain.csv", header=None)
ress_MDD_brain.columns = rumi_region.columns
ress_MDD_meta = pd.read_csv("MDD_rrs.csv", header=None)
ress_MDD_meta.columns = ['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination','RRS_Total']
y = ress_MDD_meta["RRS_Brooding"]
X_train, X_test, y_train, y_test = train_test_split(ress_MDD_brain, y, test_size=0.3, random_state=42)
# ------
ress_HC_brain = pd.read_csv("Health_brain.csv", header=None)
ress_HC_brain.columns = rumi_region.columns
ress_HC_meta = pd.read_csv("Health_rrs.csv", header=None)
ress_HC_meta.columns = ['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination','RRS_Total']
y = ress_HC_meta["RRS_Brooding"]
X_train, X_test, y_train, y_test = train_test_split(ress_HC_brain, y, test_size=0.3, random_state=42)
automl = AutoML_Regression()
result = automl.fit(X_train, y_train, X_test, y_test)
result.to_csv("AutoML_RRS_total_rumi_region_plus.csv", index = False)
ress_BPD_meta["Label"] = "BPD"
ress_MDD_meta["Label"] = "MDD"
ress_HC_meta["Label"] = "HC"
ress = pd.concat([ress_BPD_meta, ress_MDD_meta, ress_HC_meta]).reset_index(drop=True)
sns.pairplot(ress, hue="Label")
#------------------------------------------------------------------------------
automl = AutoML_Regression()
lasso_best, _, _, _ = automl.Random_Forest(X_train, y_train, X_test, y_test)
lasso_best.fit(X_train, y_train)
y_pred = lasso_best.predict(X_test)
plt.scatter(y_pred, y_test, s=8)
plt.plot([min(y_pred), max(y_pred)], [min(y_test), max(y_test)], '--k')
plt.ylabel('True RRS_total')
plt.xlabel('Predicted RRS_total')
#plt.text(s='Random Forest without Forward variable', x=1,
# y=2, fontsize=12, multialignment='center')
plt.text(min(y_pred), max(y_test) - 5, r'$R^2$ = %.2f' % (r2_score(y_test, y_pred)))
plt.text(min(y_pred), max(y_test) - 10, r'MSE = %.2f' % (mean_squared_error(y_test, y_pred)))
plt.text(min(y_pred), max(y_test) - 15, r'Accuracy = %.2f %%' % (100 - 100*mean_absolute_percentage_error(y_test, y_pred)))
#plt.ticklabel_format(axis="both", style="sci", scilimits=(0, 0))
errors = abs(y_pred - y_test)
mean_err = np.stack(errors/y_test)
mean_err = mean_err[np.isfinite(mean_err)]
mape = 100 * np.mean(mean_err)
acc = 100 - mape
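# Equivalent check (a sketch): sklearn's mean_absolute_percentage_error returns a fraction,
# so essentially the same accuracy figure can be computed directly; it differs only in how
# zero targets are handled compared with the manual filtering above.
acc_sklearn = 100 - 100 * mean_absolute_percentage_error(y_test, y_pred)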
#------------------------------------------------------------------------------
n_samples, n_features = X_train.shape
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
max_depth = [5, 10, 25, 50, 75, 100]
min_samples_leaf = [1, 2, 4, 8, 10]
min_samples_split = [2, 4, 6, 8, 10]
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features,
}
my_cv = RepeatedKFold(n_splits=10, n_repeats=10, random_state=42)
base_model_rf = RandomForestRegressor(criterion = "mse", random_state=42)
n_iter_search = 30
scoring = "neg_mean_squared_error"
n_selected_features = 240
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_mse = []
all_model = []
start = time.time()
while count < n_selected_features:
max_acc = 0
min_err = np.inf
time_loop = time.time()
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
#cv=my_cv,
cv=5,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
mse = mean_squared_error(y_test, y_pred)
#acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if mse < min_err:
min_err = mse
idx = i
best_model = best_estimator
#errors = abs(y_pred - y_test)
#mean_err = np.stack(errors/y_test)
#mean_err = mean_err[np.isfinite(mean_err)]
mape = mean_absolute_percentage_error(y_test, y_pred)
max_acc = 100 - (100*mape)
F.append(idx)
count += 1
print("The current number of features: {} - MSE: {}".format(count, round(min_err, 2)))
print("Time for computation: {}".format(time.time() - time_loop))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
all_mse.append(min_err)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
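# Illustrative sketch (an assumption, following the pattern of the classification runs
# above): assemble the regression forward-selection results into one summary table.
e = pd.DataFrame(all_mse)
f["All"] = f[f.columns[0:]].apply(lambda x: ', '.join(x.dropna().astype(str)), axis=1)
all_info = pd.concat([c, a, e, f["All"]], axis=1)
all_info.columns = ['Num_feature', 'Accuracy', 'MSE', 'Feature']
all_info = all_info.sort_values(by='MSE', ascending=True).reset_index(drop=True)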
import pandas as pd
import numpy as np
import yfinance as yf
from pandas import Series
from prettytable import PrettyTable
from Common.Readers.Engine.AbstractEngine import AbstractEngine
from Common.StockType.AbstractStock import AbstractStock
from Common.StockType.Equities.AbstractStockEquity import AbstractStockEquity
from Common.StockType.Funds.ExchangeTraded.ExchangeTradedFund import ExchangeTradedFund
from Common.StockType.Funds.Index.IndexFund import IndexFund
from Common.StockType.Funds.Mutual.MutualFund import MutualFund
from Common.StockType.Currencies.Crypto.CryptoCurrency import CryptoCurrency
from Common.StockType.Currencies.Regular.RegularCurrency import RegularCurrency
from Common.StockType.Futures.AbstractStockFuture import AbstractStockFuture
class YahooFinanceEngine(AbstractEngine):
__pretty_table: PrettyTable = PrettyTable()
_info_labels: list = list()
_info_list: list = list()
__ticker: str = 'NA'
_stock_type: AbstractStock
_url: str = 'NA'
_url_logo: str = 'NA'
_address1: str = 'NA'
_address2: str = 'NA'
_city: str = 'NA'
_company_name: str = 'NA'
_country: str = 'NA'
_currency: str = 'NA'
_exchange: str = 'NA'
_fax: str = 'NA'
_state: str = 'NA'
_phone: str = 'NA'
_postal_code: str = 'NA'
_market: str = 'NA'
_market_cap: str = 'NA'
_quote_type: str = 'NA'
_beta: float = -1.1
_high52: float = -1.1
_low52: float = -1.1
_high_today: float = -1.1
_low_today: float = -1.1
_avg50: float = -1.1
_avg200: float = -1.1
_ratio_payout: float = -1.1
_ratio_peg: float = -1.1
_ratio_short: float = -1.1
_pe_forward: float = -1.1
_pe_trailing: float = -1.1
_book_value: float = -1.1
_book_price_to: float = -1.1
_ent_value: int = -1
_ent2revenue: float = -1.1
_ent2ebitda: float = -1.1
_div_rate: float = -1.1
_div_5y_avg_yield: float = -1.1
_div_yield: float = -1.1
_div_last_value: float = -1.1
_div_last_date: int = -1
_div_ex_date: int = -1
_split_date: int = -1
_fiscal_year_end_last: int = -1
_fiscal_year_end_next: int = -1
_last_quarter: int = -1
InfoDic: dict # = dict()
ActionsDf: pd.DataFrame = pd.DataFrame()
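# Sketch (an assumption, not part of the original class): fields such as InfoDic and
# ActionsDf are typically populated from yfinance, e.g. ticker = yf.Ticker('MSFT');
# ticker.info feeds the InfoDic-style fields and ticker.actions feeds ActionsDf.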
# Inspired by https://www.quantopian.com/posts/grahamfundmantals-algo-simple-screening-on-benjamin-graham-number-fundamentals
# Trading Strategy using Fundamental Data
# 1. Filter the top 50 companies by market cap
# 2. Find the top two sectors that have the highest average PE ratio
# 3. Every month exit all the positions before entering new ones at the month
# 4. Log the positions that we need
from csv import DictReader
from os import path, remove
from datetime import datetime
import numpy as np
import pandas as pd
import wget
import json
from zipline.api import (
symbol,
order_target_percent,
record,
schedule_function,
date_rules,
time_rules,
)
from zipline.errors import SymbolNotFound
from contract.ContractHandler import ContractHandler
def initialize(context):
# Ethereum contract
#context.contract = ContractHandler()
# Get blacklist of sectors which returns a list of codes
#context.blacklist = context.contract.getBlacklist()
# for testing purpose without Ethereum exclude Defense, Beer & Alcohol, Tabacco and Coal
context.blacklist = [26.0, 4.0, 5.0, 29.0]
# Only run get_fundamentals when necessary based on the rebalance function
context.initial = True
# Dictionary of stocks and their respective weights
context.stock_weights = {}
# Count of days before rebalancing
context.days = 0
# Number of sectors to go long in
context.sect_numb = 2
# Sector mappings
context.sector_mappings = get_sectors(key='code')
context.ticker_sector_dict = get_sector_code()
# TODO: Update this accordingly (weekly?)
# Rebalance monthly on the first day of the month at market open
schedule_function(rebalance,
date_rule=date_rules.month_start(),
time_rule=time_rules.market_open())
def rebalance(context, data):
# update the fundamentals data
if not context.initial:
get_fundamentals(context, data)
# Exit all positions before starting new ones
for stock in context.portfolio.positions:
if stock not in context.fundamental_df:
order_target_percent(stock, 0)
print("The two sectors we are ordering today are %r" % context.sectors)
# Create weights for each stock
weight = create_weights(context, context.stocks)
# Rebalance all stocks to target weights
for stock in context.fundamental_df:
if weight != 0:
print("Ordering %0.0f%% percent of %s in %s" %
(weight * 100,
stock.symbol,
context.sector_mappings[context.fundamental_df[stock]['sector_code']]))
order_target_percent(stock, weight)
# track how many positions we're holding
record(num_positions=len(context.fundamental_df))
def get_fundamentals(context, data):
print("Updating fundamentals data")
fundamentals = dict()
with open(path.join('data', 'fundamentals', 'data.csv'), 'r') as fundamentals_csv:
reader = DictReader(fundamentals_csv, ['ticker', 'indicator', 'dimension', 'date', 'value'])
thisticker = ''
values = dict()
for line in reader:
# print("Processing line {}".format(line))
try:
symbol_ticker = symbol(line['ticker'])
if data.can_trade(symbol_ticker):
# Store most recent values in the ticker
if thisticker != symbol_ticker:
# print("Processing {}".format(symbol_ticker))
if not thisticker:
thisticker = symbol_ticker
else:
# add the sector code
try:
values['sector_code'] = context.ticker_sector_dict[thisticker.symbol]
if values['sector_code'] and values['pe_ratio'] and values['market_cap']:
fundamentals[thisticker] = values
# print("Adding {}".format(values))
values = dict()
except KeyError as e:
# print("Error on adding {}".format(e))
pass
thisticker = symbol_ticker
# Select only data that was available at that time
date = data.current(symbol_ticker, "last_traded").replace(tzinfo=None)
if date > datetime.strptime(line['date'], '%Y-%m-%d'):
# Set PE Ratio
if line['indicator'] in 'EPS':
values['pe_ratio'] = float(line['value'])
# Set Market Cap
elif line['indicator'] in 'SHARESWA':
price = data.current(symbol_ticker, "price")
totalshares = float(line['value'])
market_cap = price * totalshares
# Only consider stock with at least 10 million market cap
if market_cap > 10000000:
values['market_cap'] = price * totalshares
except SymbolNotFound as e:
pass
# convert dict to DataFrame
fundamentals_df = pd.DataFrame.from_dict(fundamentals)
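# Sketch of the remaining steps described in the strategy comments at the top of this
# script (an assumption; the excerpt ends here): rank fundamentals_df by market cap,
# keep the top 50 companies, then select the two sectors with the highest average PE ratio.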
import argparse
import os
import sys
from collections import defaultdict
import pandas as pd
from maggot import Experiment
from maggot.containers import NestedContainer
from maggot.utils import bold, green, red, blue
pd.set_option("display.max_colwidth", 500)
import os as os
from lib import ReadCsv
from lib import ReadConfig
from lib import ReadData
from lib import NetworkModel
from lib import ModelMetrics
from lib import SeriesPlot
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from lib import modwt
import keras
from datetime import date,datetime,time
from datetime import datetime
config = ReadConfig.ReadConfig()
config_data = config.read_config(os.path.join("config", "config.json"))
reader = ReadData.ReadData()
all_data = reader.readClimateFiles(config_data)
subset = all_data[['date','site_x', 'Hs','Hmax','Tz','Tp','DirTpTRUE','SST']]
subset.describe()
def make_date(series):
for dt in series:
yield datetime.strptime(dt, '%d/%m/%Y')
dates = list(make_date(subset['date']))
subset.index = range(0, subset.shape[0])
datesDf = pd.DataFrame({'dates': pd.Series(dates)}, index=range(0,len(dates)))
subset2 = pd.concat([subset, datesDf], axis=1)
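# Illustrative continuation (an assumption, not in the original excerpt): index the merged
# frame by its parsed dates so a wave-height series can be plotted over time.
subset2 = subset2.set_index('dates')
subset2['Hs'].plot(title='Hs over time')
plt.show()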
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 8 22:46:34 2019
@author: Samuel
"""
import pandas as pd
import numpy as np
import pickle
import six
import warnings
from itertools import cycle
from collections import OrderedDict
from scipy.sparse import csr_matrix
from sklearn.base import BaseEstimator, TransformerMixin
class DiscreteMinxin(object):
def get_obj_cols(self, df):
dtypes_to_encode=['object','category']
cols = df.select_dtypes(include=dtypes_to_encode).columns.tolist()
return cols
def save(self, path):
with open(path,'wb+') as fp:
pickle.dump(self.__dict__, fp)
def load(self,path):
with open(path,'rb') as fp:
dict_param = pickle.load(fp)
return dict_param
class OneHotEncoder(BaseEstimator, TransformerMixin, DiscreteMinxin):
def __init__(self, dummy_na=True, handle_unknown='ignore',
category_threshold=50, drop_threshold_cols=True,
replace_na=-99):
"""
parameter
---------
dummy_na: bool, defualt True
handle_unknown: str, 'error' or 'ignore'
category_threshold: columns of categories more then this threhold will
not be encoded
drop_threshold_cols: drop columns that not satisfy category_threshold
or columns of one category
"""
self.dummy_na = dummy_na
self.handle_unknown = handle_unknown
self.category_threshold = category_threshold
self.drop_threshold_cols = drop_threshold_cols
self.encode_cols= []
self.drop_cols=[]
self.mapping = {}
self.replace_na = replace_na
self._dim = None
def fit(self, X, y=None, cols_to_encoder=None, extra_numeric_cols=None):
"""
parameter
----------
X: DataFrame obj to generate one-hot-encoder rule
cols_to_encoders: specify the columns to be encoded
extra_numeric_cols: if cols_to_encoder is provided this param will
not be used, otherwise all object columns and extra_numeric_cols
will be encoded.
"""
if not isinstance(X, pd.DataFrame):
raise TypeError('X should be DataFrame object')
if y is not None:
if y not in X.columns:
raise ValueError('y is not in X.columns during fit')
self._dim = X.shape[1] -1
else:
self._dim = X.shape[1]
if cols_to_encoder is None:
cols = self.get_obj_cols(X)
cols += list(extra_numeric_cols) if extra_numeric_cols is not None \
else []
else:
cols = cols_to_encoder
cols = list(set(cols))
# no columns to encode
if len(cols)==0:
return
# remove target if it is included in data source
if isinstance(y, six.string_types) and y in cols:
cols.remove(y)
# re-order cols by original order
cols = sorted(cols, key=X.columns.get_loc)
# convert NaN to the pre-defined value
df = X[cols].fillna(self.replace_na, downcast='infer')
# generate encoding rules
cats_list = pd.Series()
for col in cols:
cats_list[col] = df[col].unique().tolist()
if not self.dummy_na and self.replace_na in cats_list[col]:
cats_list[col].remove(self.replace_na)
cats_cnt = cats_list.apply(lambda x: len(x))
# exclude columns with too many categories or just one category
drop_mask = (cats_cnt > self.category_threshold) | (cats_cnt==1)
drop_index = cats_cnt[drop_mask].index
cats_list = cats_list[~cats_list.index.isin(drop_index)]
self.drop_cols = drop_index.tolist()
self.encode_cols = cats_list.index.tolist()
maps={}
for col in self.encode_cols:
# map each value in the column to an index
val_list = cats_list[col]
val_map = OrderedDict({val:i for i,val in enumerate(val_list)})
maps[col] = val_map
self.mapping = maps
def transform(self, X, y=None, dtype=None, inplace=False):
"""
parameter
-----------
dtype: specifies the dtype of encoded value
"""
if not isinstance(X, pd.DataFrame):
raise TypeError('X shoule be DataFrame object')
if y is not None:
if y not in X.columns:
raise ValueError('y not in X.column during transform')
if self._dim != X.shape[1] -1:
raise ValueError('dimension error')
elif self._dim != X.shape[1] :
raise ValueError('dimension error')
if not inplace:
X = X.copy() # X=X.copy(deep=True)
if self.drop_threshold_cols:
X.drop(self.drop_cols, axis=1, inplace=True)
data_to_encode = X[self.encode_cols].fillna(self.replace_na,
downcast='infer')
with_dummies = [X.drop(self.encode_cols,axis=1)]
prefix = self.encode_cols
prefix_sep = cycle(['_'])
for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix,
prefix_sep):
# col is (col_name, col_series) type
dummy = self._encode_column(col[1], pre, sep, dtype = dtype)
with_dummies.append(dummy)
result = pd.concat(with_dummies, axis=1)
return result
def _encode_column(self, data, prefix, prefix_sep, dtype):
if dtype is None:
dtype = np.uint8
maps = self.mapping[prefix]
dummy_strs = cycle([u'{prefix}{sep}{val}'])
dummy_cols = [dummy_str.format(prefix=prefix,sep=prefix_sep,val=str(v))
for dummy_str, v in zip(dummy_strs, maps.keys())]
if isinstance(data, pd.Series):
index = data.index
else:
index = None
row_idxs= []
col_idxs= []
for i, v in enumerate(data):
idx = maps.get(v,None)
if idx is None:
print("{} only exist in test column '{}'".format(v, prefix))
else:
row_idxs.append(i)
col_idxs.append(idx)
sarr = csr_matrix((np.ones(len(row_idxs)),(row_idxs,col_idxs)),shape=
(len(data),len(dummy_cols)), dtype=dtype)
out = pd.SparseDataFrame(sarr, index=index, columns=dummy_cols,
default_fill_value=0,dtype=dtype)
return out.astype(dtype)
class MeanEncoder(BaseEstimator, TransformerMixin, DiscreteMinxin):
def __init__(self, dummy_na = True, handle_unknown='prior', n_critical=1,
scale_factor=1, drop_last = False, replace_na= -99):
"""
dummy_na: bool, if False the null values will be replaced with the prior after
transform
handle_unknown: str, 'error' of 'prior'
drop_last: bool,whether to get C-1 categories out of C by removing the
last class.
n_critical: the critical point beyond which the posterior contributes more
scale_factor: scale the smoothing factor
replace_na : int
"""
self.dummy_na = dummy_na
self.handle_unknown =handle_unknown
self.n_critical = n_critical
self.scale_factor = scale_factor
self.drop_last = drop_last
self.mapping={}
self.prior=None
self.encode_cols= None
self.replace_na = replace_na #
self._dim =None # attribution dimension
def fit(self, X, y, cols_to_encode=None, extra_numeric_cols=None):
if not isinstance(X, pd.DataFrame):
raise ValueError('X should be DataFrame type')
if isinstance(y, six.string_types):
if y in X.columns:
self._dim = X.shape[1] - 1
X = X.rename(columns={y:'_y_'})
else:
raise ValueError('y not in X.columns during fit')
else:
self._dim = X.shape[1]
y = pd.Series(y, name='_y_')
X = X.join(y)
X['_y_'] = X['_y_'].astype(int)
# get encoder columns
if cols_to_encode is None:
cols = self.get_obj_cols(X)
cols += list(extra_numeric_cols) if extra_numeric_cols is not None\
else []
else:
cols = cols_to_encode
cols = list(set(cols))
if len(cols)==0:
return
# re-order cols by original order
cols = sorted(cols, key=X.columns.get_loc)
self.encode_cols = cols
data_to_encode = X[self.encode_cols+['_y_']]
# convert na to a pre-defined value
data_to_encode.fillna(self.replace_na, downcast='infer',inplace=True)
prior = data_to_encode['_y_'].value_counts()/len(data_to_encode['_y_'])
prior.sort_index(axis=0,inplace=True)
prior.name='prior'
self.prior = prior #series
maps = {}
for col in self.encode_cols:
ctb = pd.crosstab(index=data_to_encode[col], columns=data_to_encode['_y_'])
# deal with missing y.
ctb = ctb.reindex(columns=prior.index, fill_value = 0)
ctb.sort_index(axis=1,inplace=True)
# calculate posterior
post = ctb.apply(lambda x: x/x.sum(), axis =1)
# calculate smoothing factors for the prior and posterior
smooth = ctb.applymap(lambda x: 1/(1+np.exp(-(x-self.n_critical)/self.scale_factor)))
smooth_prior = (1-smooth).multiply(prior,axis=1) # DataFrame multiple series
smooth_post = smooth.multiply(post)
codes = smooth_prior + smooth_post
# normalize
codes = codes.divide(codes.sum(axis=1),axis=0)
# encode NaN with the prior if NaN is not treated as a category
if not self.dummy_na and self.replace_na in codes.index:
codes.loc[self.replace_na,:]=self.prior
maps[col] =codes
self.mapping = maps
def transform(self, X, y=None):
if not isinstance(X, pd.DataFrame):
raise ValueError('X should be DataFrame type')
if isinstance(y, six.string_types) and y in X.columns:
if self._dim != X.shape[1] -1:
raise ValueError('dimension error')
elif self._dim != X.shape[1]:
raise ValueError('dimension error')
if not self.encode_cols:
return X
data_to_encode = X[self.encode_cols]
#fill na
data_to_encode.fillna(self.replace_na, downcast='infer',inplace=True)
with_dummies = [X.drop(self.encode_cols,axis=1)]
prefix = self.encode_cols
prefix_sep = cycle(['_'])
for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix,
prefix_sep):
# col is (col_name, col_series) type
dummy = self._encode_column(col[1], pre, sep)
with_dummies.append(dummy)
result = pd.concat(with_dummies, axis=1)
return result
def _encode_column(self, data, prefix, prefix_sep):
maps = self.mapping[prefix]
dummy_strs = cycle([u'{prefix}{sep}{val}'])
dummy_cols = [dummy_str.format(prefix=prefix,sep=prefix_sep,val=str(v))
for dummy_str, v in zip(dummy_strs, maps.columns)]
if isinstance(data, pd.Series):
index = data.index
else:
index = None
enc_df = maps.loc[data.values,:] # rows for unknown values come back as NaN
#handle unknown value
if not all(data.isin(maps.index)):
msg = "unknown category {} in column '{}'".format(
data[~data.isin(maps.index)].values, prefix)
if self.handle_unknown=='error' :
raise ValueError(msg)
else:
print(msg)
enc_df.fillna(self.prior, inplace=True)
enc_df.index = index
enc_df.columns = dummy_cols
if self.drop_last:
enc_df = enc_df.iloc[:,:-1]
return enc_df
class WoeEncoder(BaseEstimator, TransformerMixin, DiscreteMinxin):
"""
currently only support discrete variable encode.
"""
def __init__(self, dummy_na = True, handle_unknown='zero', replace_na=-99,
reg = 1):
'''
dummy_na: bool, if true null value is treated as a category, otherwise
null value will be filled with zero.
handle_unknown: one of ('zero', 'error')
reg: int, Bayesian prior value to avoid dividing by zero when calculating WOE.
'''
self.dummy_na = dummy_na
self.handle_unknown = handle_unknown
self.replace_na = replace_na
self.mapping ={}
self.reg = reg
self._dim = None
def fit(self, X, y, cols_to_encode = None, extra_numeric_cols=None):
if not isinstance(X, pd.DataFrame):
raise ValueError('X should be DataFrame type')
if isinstance(y, six.string_types):
if y in X.columns:
self._dim = X.shape[1] - 1
X = X.rename(columns={y:'_y_'})
else:
raise ValueError('y not in X.columns during fit')
else:
self._dim = X.shape[1]
y = pd.Series(y, name='_y_')
X = X.join(y)
# target label as '_y_'
X['_y_'] = X['_y_'].astype(int)
# get encoder columns
if cols_to_encode is None:
cols = self.get_obj_cols(X)
cols += list(extra_numeric_cols) if extra_numeric_cols is not None \
else []
else:
cols = cols_to_encode
# re-order cols by original order
self.encode_cols = sorted(list(set(cols)), key=X.columns.get_loc)
if len(self.encode_cols)==0:
return
data_to_encode = X[self.encode_cols+['_y_']]
# convert na to a predefined value
data_to_encode.fillna(self.replace_na, downcast='infer',inplace=True)
self._pos = data_to_encode['_y_'].sum() # global positive count
self._neg = len(data_to_encode['_y_']) - self._pos # global negative count
maps ={}
for col in self.encode_cols:
woe = self._compute_woe(data_to_encode, col, '_y_') # return series
maps[col] = woe
self.mapping = maps
def _compute_woe(self, df, var, y='_y_'):
grp = df[y].groupby(df[var]).agg(['sum',lambda x: x.count()-x.sum()])
grp = grp.rename(columns={'sum':'pos', '<lambda>':'neg'})
#use bayesian prior value to avoid dividing by zero
woe = np.log((grp['pos']+self.reg)/(grp['neg']+self.reg)) - \
np.log((self._pos+2*self.reg)/(self._neg+2*self.reg))
if not self.dummy_na and self.replace_na in woe:
woe[self.replace_na] = 0.0
return woe
def transform(self, X, y=None, inplace=False):
if not isinstance(X, pd.DataFrame):
raise ValueError('X should be DataFrame type')
if isinstance(y, six.string_types) and y in X.columns:
if self._dim != X.shape[1] -1:
raise ValueError('dimension error')
elif self._dim != X.shape[1]:
raise ValueError('dimension error')
if not self.encode_cols:
return X
if not inplace:
X = X.copy()
X[self.encode_cols] = X[self.encode_cols].fillna(self.replace_na,
downcast = 'infer')
msg = "unseen category {} in column '{}'"
for col in self.encode_cols:
X[col] = X[col].map(self.mapping[col]) # unseen value filled with NaN
#handle unknown value
if any(X[col].isnull()):
if self.handle_unknown == 'error':
raise ValueError(msg.format(X[X[col].isnull()][col].values, col))
else:
print(msg.format(X[X[col].isnull()][col].values, col))
X[col] = X[col].fillna(0.0)
return X
if __name__== '__main__':
warnings.filterwarnings('ignore')
print('OneHotEncoder'.center(40,'-'))
df = pd.DataFrame([[1,2,3,'a',2.0],[1,np.nan,4,'6',3.0],[2,3,4,5,6],
[2.0,3,4,5,np.nan]], columns=['x','y','z','j','k'])
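# Illustrative usage sketch (an assumption; the original test block is truncated here):
# exercise the three encoders defined above on the toy frame.
enc = OneHotEncoder(dummy_na=True)
enc.fit(df, cols_to_encoder=['j'])
print(enc.transform(df))
print('MeanEncoder'.center(40,'-'))
y = pd.Series([0, 1, 0, 1])
me = MeanEncoder()
me.fit(df, y, cols_to_encode=['j'])
print(me.transform(df))
print('WoeEncoder'.center(40,'-'))
we = WoeEncoder()
we.fit(df, y, cols_to_encode=['j'])
print(we.transform(df))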
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 3 13:35:46 2022
@author: user
"""
import os
import numpy as np
import sklearn
from sklearn import metrics
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.ticker as ticker
from mpl_toolkits.axisartist.parasite_axes import SubplotHost
from scipy.spatial.transform import Rotation
import pandas as pd
import scipy.cluster
import scipy.optimize
from collections import Counter
import copy
os.chdir(os.path.dirname(__file__))
class Parameter:
PATH = r'./Skels connectome_mod' # Path to .swc files
RUN = True
SAVE = False
PLOT = True
RN = '1'
SEED = 1234
glo_info = pd.read_excel('./all_skeletons_type_list_180919.xls') # Path to glomerulus label information
fp = [f for f in os.listdir(Parameter.PATH) if os.path.isfile(os.path.join(Parameter.PATH, f))]
fp = [os.path.join(Parameter.PATH, f) for f in fp]
fp.pop(17)
class MorphData():
def __init__(self):
self.morph_id = []
self.morph_parent = []
self.morph_dist = []
self.neuron_id = []
self.endP = []
self.somaP = []
self.calyxdist = []
self.calyxdist_trk = []
self.calyxdist_per_n = []
self.LHdist = []
self.LHdist_trk = []
self.LHdist_per_n = []
self.ALdist = []
self.ALdist_trk = []
self.ALdist_per_n = []
class LengthData:
length_total = np.empty(len(fp))
length_branch = []
length_direct = []
length_calyx = []
length_LH = []
length_AL = []
length_calyx_total = []
length_LH_total = []
length_AL_total = []
class BranchData:
branchTrk = []
branch_dist = []
branchP = []
calyx_branchTrk = []
calyx_branchP = []
calyx_endP = []
LH_branchTrk = []
LH_branchP = []
LH_endP = []
AL_branchTrk = []
AL_branchP = []
AL_endP = []
branchNum = np.empty(len(fp))
np.random.seed(Parameter.SEED)
MorphData = MorphData()
r_d_x = -10
r_rad_x = np.radians(r_d_x)
r_x = np.array([0, 1, 0])
r_vec_x = r_rad_x * r_x
rotx = Rotation.from_rotvec(r_vec_x)
r_d_y = -25
r_rad_y = np.radians(r_d_y)
r_y = np.array([0, 1, 0])
r_vec_y = r_rad_y * r_y
roty = Rotation.from_rotvec(r_vec_y)
r_d_z = -40
r_rad_z = np.radians(r_d_z)
r_z = np.array([0, 1, 0])
r_vec_z = r_rad_z * r_z
rotz = Rotation.from_rotvec(r_vec_z)
for f in range(len(fp)):
print(f, fp[f])
morph_neu_id = []
morph_neu_parent = []
morph_neu_prox = []
morph_neu_dist = []
df = pd.read_csv(fp[f], delimiter=' ', header=None)
MorphData.neuron_id.append(os.path.basename(fp[f]).split('.')[0])
scall = int(df.iloc[np.where(df[6] == -1)[0]].values[0][0])
MorphData.somaP.append(scall)
MorphData.morph_id.append(df[0].tolist())
MorphData.morph_parent.append(df[6].tolist())
MorphData.morph_dist.append(np.divide(np.array(df[[2,3,4]]), 1000).tolist()) # Scale
ctr = Counter(df[6].tolist())
ctrVal = list(ctr.values())
ctrKey = list(ctr.keys())
BranchData.branchNum[f] = sum(i > 1 for i in ctrVal)
branchInd = np.array(ctrKey)[np.where(np.array(ctrVal) > 1)[0]]
neu_branchTrk = []
startid = []
endid = []
neu_indBranchTrk = []
branch_dist_temp1 = []
length_branch_temp = []
indMorph_dist_temp1 = []
indMDistLen_temp = []
list_end = np.setdiff1d(MorphData.morph_id[f], MorphData.morph_parent[f])
BranchData.branchP.append(branchInd.tolist())
MorphData.endP.append(list_end)
bPoint = np.append(branchInd, list_end)
calyxdist_per_n_temp = []
LHdist_per_n_temp = []
ALdist_per_n_temp = []
length_calyx_per_n = []
length_LH_per_n = []
length_AL_per_n = []
calyx_branchTrk_temp = []
calyx_branchP_temp = []
LH_branchTrk_temp = []
LH_branchP_temp = []
AL_branchTrk_temp = []
AL_branchP_temp = []
calyx_endP_temp = []
LH_endP_temp = []
AL_endP_temp = []
for bp in range(len(bPoint)):
if bPoint[bp] != scall:
neu_branchTrk_temp = []
branch_dist_temp2 = []
dist = 0
neu_branchTrk_temp.append(bPoint[bp])
branch_dist_temp2.append(MorphData.morph_dist[f][MorphData.morph_id[f].index(bPoint[bp])])
parentTrck = bPoint[bp]
parentTrck = MorphData.morph_parent[f][MorphData.morph_id[f].index(parentTrck)]
if parentTrck != -1:
neu_branchTrk_temp.append(parentTrck)
rhs = branch_dist_temp2[-1]
lhs = MorphData.morph_dist[f][MorphData.morph_id[f].index(parentTrck)]
branch_dist_temp2.append(lhs)
dist += np.linalg.norm(np.subtract(rhs, lhs))
while (parentTrck not in branchInd) and (parentTrck != -1):
parentTrck = MorphData.morph_parent[f][MorphData.morph_id[f].index(parentTrck)]
if parentTrck != -1:
neu_branchTrk_temp.append(parentTrck)
rhs = branch_dist_temp2[-1]
lhs = MorphData.morph_dist[f][MorphData.morph_id[f].index(parentTrck)]
branch_dist_temp2.append(lhs)
dist += np.linalg.norm(np.subtract(rhs, lhs))
if len(neu_branchTrk_temp) > 1:
neu_branchTrk.append(neu_branchTrk_temp)
startid.append(neu_branchTrk_temp[0])
endid.append(neu_branchTrk_temp[-1])
branch_dist_temp1.append(branch_dist_temp2)
length_branch_temp.append(dist)
# rotate -25 degrees on y-axis
branch_dist_temp2_rot = roty.apply(branch_dist_temp2)
# rotate -35 degrees on x-axis
branch_dist_temp2_rot2 = rotx.apply(branch_dist_temp2)
# rotate 50 degrees on z-axis
branch_dist_temp2_rot3 = rotz.apply(branch_dist_temp2)
if ((np.array(branch_dist_temp2_rot)[:,0] > 353.95).all() and (np.array(branch_dist_temp2_rot)[:,0] < 426.14).all() and
(np.array(branch_dist_temp2_rot)[:,1] > 176.68).all() and (np.array(branch_dist_temp2_rot)[:,1] < 272.91).all() and
(np.array(branch_dist_temp2_rot3)[:,2] > 434.08).all() and (np.array(branch_dist_temp2_rot3)[:,2] < 496.22).all()):
MorphData.calyxdist.append(branch_dist_temp2)
MorphData.calyxdist_trk.append(f)
calyxdist_per_n_temp.append(branch_dist_temp2)
length_calyx_per_n.append(dist)
calyx_branchTrk_temp.append(neu_branchTrk_temp)
calyx_branchP_temp.append(list(set(neu_branchTrk_temp) & set(branchInd)))
if bPoint[bp] in list_end:
calyx_endP_temp.append(bPoint[bp])
elif ((np.array(branch_dist_temp2_rot)[:,0] < 353.95).all() and (np.array(branch_dist_temp2_rot)[:,1] > 176.68).all() and
(np.array(branch_dist_temp2_rot)[:,1] < 272.91).all() and (np.array(branch_dist_temp2_rot)[:,2] > 286.78).all() and
(np.array(branch_dist_temp2_rot)[:,2] < 343.93).all()):
MorphData.LHdist.append(branch_dist_temp2)
MorphData.LHdist_trk.append(f)
LHdist_per_n_temp.append(branch_dist_temp2)
length_LH_per_n.append(dist)
LH_branchTrk_temp.append(neu_branchTrk_temp)
LH_branchP_temp.append(list(set(neu_branchTrk_temp) & set(branchInd)))
if bPoint[bp] in list_end:
LH_endP_temp.append(bPoint[bp])
elif ((np.array(branch_dist_temp2_rot)[:,0] > 426.14).all() and (np.array(branch_dist_temp2_rot)[:,0] < 533.42).all() and
(np.array(branch_dist_temp2_rot)[:,1] > 272.91).all() and (np.array(branch_dist_temp2_rot)[:,1] < 363.12).all() and
(np.array(branch_dist_temp2_rot2)[:,2] < 180.77).all()):
MorphData.ALdist.append(branch_dist_temp2)
MorphData.ALdist_trk.append(f)
ALdist_per_n_temp.append(branch_dist_temp2)
length_AL_per_n.append(dist)
AL_branchTrk_temp.append(neu_branchTrk_temp)
AL_branchP_temp.append(list(set(neu_branchTrk_temp) & set(branchInd)))
if bPoint[bp] in list_end:
AL_endP_temp.append(bPoint[bp])
BranchData.branchTrk.append(neu_branchTrk)
BranchData.branch_dist.append(branch_dist_temp1)
LengthData.length_branch.append(length_branch_temp)
MorphData.calyxdist_per_n.append(calyxdist_per_n_temp)
MorphData.LHdist_per_n.append(LHdist_per_n_temp)
MorphData.ALdist_per_n.append(ALdist_per_n_temp)
LengthData.length_calyx.append(length_calyx_per_n)
LengthData.length_LH.append(length_LH_per_n)
LengthData.length_AL.append(length_AL_per_n)
BranchData.calyx_branchTrk.append(calyx_branchTrk_temp)
BranchData.calyx_branchP.append(np.unique([item for sublist in calyx_branchP_temp for item in sublist]).tolist())
BranchData.LH_branchTrk.append(LH_branchTrk_temp)
BranchData.LH_branchP.append(np.unique([item for sublist in LH_branchP_temp for item in sublist]).tolist())
BranchData.AL_branchTrk.append(AL_branchTrk_temp)
BranchData.AL_branchP.append(np.unique([item for sublist in AL_branchP_temp for item in sublist]).tolist())
BranchData.calyx_endP.append(calyx_endP_temp)
BranchData.LH_endP.append(LH_endP_temp)
BranchData.AL_endP.append(AL_endP_temp)
glo_list = []
glo_idx = []
for f in range(len(MorphData.neuron_id)):
idx = np.where(glo_info.skid == int(MorphData.neuron_id[f]))[0][0]
if 'glomerulus' in glo_info['old neuron name'][idx]:
if glo_info['type'][idx] != 'unknown glomerulus': # One neuron in this glomerulus that does not project to LH
if glo_info['type'][idx] == 'DP1l, VL2p': # Neuron with both DP1l and VL2p label
glo_name = 'VL2p' # Neuron seems to have more similar spetrum as VL2p
else:
glo_name = glo_info['type'][idx]
if glo_name in glo_list:
glo_idx[glo_list.index(glo_name)].append(f)
else:
glo_list.append(glo_name)
glo_idx.append([f])
glo_len = [len(arr) for arr in glo_idx]
glo_lb = [sum(glo_len[0:i]) for i in range(len(glo_len)+1)]
glo_lbs = np.subtract(glo_lb, glo_lb[0])
glo_float = np.divide(glo_lbs, glo_lbs[-1])
glo_idx_flat = [item for sublist in glo_idx for item in sublist]
glo_idx_flat.sort()
glo_list_neuron = np.repeat(glo_list, glo_len)
glo_lb_idx = []
for i in range(len(glo_lb)-1):
glo_lb_idx.append(np.arange(glo_lb[i],glo_lb[i+1]))
morph_dist_calyx = []
morph_dist_LH = []
morph_dist_AL = []
for i in range(len(glo_list)):
morph_dist_calyx_temp = []
morph_dist_LH_temp = []
morph_dist_AL_temp = []
morph_dist_calyx_bp_temp = []
morph_dist_LH_bp_temp = []
morph_dist_AL_bp_temp = []
morph_dist_calyx_ep_temp = []
morph_dist_LH_ep_temp = []
morph_dist_AL_ep_temp = []
for j in range(len(glo_idx[i])):
morph_dist_calyx_temp2 = []
morph_dist_LH_temp2 = []
morph_dist_AL_temp2 = []
morph_dist_calyx_bp_temp2 = []
morph_dist_LH_bp_temp2 = []
morph_dist_AL_bp_temp2 = []
morph_dist_calyx_ep_temp2 = []
morph_dist_LH_ep_temp2 = []
morph_dist_AL_ep_temp2 = []
for p in range(len(MorphData.morph_dist[glo_idx[i][j]])):
branch_dist_temp2_rot = roty.apply(np.array(MorphData.morph_dist[glo_idx[i][j]][p]))
branch_dist_temp2_rot2 = rotx.apply(np.array(MorphData.morph_dist[glo_idx[i][j]][p]))
branch_dist_temp2_rot3 = rotz.apply(np.array(MorphData.morph_dist[glo_idx[i][j]][p]))
if ((np.array(branch_dist_temp2_rot)[0] > 353.95).all() and (np.array(branch_dist_temp2_rot)[0] < 426.14).all() and
(np.array(branch_dist_temp2_rot)[1] > 176.68).all() and (np.array(branch_dist_temp2_rot)[1] < 272.91).all() and
(np.array(branch_dist_temp2_rot3)[2] > 434.08).all() and (np.array(branch_dist_temp2_rot3)[2] < 496.22).all()):
morph_dist_calyx_temp2.append(MorphData.morph_dist[glo_idx[i][j]][p])
elif ((np.array(branch_dist_temp2_rot)[0] < 353.95).all() and (np.array(branch_dist_temp2_rot)[1] > 176.68).all() and
(np.array(branch_dist_temp2_rot)[1] < 272.91).all() and (np.array(branch_dist_temp2_rot)[2] > 286.78).all() and
(np.array(branch_dist_temp2_rot)[2] < 343.93).all()):
morph_dist_LH_temp2.append(MorphData.morph_dist[glo_idx[i][j]][p])
elif ((np.array(branch_dist_temp2_rot)[0] > 426.14).all() and (np.array(branch_dist_temp2_rot)[0] < 533.42).all() and
(np.array(branch_dist_temp2_rot)[1] > 272.91).all() and (np.array(branch_dist_temp2_rot)[1] < 363.12).all() and
(np.array(branch_dist_temp2_rot2)[2] < 180.77).all()):
morph_dist_AL_temp2.append(MorphData.morph_dist[glo_idx[i][j]][p])
morph_dist_calyx_temp.append(morph_dist_calyx_temp2)
morph_dist_LH_temp.append(morph_dist_LH_temp2)
morph_dist_AL_temp.append(morph_dist_AL_temp2)
morph_dist_calyx.append(morph_dist_calyx_temp)
morph_dist_LH.append(morph_dist_LH_temp)
morph_dist_AL.append(morph_dist_AL_temp)
cg = np.array(MorphData.calyxdist_per_n, dtype=object)[glo_idx_flat]
lg = np.array(MorphData.LHdist_per_n, dtype=object)[glo_idx_flat]
ag = np.array(MorphData.ALdist_per_n, dtype=object)[glo_idx_flat]
cg = [item for sublist in cg for item in sublist]
lg = [item for sublist in lg for item in sublist]
ag = [item for sublist in ag for item in sublist]
MorphData.calyxdist_flat_glo = [item for sublist in cg for item in sublist]
MorphData.LHdist_flat_glo = [item for sublist in lg for item in sublist]
MorphData.ALdist_flat_glo = [item for sublist in ag for item in sublist]
#%%
morph_dist_calyx_CM = []
morph_dist_LH_CM = []
morph_dist_AL_CM = []
morph_dist_calyx_std = []
morph_dist_LH_std = []
morph_dist_AL_std = []
for i in range(len(morph_dist_AL)):
morph_dist_calyx_CM_temp = []
morph_dist_LH_CM_temp = []
morph_dist_AL_CM_temp = []
morph_dist_calyx_std_temp = []
morph_dist_LH_std_temp = []
morph_dist_AL_std_temp = []
for j in range(len(morph_dist_AL[i])):
morph_dist_calyx_CM_temp.append(np.average(np.array(morph_dist_calyx[i][j]), axis=0))
morph_dist_LH_CM_temp.append(np.average(np.array(morph_dist_LH[i][j]), axis=0))
morph_dist_AL_CM_temp.append(np.average(np.array(morph_dist_AL[i][j]), axis=0))
morph_dist_calyx_std_temp.append(np.std(np.array(morph_dist_calyx[i][j]), axis=0))
morph_dist_LH_std_temp.append(np.std(np.array(morph_dist_LH[i][j]), axis=0))
morph_dist_AL_std_temp.append(np.std(np.array(morph_dist_AL[i][j]), axis=0))
morph_dist_calyx_CM.append(morph_dist_calyx_CM_temp)
morph_dist_LH_CM.append(morph_dist_LH_CM_temp)
morph_dist_AL_CM.append(morph_dist_AL_CM_temp)
morph_dist_LH_std.append(morph_dist_LH_std_temp)
morph_dist_calyx_std.append(morph_dist_calyx_std_temp)
morph_dist_AL_std.append(morph_dist_AL_std_temp)
from scipy.spatial import ConvexHull
morph_dist_calyx_flt = [item for sublist in morph_dist_calyx for item in sublist]
morph_dist_calyx_flat = [item for sublist in morph_dist_calyx_flt for item in sublist]
mdcalyx_xmax = np.max(np.array(morph_dist_calyx_flat)[:,0])
mdcalyx_xmin = np.min(np.array(morph_dist_calyx_flat)[:,0])
mdcalyx_ymax = np.max(np.array(morph_dist_calyx_flat)[:,1])
mdcalyx_ymin = np.min(np.array(morph_dist_calyx_flat)[:,1])
mdcalyx_zmax = np.max(np.array(morph_dist_calyx_flat)[:,2])
mdcalyx_zmin = np.min(np.array(morph_dist_calyx_flat)[:,2])
morph_dist_LH_flt = [item for sublist in morph_dist_LH for item in sublist]
morph_dist_LH_flat = [item for sublist in morph_dist_LH_flt for item in sublist]
mdLH_xmax = np.max(np.array(morph_dist_LH_flat)[:,0])
mdLH_xmin = np.min(np.array(morph_dist_LH_flat)[:,0])
mdLH_ymax = np.max(np.array(morph_dist_LH_flat)[:,1])
mdLH_ymin = np.min(np.array(morph_dist_LH_flat)[:,1])
mdLH_zmax = np.max(np.array(morph_dist_LH_flat)[:,2])
mdLH_zmin = np.min(np.array(morph_dist_LH_flat)[:,2])
morph_dist_AL_flt = [item for sublist in morph_dist_AL for item in sublist]
morph_dist_AL_flat = [item for sublist in morph_dist_AL_flt for item in sublist]
mdAL_xmax = np.max(np.array(morph_dist_AL_flat)[:,0])
mdAL_xmin = np.min(np.array(morph_dist_AL_flat)[:,0])
mdAL_ymax = np.max(np.array(morph_dist_AL_flat)[:,1])
mdAL_ymin = np.min(np.array(morph_dist_AL_flat)[:,1])
mdAL_zmax = np.max(np.array(morph_dist_AL_flat)[:,2])
mdAL_zmin = np.min(np.array(morph_dist_AL_flat)[:,2])
hull_calyx = ConvexHull(np.array(morph_dist_calyx_flat))
calyx_vol = hull_calyx.volume
calyx_area = hull_calyx.area
calyx_density_l = np.sum(LengthData.length_calyx_total)/calyx_vol
hull_LH = ConvexHull(np.array(morph_dist_LH_flat))
LH_vol = hull_LH.volume
LH_area = hull_LH.area
LH_density_l = np.sum(LengthData.length_LH_total)/LH_vol
hull_AL = ConvexHull(np.array(morph_dist_AL_flat))
AL_vol = hull_AL.volume
AL_area = hull_AL.area
AL_density_l = np.sum(LengthData.length_AL_total)/AL_vol
#%% Inter-PN distance calculation
# The script can re-calculate the inter-PN distances but this can take long.
# Change the LOAD flag to False to do so.
LOAD = True
if LOAD:
morph_dist_calyx_r_new = np.load(r'./morph_dist_calyx_r_new.npy')
morph_dist_LH_r_new = np.load(r'./morph_dist_LH_r_new.npy')
morph_dist_AL_r_new = np.load(r'./morph_dist_AL_r_new.npy')
else:
morph_dist_calyx_CM_flat = np.array([item for sublist in morph_dist_calyx_CM for item in sublist])
morph_dist_LH_CM_flat = np.array([item for sublist in morph_dist_LH_CM for item in sublist])
morph_dist_AL_CM_flat = np.array([item for sublist in morph_dist_AL_CM for item in sublist])
morph_dist_calyx_r_new = np.zeros((len(morph_dist_calyx_CM_flat), len(morph_dist_calyx_CM_flat)))
morph_dist_LH_r_new = np.zeros((len(morph_dist_LH_CM_flat), len(morph_dist_LH_CM_flat)))
morph_dist_AL_r_new = np.zeros((len(morph_dist_AL_CM_flat), len(morph_dist_AL_CM_flat)))
for i in range(len(morph_dist_calyx_CM_flat)):
for j in range(len(morph_dist_calyx_CM_flat)):
morph_dist_calyx_ed = scipy.spatial.distance.cdist(morph_dist_calyx_flt[i], morph_dist_calyx_flt[j])
morph_dist_LH_ed = scipy.spatial.distance.cdist(morph_dist_LH_flt[i], morph_dist_LH_flt[j])
morph_dist_AL_ed = scipy.spatial.distance.cdist(morph_dist_AL_flt[i], morph_dist_AL_flt[j])
# NNmetric
if len(morph_dist_calyx_flt[i]) < len(morph_dist_calyx_flt[j]):
N_calyx = len(morph_dist_calyx_flt[i])
dmin_calyx = np.min(morph_dist_calyx_ed, axis=1)
elif len(morph_dist_calyx_flt[i]) > len(morph_dist_calyx_flt[j]):
N_calyx = len(morph_dist_calyx_flt[j])
dmin_calyx = np.min(morph_dist_calyx_ed, axis=0)
else:
N_calyx = len(morph_dist_calyx_flt[i])
r1 = np.min(morph_dist_calyx_ed, axis=0)
r2 = np.min(morph_dist_calyx_ed, axis=1)
if np.sum(r1) < np.sum(r2):
dmin_calyx = r1
else:
dmin_calyx = r2
if len(morph_dist_LH_flt[i]) < len(morph_dist_LH_flt[j]):
N_LH = len(morph_dist_LH_flt[i])
dmin_LH = np.min(morph_dist_LH_ed, axis=1)
elif len(morph_dist_LH_flt[i]) > len(morph_dist_LH_flt[j]):
N_LH = len(morph_dist_LH_flt[j])
dmin_LH = np.min(morph_dist_LH_ed, axis=0)
else:
N_LH = len(morph_dist_LH_flt[i])
r1 = np.min(morph_dist_LH_ed, axis=0)
r2 = np.min(morph_dist_LH_ed, axis=1)
if np.sum(r1) < np.sum(r2):
dmin_LH = r1
else:
dmin_LH = r2
if len(morph_dist_AL_flt[i]) < len(morph_dist_AL_flt[j]):
N_AL = len(morph_dist_AL_flt[i])
dmin_AL = np.min(morph_dist_AL_ed, axis=1)
elif len(morph_dist_AL_flt[i]) > len(morph_dist_AL_flt[j]):
N_AL = len(morph_dist_AL_flt[j])
dmin_AL = np.min(morph_dist_AL_ed, axis=0)
else:
N_AL = len(morph_dist_AL_flt[i])
r1 = np.min(morph_dist_AL_ed, axis=0)
r2 = np.min(morph_dist_AL_ed, axis=1)
if np.sum(r1) < np.sum(r2):
dmin_AL = r1
else:
dmin_AL = r2
morph_dist_calyx_r_new[i][j] = np.sqrt(np.divide(np.sum(np.square(dmin_calyx)), N_calyx))
morph_dist_LH_r_new[i][j] = np.sqrt(np.divide(np.sum(np.square(dmin_LH)), N_LH))
morph_dist_AL_r_new[i][j] = np.sqrt(np.divide(np.sum(np.square(dmin_AL)), N_AL))
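# Optional (an assumption, mirroring the LOAD branch above): once the loops finish, the
# freshly computed matrices could be cached so later runs can simply set LOAD = True, e.g.
# np.save(r'./morph_dist_calyx_r_new.npy', morph_dist_calyx_r_new), and likewise for LH and AL.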
calyxdist_cluster_u_full_new = []
calyxdist_noncluster_u_full_new = []
for i in range(len(glo_list)):
calyx_sq = morph_dist_calyx_r_new[glo_lbs[i]:glo_lbs[i+1],glo_lbs[i]:glo_lbs[i+1]]
calyx_sq_tri = calyx_sq[np.triu_indices_from(calyx_sq, k=1)]
calyx_nc = np.delete(morph_dist_calyx_r_new[glo_lbs[i]:glo_lbs[i+1]], np.arange(glo_lbs[i], glo_lbs[i+1]))
if len(calyx_sq_tri) > 0:
calyxdist_cluster_u_full_new.append(calyx_sq_tri)
else:
calyxdist_cluster_u_full_new.append([])
calyxdist_noncluster_u_full_new.append(calyx_nc.flatten())
calyxdist_cluster_u_full_flat_new = [item for sublist in calyxdist_cluster_u_full_new for item in sublist]
calyxdist_noncluster_u_full_flat_new = [item for sublist in calyxdist_noncluster_u_full_new for item in sublist]
LHdist_cluster_u_full_new = []
LHdist_noncluster_u_full_new = []
for i in range(len(glo_list)):
LH_sq = morph_dist_LH_r_new[glo_lbs[i]:glo_lbs[i+1],glo_lbs[i]:glo_lbs[i+1]]
LH_sq_tri = LH_sq[np.triu_indices_from(LH_sq, k=1)]
LH_nc = np.delete(morph_dist_LH_r_new[glo_lbs[i]:glo_lbs[i+1]], np.arange(glo_lbs[i], glo_lbs[i+1]))
if len(LH_sq_tri) > 0:
LHdist_cluster_u_full_new.append(LH_sq_tri)
else:
LHdist_cluster_u_full_new.append([])
LHdist_noncluster_u_full_new.append(LH_nc.flatten())
LHdist_cluster_u_full_flat_new = [item for sublist in LHdist_cluster_u_full_new for item in sublist]
LHdist_noncluster_u_full_flat_new = [item for sublist in LHdist_noncluster_u_full_new for item in sublist]
ALdist_cluster_u_full_new = []
ALdist_noncluster_u_full_new = []
for i in range(len(glo_list)):
AL_sq = morph_dist_AL_r_new[glo_lbs[i]:glo_lbs[i+1],glo_lbs[i]:glo_lbs[i+1]]
AL_sq_tri = AL_sq[np.triu_indices_from(AL_sq, k=1)]
AL_nc = np.delete(morph_dist_AL_r_new[glo_lbs[i]:glo_lbs[i+1]], np.arange(glo_lbs[i], glo_lbs[i+1]))
if len(AL_sq_tri) > 0:
ALdist_cluster_u_full_new.append(AL_sq_tri)
else:
ALdist_cluster_u_full_new.append([])
ALdist_noncluster_u_full_new.append(AL_nc.flatten())
ALdist_cluster_u_full_flat_new = [item for sublist in ALdist_cluster_u_full_new for item in sublist]
ALdist_noncluster_u_full_flat_new = [item for sublist in ALdist_noncluster_u_full_new for item in sublist]
print("Calyx cluster Mean: " + str(np.mean(calyxdist_cluster_u_full_flat_new)) + ", STD: " + str(np.std(calyxdist_cluster_u_full_flat_new)))
print("Calyx noncluster Mean: " + str(np.mean(calyxdist_noncluster_u_full_flat_new)) + ", STD: " + str(np.std(calyxdist_noncluster_u_full_flat_new)))
print("LH cluster Mean: " + str(np.mean(LHdist_cluster_u_full_flat_new)) + ", STD: " + str(np.std(LHdist_cluster_u_full_flat_new)))
print("LH noncluster Mean: " + str(np.mean(LHdist_noncluster_u_full_flat_new)) + ", STD: " + str(np.std(LHdist_noncluster_u_full_flat_new)))
print("AL cluster Mean: " + str(np.mean(ALdist_cluster_u_full_flat_new)) + ", STD: " + str(np.std(ALdist_cluster_u_full_flat_new)))
print("AL noncluster Mean: " + str(np.mean(ALdist_noncluster_u_full_flat_new)) + ", STD: " + str(np.std(ALdist_noncluster_u_full_flat_new)))
#%% Bar graph of d_inter, d_intra, and lambda
fig, ax = plt.subplots(figsize=(6,6))
labels = ['AL', 'MB calyx', 'LH']
x = np.arange(len(labels))
width = .3
cmeans = [np.mean(ALdist_cluster_u_full_flat_new),
np.mean(calyxdist_cluster_u_full_flat_new),
np.mean(LHdist_cluster_u_full_flat_new)]
cerr = [np.std(ALdist_cluster_u_full_flat_new),
np.std(calyxdist_cluster_u_full_flat_new),
np.std(LHdist_cluster_u_full_flat_new)]
ncmeans = [np.mean(ALdist_noncluster_u_full_flat_new),
np.mean(calyxdist_noncluster_u_full_flat_new),
np.mean(LHdist_noncluster_u_full_flat_new)]
ncerr = [np.std(ALdist_noncluster_u_full_flat_new),
np.std(calyxdist_noncluster_u_full_flat_new),
np.std(LHdist_noncluster_u_full_flat_new)]
lamb = [np.mean(ALdist_cluster_u_full_flat_new)/np.mean(ALdist_noncluster_u_full_flat_new),
np.mean(calyxdist_cluster_u_full_flat_new)/np.mean(calyxdist_noncluster_u_full_flat_new),
np.mean(LHdist_cluster_u_full_flat_new)/np.mean(LHdist_noncluster_u_full_flat_new)]
lamberr = [np.sqrt(np.square(cerr[0]/cmeans[0]) + np.square(ncerr[0]/ncmeans[0]))*lamb[0],
np.sqrt(np.square(cerr[1]/cmeans[1]) + np.square(ncerr[1]/ncmeans[1]))*lamb[1],
np.sqrt(np.square(cerr[2]/cmeans[2]) + np.square(ncerr[2]/ncmeans[2]))*lamb[2]]
bar1 = ax.bar(x - width, cmeans, width, yerr=cerr, capsize=5, label=r'$\bar{d}_{{\rm intra}}$', color='tab:blue')
bar2 = ax.bar(x, ncmeans, width, yerr=ncerr, capsize=5, label=r'$\bar{d}_{{\rm inter}}$', color='tab:orange')
ax2 = ax.twinx()
bar3 = ax2.bar(x + width, lamb, width, yerr=lamberr, capsize=5, label='$\lambda$', color='tab:red')
ax2.set_ylim(0, 1)
ax.set_ylabel('Distance ($\mu$m)', fontsize=17)
ax.set_xticks(x)
ax.set_xticklabels(labels, fontsize=17)
ax.tick_params(axis="y", labelsize=15)
ax2.tick_params(axis="y", labelsize=15)
ax2.set_ylabel('Ratio', fontsize=17)
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc=1, fontsize=15)
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(5, 9))
ax1 = fig.add_subplot(3,1,1)
ax2 = fig.add_subplot(3,1,2)
ax3 = fig.add_subplot(3,1,3)
ax1.hist(ALdist_cluster_u_full_flat_new, bins=20, alpha=0.5, density=True)
ax1.hist(ALdist_noncluster_u_full_flat_new, bins=20, alpha=0.5, density=True)
ax1.set_ylim(0, 0.4)
ax1.set_ylabel('AL', fontsize=15)
ax1.legend(['Identical Glomerulus', 'Different Glomeruli'], fontsize=13)
ax2.hist(calyxdist_cluster_u_full_flat_new, bins=20, alpha=0.5, density=True)
ax2.hist(calyxdist_noncluster_u_full_flat_new, bins=20, alpha=0.5, density=True)
ax2.set_ylim(0, 0.4)
ax2.set_ylabel('MB calyx', fontsize=15)
ax3.hist(LHdist_cluster_u_full_flat_new, bins=20, alpha=0.5, density=True)
ax3.hist(LHdist_noncluster_u_full_flat_new, bins=20, alpha=0.5, density=True)
ax3.set_ylim(0, 0.4)
ax3.set_ylabel('LH', fontsize=15)
ax3.set_xlabel(r'Distance $(\mu m)$', fontsize=15)
plt.tight_layout()
plt.show()
#%% per glomerulus d_inter, d_intra, and lambda
calyxtest_cl = []
calyxtest_ncl = []
calyxtest_cl_std = []
calyxtest_ncl_std = []
for i in range(len(calyxdist_cluster_u_full_new)):
calyxtest_cl.append(np.mean(calyxdist_cluster_u_full_new[i]))
calyxtest_cl_std.append(np.std(calyxdist_cluster_u_full_new[i]))
for i in range(len(calyxdist_noncluster_u_full_new)):
calyxtest_ncl.append(np.mean(calyxdist_noncluster_u_full_new[i]))
calyxtest_ncl_std.append(np.std(calyxdist_noncluster_u_full_new[i]))
LHtest_cl = []
LHtest_ncl = []
LHtest_cl_std = []
LHtest_ncl_std = []
for i in range(len(LHdist_cluster_u_full_new)):
LHtest_cl.append(np.mean(LHdist_cluster_u_full_new[i]))
LHtest_cl_std.append(np.std(LHdist_cluster_u_full_new[i]))
for i in range(len(LHdist_noncluster_u_full_new)):
LHtest_ncl.append(np.mean(LHdist_noncluster_u_full_new[i]))
LHtest_ncl_std.append(np.std(LHdist_noncluster_u_full_new[i]))
ALtest_cl = []
ALtest_ncl = []
ALtest_cl_std = []
ALtest_ncl_std = []
for i in range(len(ALdist_cluster_u_full_new)):
ALtest_cl.append(np.mean(ALdist_cluster_u_full_new[i]))
ALtest_cl_std.append(np.std(ALdist_cluster_u_full_new[i]))
for i in range(len(ALdist_noncluster_u_full_new)):
ALtest_ncl.append(np.mean(ALdist_noncluster_u_full_new[i]))
ALtest_ncl_std.append(np.std(ALdist_noncluster_u_full_new[i]))
calyxtest_cl = np.nan_to_num(calyxtest_cl)
calyxtest_ncl = np.nan_to_num(calyxtest_ncl)
LHtest_cl = np.nan_to_num(LHtest_cl)
LHtest_ncl = np.nan_to_num(LHtest_ncl)
ALtest_cl = np.nan_to_num(ALtest_cl)
ALtest_ncl = np.nan_to_num(ALtest_ncl)
calyxtest_cl_std = np.nan_to_num(calyxtest_cl_std)
calyxtest_ncl_std = np.nan_to_num(calyxtest_ncl_std)
LHtest_cl_std = np.nan_to_num(LHtest_cl_std)
LHtest_ncl_std = np.nan_to_num(LHtest_ncl_std)
ALtest_cl_std = np.nan_to_num(ALtest_cl_std)
ALtest_ncl_std = np.nan_to_num(ALtest_ncl_std)
ALtest_idx = np.where(np.array(ALtest_cl) >= 0)[0]
LHtest_idx = np.where(np.array(LHtest_cl) >= 0)[0]
calyxtest_idx = np.where(np.array(calyxtest_cl) >= 0)[0]
type_idx = [17, 21, 26, 9, 48, 6,
10, 44, 37, 27, 36, 16, 39,
30, 2, 15, 45, 1, 42, 50,
8, 13, 19, 4, 32, 34,
5, 12, 43, 33, 23, 22, 49, 14,
46, 20, 3, 38, 40, 18, 35, 25,
0, 7,
11, 47,
41, 24, 28, 31, 29]
attavdict1 = {'DL2d': '#028e00', 'DL2v': '#028e00', 'VL1': '#028e00', 'VL2a': '#028e00', 'VM1': '#028e00', 'VM4': '#028e00',
'DM1': '#7acb2f', 'DM4': '#7acb2f', 'DM5': '#7acb2f', 'DM6': '#7acb2f', 'VA4': '#7acb2f', 'VC2': '#7acb2f', 'VM7d': '#7acb2f',
'DA3': '#00f700', 'DC1': '#00f700', 'DL1': '#00f700', 'VA3': '#00f700', 'VM2': '#00f700', 'VM5d': '#00f700', 'VM5v': '#00f700',
'DA4m': '#a3a3a3', 'VA7m': '#a3a3a3', 'VC3l': '#a3a3a3', 'VC3m': '#a3a3a3', 'VM6': '#a3a3a3', 'VM7v': '#a3a3a3',
'DM2': '#17d9f7', 'DP1l': '#17d9f7', 'DP1m': '#17d9f7', 'V': '#17d9f7', 'VA2': '#17d9f7', 'VC4': '#17d9f7', 'VL2p': '#17d9f7', 'VM3': '#17d9f7',
'D': '#f10000', 'DA2': '#f10000', 'DA4l': '#f10000', 'DC2': '#f10000', 'DC4': '#f10000', 'DL4': '#f10000', 'DL5': '#f10000', 'DM3': '#f10000',
'VA6': '#e8f0be', 'VC1': '#e8f0be',
'VA5': '#b96d3d', 'VA7l': '#b96d3d',
'DA1': '#a200cb', 'DC3': '#a200cb', 'DL3': '#a200cb', 'VA1d': '#a200cb', 'VA1v': '#a200cb'}
attavdict2 = {'DL2d': '#027000', 'DL2v': '#027000', 'VL1': '#027000', 'VL2a': '#027000', 'VM1': '#027000', 'VM4': '#027000',
'DM1': '#5dad2f', 'DM4': '#5dad2f', 'DM5': '#5dad2f', 'DM6': '#5dad2f', 'VA4': '#5dad2f', 'VC2': '#5dad2f', 'VM7d': '#5dad2f',
'DA3': '#05cf02', 'DC1': '#05cf02', 'DL1': '#05cf02', 'VA3': '#05cf02', 'VM2': '#05cf02', 'VM5d': '#05cf02', 'VM5v': '#05cf02',
'DA4m': '#858585', 'VA7m': '#858585', 'VC3l': '#858585', 'VC3m': '#858585', 'VM6': '#858585', 'VM7v': '#858585',
'DM2': '#17becf', 'DP1l': '#17becf', 'DP1m': '#17becf', 'V': '#17becf', 'VA2': '#17becf', 'VC4': '#17becf', 'VL2p': '#17becf', 'VM3': '#17becf',
'D': '#bf0000', 'DA2': '#bf0000', 'DA4l': '#bf0000', 'DC2': '#bf0000', 'DC4': '#bf0000', 'DL4': '#bf0000', 'DL5': '#bf0000', 'DM3': '#bf0000',
'VA6': '#d4d296', 'VC1': '#d4d296',
'VA5': '#91451f', 'VA7l': '#91451f',
'DA1': '#700099', 'DC3': '#700099', 'DL3': '#700099', 'VA1d': '#700099', 'VA1v': '#700099'}
attavdict3 = {'DL2d': '#025200', 'DL2v': '#025200', 'VL1': '#025200', 'VL2a': '#025200', 'VM1': '#025200', 'VM4': '#025200',
'DM1': '#3f8f2f', 'DM4': '#3f8f2f', 'DM5': '#3f8f2f', 'DM6': '#3f8f2f', 'VA4': '#3f8f2f', 'VC2': '#3f8f2f', 'VM7d': '#3f8f2f',
'DA3': '#05a702', 'DC1': '#05a702', 'DL1': '#05a702', 'VA3': '#05a702', 'VM2': '#05a702', 'VM5d': '#05a702', 'VM5v': '#05a702',
'DA4m': '#676767', 'VA7m': '#676767', 'VC3l': '#676767', 'VC3m': '#676767', 'VM6': '#676767', 'VM7v': '#676767',
'DM2': '#17a0a7', 'DP1l': '#17a0a7', 'DP1m': '#17a0a7', 'V': '#17a0a7', 'VA2': '#17a0a7', 'VC4': '#17a0a7', 'VL2p': '#17a0a7', 'VM3': '#17a0a7',
'D': '#8d0000', 'DA2': '#8d0000', 'DA4l': '#8d0000', 'DC2': '#8d0000', 'DC4': '#8d0000', 'DL4': '#8d0000', 'DL5': '#8d0000', 'DM3': '#8d0000',
'VA6': '#b6b46e', 'VC1': '#b6b46e',
'VA5': '#592628', 'VA7l': '#592628',
'DA1': '#480071', 'DC3': '#480071', 'DL3': '#480071', 'VA1d': '#480071', 'VA1v': '#480071'}
updatedxlabel = np.array(glo_list)[type_idx]
attavlist1 = []
attavlist2 = []
attavlist3 = []
for i in updatedxlabel:
attavlist1.append(attavdict1[i])
attavlist2.append(attavdict2[i])
attavlist3.append(attavdict3[i])
type_idx = np.flip(type_idx)
updatedxlabel = np.flip(updatedxlabel)
attavlist1 = np.flip(attavlist1)
attavlist2 = np.flip(attavlist2)
attavlist3 = np.flip(attavlist3)
fig, ax = plt.subplots(1, 3, figsize=(8,12))
x = np.arange(len(calyxtest_idx))
width = .275
ax[0].barh(x + width, ALtest_cl[type_idx],
width,
capsize=5, label='Identical Glomerulus', color=np.array(attavlist1), alpha=0.5)
ax[0].barh(x , calyxtest_cl[type_idx],
width,
capsize=5, label='Different Glomeruli', color=np.array(attavlist2), alpha=0.75)
ax[0].barh(x - width , LHtest_cl[type_idx],
width,
capsize=5, label='Different Glomeruli', color=np.array(attavlist3), alpha=1)
ax[0].set_yticks(x)
ax[0].set_title('$\\bar{d}_{intra,X}$', fontsize=25)
ax[0].set_yticklabels([])
ax[0].set_xticks(np.array([0, 25]))
ax[0].tick_params(axis="x", labelsize=15)
ax[0].set_ylim(x[0] - 1, x[-1] + 1)
ax[0].set_yticklabels(updatedxlabel, rotation=0, fontsize=15)
[t.set_color(i) for (i,t) in zip(np.flip(list(attavdict2.values())), ax[0].yaxis.get_ticklabels())]
ax[1].barh(x + width, ALtest_ncl[type_idx],
width,
capsize=5, label='Identical Glomerulus', color=np.array(attavlist1), alpha=0.5)
ax[1].barh(x, calyxtest_ncl[type_idx],
width,
capsize=5, label='Different Glomeruli', color=np.array(attavlist2), alpha=0.75)
ax[1].barh(x - width, LHtest_ncl[type_idx],
width,
capsize=5, label='Different Glomeruli', color=np.array(attavlist3), alpha=1.)
ax[1].set_yticks(x)
ax[1].set_title('$\\bar{d}_{inter,X}$', fontsize=25)
ax[1].set_yticklabels([])
ax[1].set_xticks(np.array([0, 25]))
ax[1].tick_params(axis="x", labelsize=15)
ax[1].set_ylim(x[0] - 1, x[-1] + 1)
ax[2].barh(x + width, np.divide(ALtest_cl, ALtest_ncl)[type_idx],
width,
capsize=5, label='Identical Glomerulus', color=np.array(attavlist1), alpha=0.5)
ax[2].barh(x, np.divide(calyxtest_cl, calyxtest_ncl)[type_idx],
width,
capsize=5, label='Different Glomeruli', color=np.array(attavlist2), alpha=0.75)
ax[2].barh(x - width, np.divide(LHtest_cl, LHtest_ncl)[type_idx],
width,
capsize=5, label='Different Glomeruli', color=np.array(attavlist3), alpha=1.)
ax[2].set_yticks(x)
ax[2].set_title('$\\lambda_{X}$', fontsize=25)
ax[2].set_xticks(np.array([0, 0.5, 1, 1.5]))
ax[2].set_yticklabels([])
ax[2].tick_params(axis="x", labelsize=15)
ax[2].set_ylim(x[0] - 1, x[-1] + 1)
plt.tight_layout()
plt.show()
n = np.nonzero(np.divide(LHtest_cl, LHtest_ncl)[type_idx])
comb = pd.DataFrame({'AL': np.divide(ALtest_cl, ALtest_ncl)[type_idx][n],
'calyx': np.divide(calyxtest_cl, calyxtest_ncl)[type_idx][n],
'LH': np.divide(LHtest_cl, LHtest_ncl)[type_idx][n]})
comb_decay = pd.DataFrame({'AL': np.divide(ALtest_cl, ALtest_ncl)[[17, 21, 26, 9, 48]],
'calyx': np.divide(calyxtest_cl, calyxtest_ncl)[[17, 21, 26, 9, 48]],
'LH': np.divide(LHtest_cl, LHtest_ncl)[[17, 21, 26, 9, 48]]})
comb_pheromone = pd.DataFrame({'AL': np.divide(ALtest_cl, ALtest_ncl)[[41, 24, 28, 31, 29]],
'calyx': np.divide(calyxtest_cl, calyxtest_ncl)[[41, 24, 28, 31, 29]],
'LH': np.divide(LHtest_cl, LHtest_ncl)[[41, 24, 28, 31, 29]]})
fig, ax = plt.subplots(figsize=(4,4))
for i in range(len(type_idx)):
if np.divide(LHtest_cl, LHtest_ncl)[type_idx[i]] != 0:
plt.scatter(np.divide(LHtest_cl, LHtest_ncl)[type_idx[i]],
np.divide(ALtest_cl, ALtest_ncl)[type_idx[i]],
color=attavdict2[updatedxlabel[i]])
coef = np.polyfit(comb['LH'], comb['AL'], 1)
poly1d_fn = np.poly1d(coef)
ax.set_xlim(0, 1.5)
ax.set_ylim(0, 1.5)
ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4])
ax.set_xlabel(r'LH, $\lambda_{X}$', fontsize=15)
ax.set_ylabel(r'AL, $\lambda_{X}$', fontsize=15)
plt.show()
fig, ax = plt.subplots(figsize=(4,4))
for i in range(len(type_idx)):
if np.divide(calyxtest_cl, calyxtest_ncl)[type_idx[i]] != 0:
plt.scatter(np.divide(calyxtest_cl, calyxtest_ncl)[type_idx[i]],
np.divide(ALtest_cl, ALtest_ncl)[type_idx[i]],
color=attavdict2[updatedxlabel[i]])
coef = np.polyfit(comb['calyx'], comb['AL'], 1)
poly1d_fn = np.poly1d(coef)
ax.set_xlim(0, 1.5)
ax.set_ylim(0, 1.5)
ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4])
ax.set_xlabel(r'MB calyx, $\lambda_{X}$', fontsize=15)
ax.set_ylabel(r'AL, $\lambda_{X}$', fontsize=15)
plt.show()
fig, ax = plt.subplots(figsize=(4,4))
for i in range(len(type_idx)):
if np.divide(calyxtest_cl, calyxtest_ncl)[type_idx[i]] != 0:
plt.scatter(np.divide(calyxtest_cl, calyxtest_ncl)[type_idx[i]],
np.divide(LHtest_cl, LHtest_ncl)[type_idx[i]],
color=attavdict2[updatedxlabel[i]])
coef = np.polyfit(comb_pheromone['calyx'], comb_pheromone['LH'], 1)
poly1d_fn = np.poly1d(coef)
plt.plot(np.arange(0.2, 1., 0.1), poly1d_fn(np.arange(0.2, 1., 0.1)), attavdict2['DL3'], ls='--')
coef = np.polyfit(comb_decay['calyx'], comb_decay['LH'], 1)
poly1d_fn = np.poly1d(coef)
plt.plot(np.arange(0.1, 1., 0.1), poly1d_fn(np.arange(0.1, 1., 0.1)), attavdict2['VM4'], ls='--')
coef = np.polyfit(comb['calyx'], comb['LH'], 1)
poly1d_fn = np.poly1d(coef)
plt.text(0.9, 0.05, '$r=-0.997$\n$p<0.001$', color=attavdict2['DL3'], fontsize=11)
plt.text(0.6, 0.85, '$r=0.969$\n$p<0.01$', color=attavdict2['VM4'], fontsize=11)
ax.set_ylim(0, 1.5)
ax.set_xlim(0, 1.5)
ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4])
ax.set_xlabel(r'MB calyx, $\lambda_{X}$', fontsize=15)
ax.set_ylabel(r'LH, $\lambda_{X}$', fontsize=15)
plt.show()
#%% Clustering
L_AL_new_ind = scipy.cluster.hierarchy.linkage(scipy.spatial.distance.squareform(morph_dist_AL_r_new), method='complete', optimal_ordering=True)
L_calyx_new_ind = scipy.cluster.hierarchy.linkage(scipy.spatial.distance.squareform(morph_dist_calyx_r_new), method='complete', optimal_ordering=True)
L_LH_new_ind = scipy.cluster.hierarchy.linkage(scipy.spatial.distance.squareform(morph_dist_LH_r_new), method='complete', optimal_ordering=True)
glo_idx_flat_unsrt = [item for sublist in glo_idx for item in sublist]
fig, ax = plt.subplots(figsize=(20, 3))
R_AL_new = scipy.cluster.hierarchy.dendrogram(L_AL_new_ind,
orientation='top',
labels=glo_list_neuron,
distance_sort='ascending',
show_leaf_counts=False,
leaf_font_size=10,
color_threshold=9.5)
ax.set_yticks([])
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
plt.show()
fig, ax = plt.subplots(figsize=(20, 3))
R_calyx_new = scipy.cluster.hierarchy.dendrogram(L_calyx_new_ind,
orientation='top',
labels=glo_list_neuron,
distance_sort='ascending',
show_leaf_counts=False,
leaf_font_size=10,
color_threshold=16)
ax.set_yticks([])
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
plt.show()
fig, ax = plt.subplots(figsize=(20, 3))
R_LH_new = scipy.cluster.hierarchy.dendrogram(L_LH_new_ind,
orientation='top',
labels=glo_list_neuron,
distance_sort='ascending',
show_leaf_counts=False,
leaf_font_size=10,
color_threshold=25)
ax.set_yticks([])
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
plt.show()
AL_t = []
LH_t = []
calyx_t = []
for k in np.arange(2, 70):
ind_AL = scipy.cluster.hierarchy.fcluster(L_AL_new_ind, k, 'maxclust')
AL_t.append(sklearn.metrics.silhouette_score(morph_dist_AL_r_new, ind_AL, metric="precomputed"))
ind_calyx = scipy.cluster.hierarchy.fcluster(L_calyx_new_ind, k, 'maxclust')
calyx_t.append(sklearn.metrics.silhouette_score(morph_dist_calyx_r_new, ind_calyx, metric="precomputed"))
ind_LH = scipy.cluster.hierarchy.fcluster(L_LH_new_ind, k, 'maxclust')
LH_t.append(sklearn.metrics.silhouette_score(morph_dist_LH_r_new, ind_LH, metric="precomputed"))
fig, ax = plt.subplots(figsize=(4, 3))
plt.plot(np.arange(2, 70), AL_t, color='tab:blue')
plt.plot(np.arange(2, 70), calyx_t, color='tab:orange')
plt.plot(np.arange(2, 70), LH_t, color='tab:green')
plt.legend(['AL', 'MB calyx', 'LH'], loc='center right', fontsize=10)
plt.ylabel('Average Silhouette Coefficients', fontsize=12)
plt.xlabel('Number of Clusters', fontsize=12)
plt.show()
print(np.argmax(AL_t))
print(np.argmax(calyx_t))
print(np.argmax(LH_t))
morph_dist_calyx_r_new_df = | pd.DataFrame(morph_dist_calyx_r_new) | pandas.DataFrame |
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
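        # positions that were NaN in the original series are expected to compare False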
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | Index(right)
tm.assert_series_equal(result, expected)
result = left | Series(right)
tm.assert_series_equal(result, expected)
expected = Series([False, True, True, True, True])
result = left ^ right
tm.assert_series_equal(result, expected)
result = left ^ np.array(right)
tm.assert_series_equal(result, expected)
result = left ^ | Index(right) | pandas.Index |
import sys
import os
import torch
import numpy as np
import torch_geometric.datasets
import pyximport
from torch_geometric.data import InMemoryDataset, download_url
import pandas as pd
from sklearn import preprocessing
pyximport.install(setup_args={'include_dirs': np.get_include()})
import os.path as osp
from torch_geometric.data import Data
import time
from torch_geometric.utils import add_self_loops, negative_sampling
from torch_geometric.data import Dataset
from functools import lru_cache
import copy
from fairseq.data import (
NestedDictionaryDataset,
NumSamplesDataset,
)
import json
import pathlib
from pathlib import Path
BASE = Path(os.path.realpath(__file__)).parent
GLOBAL_ROOT = str(BASE / 'graphormer_repo' / 'graphormer')
sys.path.insert(1, (GLOBAL_ROOT))
from data.wrapper import preprocess_item
import datetime
def find_part(hour):
if hour < 11:
part = 1
elif (hour > 11) & (hour < 20):
part = 2
else:
part = 3
return part
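# e.g. find_part(9) -> 1 (morning), find_part(15) -> 2 (midday), find_part(23) -> 3 (evening/night);
# note that hour == 11 itself falls through to part 3 with the branching above.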
def prepare_raw_dataset_edge(dataset_name):
if dataset_name == 'abakan':
raw_data = pd.read_csv('datasets/abakan/raw/abakan_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/abakan/raw/all_roads_graph.pickle').to_networkx().edges())
all_nodes = pd.read_pickle('datasets/abakan/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/abakan/raw/graph_abakan_init.csv')
elif dataset_name == 'omsk':
raw_data = pd.read_csv('datasets/omsk/raw/omsk_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/omsk/raw/all_roads_graph.pickle').to_networkx().edges())
# all_nodes = pd.read_pickle('datasets/omsk/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/omsk/raw/graph_omsk_init.csv')
all_roads_dataset = pd.DataFrame()
all_edge_list = [list((all_roads_graph)[i]) for i in range(0,len( (all_roads_graph)))]
all_roads_dataset['edge_id']= range(0,len(init['edge_id'].unique()))
all_roads_dataset['speed'] = ' 1'
all_roads_dataset['length'] = ' 1'
all_roads_dataset[' start_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
all_roads_dataset['finish_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
all_roads_dataset_edges = pd.DataFrame()
all_roads_dataset_edges['source'] = [x[0] for x in all_edge_list]
all_roads_dataset_edges['target'] = [x[1] for x in all_edge_list]
# all_roads_dataset_edges = all_roads_dataset_edges.drop_duplicates().reset_index(drop = True)
trip_part = all_roads_dataset[['edge_id', 'speed', 'length', ' start_point_part', 'finish_point_part']].copy()
source_merge = pd.merge(all_roads_dataset_edges, trip_part.rename(columns = {'edge_id':'source'}), on = ['source'], how = 'left')
target_merge = pd.merge(all_roads_dataset_edges, trip_part.rename(columns = {'edge_id':'target'}), on = ['target'], how = 'left')
total_table = pd.DataFrame()
total_table['speed'] = (source_merge['speed'].apply(lambda x: [x]) + target_merge['speed'].apply(lambda x: [x]))
total_table['length'] = (source_merge['length'].apply(lambda x: [x]) + target_merge['length'].apply(lambda x: [x]))
total_table['edges'] = (source_merge['source'].apply(lambda x: [x]) + target_merge['target'].apply(lambda x: [x]))
total_table[' start_point_part'] = source_merge[' start_point_part']
total_table['finish_point_part'] = target_merge['finish_point_part']
total_table['week_period'] = datetime.datetime.now().hour
total_table['hour'] = datetime.datetime.now().weekday()
total_table['day_period'] = total_table['hour'].apply(lambda x: find_part(x))
total_table['RTA'] = 1
total_table['clouds'] = 1
total_table['snow'] = 0
total_table['temperature'] = 10
total_table['wind_dir'] = 180
total_table['wind_speed'] = 3
total_table['pressure'] = 747
total_table['source'] = source_merge['source']
total_table['target'] = source_merge['target']
# total_table = total_table.drop_duplicates().reset_index(drop = True)
return total_table
def prepare_raw_dataset_node(dataset_name):
if dataset_name == 'abakan':
raw_data = pd.read_csv('datasets/abakan/raw/abakan_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/abakan/raw/all_roads_graph.pickle').to_networkx().edges())
all_nodes = pd.read_pickle('datasets/abakan/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/abakan/raw/graph_abakan_init.csv')
elif dataset_name == 'omsk':
raw_data = pd.read_csv('datasets/omsk/raw/omsk_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/omsk/raw/all_roads_graph.pickle').to_networkx().edges())
# all_nodes = pd.read_pickle('datasets/omsk/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/omsk/raw/graph_omsk_init.csv')
all_roads_dataset = pd.DataFrame()
all_edge_list = [list((all_roads_graph)[i]) for i in range(0,len( (all_roads_graph)))]
all_roads_dataset['edge_id']= range(0,len(init['edge_id'].unique()))
all_roads_dataset['speed'] = ' 1'
all_roads_dataset['length'] = ' 1'
all_roads_dataset[' start_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
all_roads_dataset['finish_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
all_roads_dataset['finish_point_part'] = all_roads_dataset['finish_point_part']
all_roads_dataset['week_period'] = datetime.datetime.now().hour
all_roads_dataset['hour'] = datetime.datetime.now().weekday()
all_roads_dataset['day_period'] = all_roads_dataset['hour'].apply(lambda x: find_part(x))
all_roads_dataset['RTA'] = 1
all_roads_dataset['clouds'] = 1
all_roads_dataset['snow'] = 0
all_roads_dataset['temperature'] = 10
all_roads_dataset['wind_dir'] = 180
all_roads_dataset['wind_speed'] = 3
all_roads_dataset['pressure'] = 747
# all_roads_dataset['source'] = source_merge['source']
# all_roads_dataset['target'] = source_merge['target']
# total_table = total_table.drop_duplicates().reset_index(drop = True)
return all_roads_dataset
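# Minimal usage sketch (hypothetical call; assumes the raw CSV/pickle files referenced above exist):
# edge_table = prepare_raw_dataset_edge('abakan')   # one row per (source, target) edge pair
# node_table = prepare_raw_dataset_node('abakan')   # one row per road segment (edge_id)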
class single_geo_Omsk(InMemoryDataset):
def __init__(self, predict_data, transform=None, pre_transform=None, split = 'train'):
self.data = predict_data
def process(self):
# Read data
# print('start single')
start_time = time.time()
data = self.data
        # shape = int(data.shape[0])
shape = int(10)
data = data[0:1].copy()
data = data.drop(columns = ['Unnamed: 0'])
data['hour'] = data['start_timestamp'].apply(lambda x: int(x[-10:-8]))
# Graph
graph_columns_gran = ['edges', 'time', 'speed', 'length']
edges = ['edges']
target = ['time']
node_features_gran = ['speed', 'length']
edge_features_agg = [' start_point_part', 'finish_point_part', 'day_period', 'week_period', 'clouds', 'snow', 'temperature', 'wind_dir', 'wind_speed', 'pressure','hour']
all_speed = []
all_length = []
for i in range(0,1):
# print(i)
data_row = data[i:i+1].reset_index(drop = True).copy()
speed_list = [int(x) for x in (data_row['speed'].values[0].replace("'",'').split(','))]
list_length = [int(x) for x in (data_row['length'].values[0].replace("'",'').split(','))]
all_speed.append(speed_list)
all_length.append(list_length)
all_speed = [item for sublist in all_speed for item in sublist]
all_length = [item for sublist in all_length for item in sublist]
data_split_dict = dict()
data_split_dict['all'] = np.arange(0, int(data.shape[0]))
data_list = []
for i in data_split_dict['all']:
data_row = data.iloc[[i],].reset_index(drop = True).copy()
speed_list = [int(x) for x in (data_row['speed'].values[0].replace("'",'').split(','))]
list_length = [int(x) for x in (data_row['length'].values[0].replace("'",'').split(','))]
data_row_gran = pd.DataFrame()
data_row_gran['source'] = data_row['source']
data_row_gran['target'] = data_row['target']
data_row_gran['speed'] = speed_list
data_row_gran['length'] = list_length
target_val = data_row['RTA'].values[0]
data_row_gran['speed'] = data_row_gran['speed']/np.mean(speed_list)
data_row_gran['length'] = data_row_gran['length']/np.mean(list_length)
for col in edge_features_agg:
data_row_gran[col] = data_row[col].values[0]
total_nodes_list = list(set(list(data_row_gran.source.values)))
le = preprocessing.LabelEncoder()
le.fit(total_nodes_list)
data_row_gran['source'] = le.transform(data_row_gran.source.values)
data_row_gran['target'] = le.transform(data_row_gran.target.values)
total_nodes_list = list(set(list(data_row_gran.source.values)))
edge_index = torch.tensor(torch.from_numpy(data_row_gran[['source','target']].values.T),dtype = torch.long)
# Define tensor of nodes features
x = torch.tensor(torch.from_numpy(data_row_gran[['speed','length'] + edge_features_agg].values),dtype = torch.long)
# Define tensor of edge features
edge_num_feach = 1
edge_attr = torch.from_numpy(np.ones(shape = ((edge_index.size()[1]), edge_num_feach)))
edge_attr = torch.tensor(edge_attr,dtype = torch.long)
# Define tensor of targets
y = torch.tensor(target_val,dtype = torch.long)
data_graph = Data(x=x, edge_index = edge_index, edge_attr = edge_attr, y=y)
data_list.append(data_graph)
# print('end single')
return data_list
class single_geo_Abakan_raw():
def __init__(self, predict_data, transform=None, pre_transform=None, split = 'train'):
self.data = predict_data
def process(self):
# Read data
# print('start single')
start_time = time.time()
data = self.data
        # shape = int(data.shape[0])
shape = int(10)
data = data[0:1].copy()
data = data.drop(columns = ['Unnamed: 0'])
data['hour'] = data['start_timestamp'].apply(lambda x: int(x[-10:-8]))
# Graph
graph_columns_gran = ['edges', 'time', 'speed', 'length']
edges = ['edges']
target = ['time']
node_features_gran = ['speed', 'length']
edge_features_agg = [' start_point_part', 'finish_point_part', 'day_period', 'week_period', 'clouds', 'snow', 'temperature', 'wind_dir', 'wind_speed', 'pressure','hour']
all_speed = []
all_length = []
for i in range(0,1):
# print(i)
data_row = data[i:i+1].reset_index(drop = True).copy()
speed_list = [int(x) for x in (data_row['speed'].values[0].replace("'",'').split(','))]
list_length = [int(x) for x in (data_row['length'].values[0].replace("'",'').split(','))]
all_speed.append(speed_list)
all_length.append(list_length)
all_speed = [item for sublist in all_speed for item in sublist]
all_length = [item for sublist in all_length for item in sublist]
data_split_dict = dict()
data_split_dict['all'] = np.arange(0, int(data.shape[0]))
data_list = []
for i in data_split_dict['all']:
data_row = data.iloc[[i],].reset_index(drop = True).copy()
edge_list = [int(x) for x in (data_row['edges'].values[0].replace("'",'').split(','))]
speed_list = [int(x) for x in (data_row['speed'].values[0].replace("'",'').split(','))]
list_length = [int(x) for x in (data_row['length'].values[0].replace("'",'').split(','))]
source = edge_list.copy()
target = edge_list[1:].copy() + [edge_list[0]].copy()
data_row_gran = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from numpy import float64, ceil
from statsmodels.compat.pandas import assert_series_equal, assert_frame_equal
from models.Trading import TechnicalAnalysis
def test_should_calculate_addChangePct():
"""
Adds the close percentage to the DataFrame : close_pc
    Adds the cumulative returns to the DataFrame : close_cpc
Excellent video to understand cumulative returns : https://www.youtube.com/watch?v=fWHQwqT3lNY
"""
# GIVEN a series of values
closes_list = [0.0003, 0.0004, 0.0010, 0.0020, 0.0009]
df = pd.DataFrame({'date': ['2021-10-10 14:30:00',
'2021-10-10 14:31:00',
'2021-10-10 14:32:00',
'2021-10-10 14:33:00',
'2021-10-10 14:34:00'],
'close': closes_list})
df['date'] = pd.to_datetime(df['date'], format="%Y-%d-%m %H:%M:%S")
df.set_index(['date'])
ta = TechnicalAnalysis(df)
# WHEN calculate the percentage evolution and cumulative returns percentage
ta.addChangePct()
# THEN percentage evolution and cumulative returns percentage should be added to dataframe
actual = ta.getDataFrame()
close_pc = [
calculate_percentage_evol(closes_list[0], closes_list[0]),
calculate_percentage_evol(closes_list[0], closes_list[1]),
calculate_percentage_evol(closes_list[1], closes_list[2]),
calculate_percentage_evol(closes_list[2], closes_list[3]),
calculate_percentage_evol(closes_list[3], closes_list[4]),
]
close_cpc = []
close_cpc.append(0.000000)
close_cpc.append((1 + close_pc[1]) * (1 + close_cpc[0]) - 1)
close_cpc.append((1 + close_pc[2]) * (1 + close_cpc[1]) - 1)
close_cpc.append((1 + close_pc[3]) * (1 + close_cpc[2]) - 1)
close_cpc.append((1 + close_pc[4]) * (1 + close_cpc[3]) - 1)
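    # i.e. close_cpc[n] = prod_{k=1..n} (1 + close_pc[k]) - 1, the compounded cumulative return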
expected = pd.DataFrame({
'date': ['2021-10-10 14:30:00',
'2021-10-10 14:31:00',
'2021-10-10 14:32:00',
'2021-10-10 14:33:00',
'2021-10-10 14:34:00'],
'close': closes_list,
'close_pc': close_pc,
'close_cpc': close_cpc
})
expected['date'] = | pd.to_datetime(df['date'], format="%Y-%d-%m %H:%M:%S") | pandas.to_datetime |
import datetime
import glob
import json
import multiprocessing
import os
import pickle
import sys, re
import warnings
from collections import Counter, defaultdict
from itertools import cycle
from string import digits
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from gensim.models import KeyedVectors
from joblib import Parallel, delayed
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from numpy.core.multiarray import interp
from dateutil import parser
from scipy.sparse import lil_matrix, csr_matrix
from sklearn import metrics, preprocessing
from sklearn.decomposition import TruncatedSVD, PCA
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.metrics import precision_recall_fscore_support, roc_curve, auc
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit, GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Imputer, normalize, LabelBinarizer
from sklearn.preprocessing import Normalizer
from sklearn.svm import LinearSVC, SVC
from yellowbrick.classifier import ROCAUC
from yellowbrick.classifier import ClassificationReport
from imblearn.over_sampling import RandomOverSampler
from scipy.sparse import coo_matrix, vstack
import seaborn as sns
import random
import scikitplot as skplt
sys.path.insert(0, os.path.dirname(__file__) + '../2_helpers')
from decoder import decoder
#warnings.filterwarnings("ignore", category=DeprecationWarning)
sns.set(style="ticks")
NEW_CORPUS = False
BUILD_NEW_SPARSE = False
DIR = os.path.dirname(__file__) + '../../3_Data/'
WNL = WordNetLemmatizer()
NLTK_STOPWORDS = set(stopwords.words('english'))
num_cores = multiprocessing.cpu_count()
num_jobs = round(num_cores * 3 / 4)
# global Vars
user_order, users = [],[]
word_to_idx = {}
fact_to_words = {}
bow_corpus_cnt = {}
#if BUILD_NEW_SPARSE:
#word_vectors = KeyedVectors.load_word2vec_format('model_data/GoogleNews-vectors-negative300.bin', binary=True)
word_vectors = 0#KeyedVectors.load_word2vec_format('model_data/word2vec_twitter_model/word2vec_twitter_model.bin', binary=True, unicode_errors='ignore')
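# NOTE: word_vectors is stubbed to 0 here; the commented-out KeyedVectors load above must be
# re-enabled for build_user_vector / build_sparse_matrix_word2vec to work, since they rely on
# word_vectors.vocab and word_vectors.distances().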
def datetime_converter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
def tokenize_text(text, only_retweets=False):
tokenizer = RegexpTokenizer(r'\w+')
if only_retweets:
text = text.lower()
if 'rt' not in text: return []
text = text[text.find('rt'):]
text = text[text.find('@'):text.find(':')]
return [WNL.lemmatize(i.lower()) for i in tokenizer.tokenize(text) if
i.lower() not in NLTK_STOPWORDS]
return [WNL.lemmatize(i.lower()) for i in tokenizer.tokenize(text) if
i.lower() not in NLTK_STOPWORDS]
def get_data():
fact_file = glob.glob(DIR + 'facts.json')[0]
transactions_file = glob.glob(DIR + 'factTransaction.json')[0]
facts = json.load(open(fact_file), object_hook=decoder)
transactions = json.load(open(transactions_file), object_hook=decoder)
return facts, transactions
def get_users():
user_files = glob.glob(DIR + 'user_tweets/' + 'user_*.json')
print('{} users'.format(len(user_files)))
if len(user_files) < 10: print('WRONG DIR?')
users = []
for user_file in user_files:
user = json.loads(open(user_file).readline(), object_hook=decoder)
users.append(user)
return users
def get_corpus():
corpus_file = glob.glob('model_data/bow_corpus.json')[0]
bow_corpus = json.loads(open(corpus_file).readline())
return bow_corpus
def save_corpus(bow_corpus):
with open('model_data/bow_corpus.json', 'w') as out_file:
out_file.write(json.dumps(bow_corpus, default=datetime_converter) + '\n')
def build_bow_corpus(users):
print("Building a new Bow corpus")
bow_corpus_cnt = {}
for user in users:
if user.tweets is None:
print(user.user_id)
continue
for tweet in user.tweets:
# Tweets <text, created_at, *quoted_status
tokens = tokenize_text(tweet['text'])
for token in tokens:
if token in bow_corpus_cnt:
bow_corpus_cnt[token] += 1
else:
bow_corpus_cnt[token] = 1
return bow_corpus_cnt
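# Rough flow (sketch, not from the original script): users = get_users();
# save_corpus(build_bow_corpus(users)); later runs can call get_corpus() instead of rebuilding.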
def build_user_vector(user, i):
global fact_to_words
if i % 50 == 0: print(i)
user_data = {}
if not user.tweets: print("PROBLEM DUE TO: {}".format(user.user_id)); return
user_fact_words = fact_to_words[user.fact]
if len(user_fact_words) == 0:
print("%%%%%%%%")
print(user.user_id)
print(user.fact)
print(user_fact_words)
return
user_fact_words = fact_to_words[user.fact]
    # If X doesn't need to be rebuilt, comment this out
for tweet in user.tweets:
tokens = tokenize_text(tweet['text'], only_retweets=False)
for token in tokens:
if token not in word_to_idx: continue
if token not in word_vectors.vocab: continue
if len(user_fact_words) == 0: user_fact_words = [token]
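            # weight the token by its similarity to the fact's terms:
            # 1 - mean cosine distance (gensim distances), clamped to [0, 1] below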
increment = 1 - np.average(word_vectors.distances(token, other_words=user_fact_words))
if increment > 1: increment = 1
if increment < 0: increment = 0
if token in user_data:
user_data[word_to_idx[token]] += increment
else:
user_data[word_to_idx[token]] = increment
this_position = []
this_data = []
for tuple in user_data.items():
this_position.append(tuple[0])
this_data.append(tuple[1])
package = {
'index': i,
'positions': this_position,
'data': this_data,
'user_id': user.user_id,
'y': int(user.was_correct)
}
return package
def build_user_vector_retweets_topic_independent(user, i):
global fact_to_words
if i % 50 == 0: print(i)
user_data = {}
if not user.tweets: print("PROBLEM DUE TO: {}".format(user.user_id)); return
    # If X doesn't need to be rebuilt, comment this out
for tweet in user.tweets:
tokens = tokenize_text(tweet['text'], only_retweets=True)
for token in tokens:
if token not in word_to_idx: continue
if token in user_data:
user_data[word_to_idx[token]] += 1
else:
user_data[word_to_idx[token]] = 1
this_position = []
this_data = []
for tuple in user_data.items():
this_position.append(tuple[0])
this_data.append(tuple[1])
package = {
'index': i,
'positions': this_position,
'data': this_data,
'user_id': user.user_id,
'y': int(user.was_correct)
}
return package
def build_sparse_matrix_word2vec(users, retweets_only=False):
def rebuild_sparse(users):
global fact_to_words
print("Building sparse vectors")
_, transactions = get_data()
fact_topics = build_fact_topics()
fact_to_words = {r['hash']: [w for w in r['fact_terms'] if w in word_vectors.vocab] for index, r in fact_topics[['hash', 'fact_terms']].iterrows()}
users = sorted(users, key=lambda x: x.fact_text_ts)
for user in users:
if not user.tweets: users.pop(users.index(user))
for t in transactions:
if user.user_id == t.user_id:
user.fact = t.fact
transactions.pop(transactions.index(t))
break
if user.fact is None: print(user.user_id)
if retweets_only:
classification_data = Parallel(n_jobs=num_jobs)(
delayed(build_user_vector)(user, i) for i, user in enumerate(users))
return sorted([x for x in classification_data if x != None], key=lambda x: x['index'])
classification_data = Parallel(n_jobs=num_jobs)(
delayed(build_user_vector)(user, i) for i, user in enumerate(users))
classification_data = [x for x in classification_data if x != None]
classification_data = sorted(classification_data, key=lambda x: x['index'])
with open('model_data/classification_data_w2v', 'wb') as tmpfile:
pickle.dump(classification_data, tmpfile)
return classification_data
positions = []
data = []
y = []
user_order = []
classification_data = []
if not BUILD_NEW_SPARSE and not retweets_only:
with open('model_data/classification_data_w2v', 'rb') as f:
classification_data = pickle.load(f)
else:
classification_data = rebuild_sparse(users)
for item in classification_data:
positions.append(item['positions'])
data.append(item['data'])
user_order.append(item['user_id'])
y.append(item['y'])
# Only considering supports and denials [0,1], not comments etc. [-1]
mask = [el != -1 for el in y]
positions = np.asarray(positions)[mask]
data = np.asarray(data)[mask]
y = np.asarray(y)[mask]
user_order = np.asarray(user_order)[mask]
X = lil_matrix((len(data), len(word_to_idx)))
X.rows = positions
X.data = data
X.tocsr()
print(X.shape, y.shape, user_order.shape)
return X, y, user_order
def build_fact_topics():
print("Build fact topics")
fact_file = glob.glob(DIR + 'facts_annotated.json')[0]
facts_df = | pd.read_json(fact_file) | pandas.read_json |
import sys, os
sys.path.append('./src/common/image_processor/feature_extractor')
import cv2
import numpy as np
import pandas as pd
from operator import itemgetter
from collections import defaultdict
from feature_extractor_utils import (show_image,
smooth_contour,
compute_erosioned_image)
def judge_max_min(max_val, min_val, candidate_val):
if candidate_val is not None:
max_val = candidate_val if candidate_val>max_val else max_val
min_val = candidate_val if candidate_val<min_val else min_val
return min_val, max_val
def judge_min(min_val, candidate_val):
if candidate_val is not None:
min_val = candidate_val if candidate_val<min_val else min_val
return min_val
def vertex(major_axis):
if major_axis<100:
return 5
else:
return 10
def calculate_ellipse_axis(contour):
ellipse = cv2.fitEllipse(contour)
major_axis = max(ellipse[1][0], ellipse[1][1])
minor_axis = min(ellipse[1][0], ellipse[1][1])
return minor_axis, major_axis
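# compute_max_min_diamiter scans along one axis of the contour; wherever a scan line crosses the
# contour an even number of times it measures the span between crossing pairs on the other axis,
# and tracks the smallest and largest spans found (spans < 10 px are ignored).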
def compute_max_min_diamiter(scanning_axis, measure_axis, scan_gap=10):
left_top = (scanning_axis.min(), measure_axis.min())
right_bottom = (scanning_axis.max(), measure_axis.max())
mergin = (right_bottom[0] - left_top[0])//10
pre_point = None
max_val, min_val = 0, float('inf')
for x in range(left_top[0]+mergin, right_bottom[0]-mergin+1):
idx = np.where((scanning_axis>=x-scan_gap)&(scanning_axis<=x+scan_gap))[0]
if len(idx)%2!=0: continue
for i_idx in range(0, len(idx),2):
start, end = idx[i_idx], idx[i_idx+1]
val = np.abs(measure_axis[start]-measure_axis[end])
if val<10: continue
min_val, max_val = judge_max_min(max_val, min_val, val)
return min_val, max_val,
def calculate_one_cnt_diameter(min_v, max_v, contour, major_axis_sub, \
low_threshold, high_threshold, src_image=None):
cnt_size = contour.size
if cnt_size < 10: # specification of openCV
return min_v, max_v
try:
contour = smooth_contour(contour)
# fitting
minor_axis, major_axis = calculate_ellipse_axis(contour)
except:
# fitting
minor_axis, major_axis = calculate_ellipse_axis(contour)
return int(minor_axis), int(major_axis)
    # exclude fits whose major axis is too small or too large
if major_axis<low_threshold-major_axis_sub or \
major_axis>high_threshold:
return min_v, max_v
contour = contour.reshape((contour.shape[0], 2))
if src_image is not None:
cv2.drawContours(src_image,[contour],-1,(0,255,0),3)
show_image(src_image)
x_len = max(contour[:,0]) - min(contour[:,0])
y_len = max(contour[:,1]) - min(contour[:,1])
if y_len >= x_len:
contour_y = np.array(sorted(contour, key=itemgetter(1,0)))
# t_min_v, t_max_v = compute_max_min_diamiter(contour_y, scan_axis='x')
t_min_v, t_max_v = compute_max_min_diamiter(contour_y[:,1], contour_y[:,0])
else:
contour_x = np.array(sorted(contour, key=itemgetter(0,1)))
# t_min_v, t_max_v = compute_max_min_diamiter(contour_x, scan_axis='y')
t_min_v, t_max_v = compute_max_min_diamiter(contour_x[:,0], contour_x[:,1])
min_v, max_v = judge_max_min(max_v, min_v, t_min_v)
min_v, max_v = judge_max_min(max_v, min_v, t_max_v)
return min_v, max_v
def compute_diameter(img, src_image=None, threshold_schale=100,
low_threshold=95, high_threshold=300):
h, w = img.shape
# find corner
contours = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]
# filtered with area over (all area / 100 )
area_th = h*w/threshold_schale
contours_large = list(filter(lambda c:cv2.contourArea(c) > area_th, contours))
result = src_image.copy() if src_image is not None else None
min_v, max_v = float('inf'), 0
major_axis_sub = 0
while max_v==0 and major_axis_sub<low_threshold-1:
for contour in contours_large:
min_v, max_v = calculate_one_cnt_diameter(min_v, max_v, contour, \
major_axis_sub, low_threshold, \
high_threshold, src_image=src_image)
major_axis_sub += 10
max_diamiter = max(max_v, min_v)
if max_diamiter==float('inf'):
max_diamiter = 0
min_diamiter = min(max_v, min_v)
return min_diamiter, max_diamiter,
def compute_nucleus_diameter(src_image, threshold_schale=100, debug=False):
image = compute_erosioned_image(src_image, threshold_schale, debug)
if debug:
return compute_diameter(image, src_image, threshold_schale=threshold_schale)
else:
return compute_diameter(image, threshold_schale=threshold_schale)
if __name__=='__main__':
import subprocess
image_dir = './data/6k_rivised'
# image_dir = './data/20170117_revised_13000_Images'
# image_dir = './data/sys_val400'
# image_dir = './data/sys_val28'
cmd = 'find {} -name "*.jpg"'.format(image_dir)
process = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,)
b_out, _ = process.communicate()
out = b_out.decode('utf-8').rstrip().split('\n')
dict_results = defaultdict(list)
for image_path in out:
# print(image_path)
label = image_path.split('/')[-1].split('_')[0]
image = cv2.imread(image_path)
min_diam, max_diam = \
compute_nucleus_diameter(image, threshold_schale=100, debug=False)
dict_results['label'].append(label)
dict_results['min_diam'].append(min_diam)
dict_results['max_diam'].append(max_diam)
# print(label, min_diam, max_diam)
data = | pd.DataFrame(dict_results) | pandas.DataFrame |
import pandas as pd
import numpy as np
import statsmodels.api as sm
from numpy import NaN
import seaborn as sns
#from sklearn.linear_model import LinearRegression
df = pd.read_csv("C:/Users/LIUM3478/OneDrive Corp/OneDrive - Atkins Ltd/Work_Atkins/Docker/hjulanalys/wheel_prediction_data.csv", encoding = 'ISO 8859-1', sep = ";", decimal=",")
df.head()
# let's use the lmplot function within seaborn
grid = sns.lmplot(x = "LeftWheelDiameter",
y = "km_till_OMS",
row = "Littera", col = "VehicleOperatorName",
data = df, height = 5)
def model(df):
y = df[['km_till_OMS']].values
X = df[["LeftWheelDiameter"]].values
# With Statsmodels, we need to add our intercept term, B0, manually
X = sm.add_constant(X)
ols_model = sm.OLS(y, X, missing='drop')
#fitted_ols_model = ols_model.fit()
#print(fitted_ols_model.summary())
return pd.Series(ols_model.fit().predict(X))
def group_predictions(df):
predict_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import ray.tune
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.cloud_io import load as pl_load
import json
import pytorch_lightning as pl
import pandas as pd
import sklearn
from ray import tune
import numpy as np
import seaborn
import matplotlib.pyplot as plt
import argparse
import os
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler, PopulationBasedTraining
from ray.tune.integration.pytorch_lightning import TuneReportCallback
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.optim import SGD, Adam
from torchvision import transforms
import MLmodels as m
from ray.tune.integration.pytorch_lightning import TuneReportCheckpointCallback
from ray.tune.suggest.bayesopt import BayesOptSearch
class ATTENTION_VAE(pl.LightningModule):
def __init__(self,
config,
pretrained=True,
resnet_model=152,
image_channels=1,
hidden_dims=[8, 16, 32, 64, 128],
out_image_channels=1,
output_size=4,
fcl_layers=[]):
super(ATTENTION_VAE, self).__init__()
#Lightning Additions
self.criterion = m.SmoothCrossEntropyLoss(smoothing=0.01, reduction='sum')
self.eps = 1.0
self.replicas = 4
self.__dict__.update(locals())
optimizers = {'adam': Adam, 'sgd': SGD}
self.optimizer = optimizers['adam']
# hyperparameters
self.lr = config['lr']
self.batch_size = config['batch_size']
# for importing different versions of the data
self.datatype = config['datatype']
self.dr = config['dr']
kld = 1./self.batch_size
self.train_criterion = m.SymmetricMSE(1.0, 0.3, kld)
if 'B' in self.datatype and '20' not in self.datatype:
self.data_length = 40
else:
self.data_length = 20
self.training_data = None
self.validation_data = None
self.image_channels = image_channels
self.hidden_dims = hidden_dims
self.z_dim = config['z_dim']
self.out_image_channels = out_image_channels
self.encoder = None
self.decoder = None
self.pretrained = pretrained
# self.resnet_model = resnet_model
# if self.resnet_model == 18:
# resnet = models.resnet18(pretrained=self.pretrained)
# elif self.resnet_model == 34:
# resnet = models.resnet34(pretrained=self.pretrained)
# elif self.resnet_model == 50:
# resnet = models.resnet50(pretrained=self.pretrained)
# elif self.resnet_model == 101:
# resnet = models.resnet101(pretrained=self.pretrained)
# elif self.resnet_model == 152:
# resnet = models.resnet152(pretrained=self.pretrained)
self.conv = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=(3, 3), padding=(2, 2)), # 64@96*96
nn.ReLU(inplace=True),
)
self.encoder_block1 = self.encoder_block(
64, 48, (4,4), 1, (1,1))
self.encoder_atten1 = m.Self_Attention(48)
self.encoder_block2 = self.encoder_block(
48, 32, (4,4), 1, (1,1))
self.encoder_atten2 = m.Self_Attention(32)
self.encoder_block3 = self.encoder_block(
32, 16, (5, 15), (1,2), (1,1))
self.encoder_atten3 = m.Self_Attention(16)
# Add extra output channel if we are using Matt's physical constraint
if self.out_image_channels > 1:
self.hidden_dims = [
self.out_image_channels * x for x in self.hidden_dims
]
self.fc1 = self.make_fcn(128, self.z_dim, [128, 112], self.dr)
self.fc2 = self.make_fcn(128, self.z_dim, [128, 112], self.dr)
self.fc3 = self.make_fcn(self.z_dim, 128, [128, 128], self.dr)
self.fcn = self.make_fcn(self.z_dim, output_size, [128, 64], self.dr)
self.decoder_block2 = self.decoder_block(
self.hidden_dims[4], self.hidden_dims[3], (4, 5), (1, 5), 0)
self.decoder_atten2 = m.Self_Attention(self.hidden_dims[3])
self.decoder_block3 = self.decoder_block(
self.hidden_dims[3], self.hidden_dims[2], (1, 2), (1, 2), 0)
self.decoder_atten3 = m.Self_Attention(self.hidden_dims[2])
self.decoder_block4 = self.decoder_block(
self.hidden_dims[2], self.hidden_dims[1], (1, 2), (1, 2), 0)
self.decoder_atten4 = m.Self_Attention(self.hidden_dims[1])
self.decoder_block5 = self.decoder_block(
self.hidden_dims[1], self.out_image_channels, (1, 1), (1, 1), 0)
self.decoder_atten5 = m.Self_Attention(self.hidden_dims[0])
# self.load_weights()
def encoder_block(self, dim1, dim2, kernel_size, stride, padding):
return nn.Sequential(
m.SpectralNorm(
nn.Conv2d(dim1, dim2, kernel_size=kernel_size,
stride=stride, padding=padding)
),
nn.BatchNorm2d(dim2),
nn.LeakyReLU()
)
def decoder_block(self, dim1, dim2, kernel_size, stride, padding, sigmoid=False):
return nn.Sequential(
m.SpectralNorm(
nn.ConvTranspose2d(
dim1, dim2, kernel_size=kernel_size, stride=stride, padding=padding)
),
nn.BatchNorm2d(dim2),
nn.LeakyReLU() if not sigmoid else nn.Sigmoid()
)
def make_fcn(self, input_size, output_size, fcl_layers, dr):
if len(fcl_layers) > 0:
fcn = [
nn.Dropout(dr),
nn.Linear(input_size, fcl_layers[0]),
nn.BatchNorm1d(fcl_layers[0]),
torch.nn.LeakyReLU()
]
if len(fcl_layers) == 1:
fcn.append(nn.Linear(fcl_layers[0], output_size))
else:
for i in range(len(fcl_layers) - 1):
fcn += [
nn.Linear(fcl_layers[i], fcl_layers[i + 1]),
nn.BatchNorm1d(fcl_layers[i + 1]),
torch.nn.LeakyReLU(),
nn.Dropout(dr)
]
fcn.append(nn.Linear(fcl_layers[i + 1], output_size))
else:
fcn = [
nn.Dropout(dr),
nn.Linear(input_size, output_size)
]
if output_size > 1:
fcn.append(torch.nn.LogSoftmax(dim=1))
return nn.Sequential(*fcn)
def reparameterize(self, mu, logvar):
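        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I) and
        # sigma = exp(0.5 * logvar), keeping the sample differentiable w.r.t. mu/logvar.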
std = logvar.mul(0.5).exp_()
# return torch.normal(mu, std)
esp = torch.randn(*mu.size()).to(std.device)
z = mu + std * esp
return z
def bottleneck(self, h):
mu, logvar = self.fc1(h), self.fc2(h)
z = self.reparameterize(mu, logvar)
return z, mu, logvar
def encode(self, x):
h = self.conv(x)
h = self.encoder_block1(h)
h, att_map1 = self.encoder_atten1(h)
h = self.encoder_block2(h)
h, att_map2 = self.encoder_atten2(h)
h = self.encoder_block3(h)
h, att_map3 = self.encoder_atten3(h)
h = h.view(h.size(0), -1) # flatten
z, mu, logvar = self.bottleneck(h)
return h, z, mu, logvar, [att_map1, att_map2, att_map3]
def decode(self, z):
z = self.fc3(z)
z = z.view(z.size(0), self.hidden_dims[-1], 1, 1) # flatten/reshape
z = self.decoder_block2(z)
z, att_map2 = self.decoder_atten2(z)
z = self.decoder_block3(z)
z, att_map3 = self.decoder_atten3(z)
z = self.decoder_block4(z)
z, att_map4 = self.decoder_atten4(z)
z = self.decoder_block5(z)
return z, [att_map2, att_map3, att_map4]
def forward(self, x):
h, z, mu, logvar, encoder_att = self.encode(x)
out = self.fcn(z)
z, decoder_att = self.decode(z)
return out, z, mu, logvar
# def load_weights(self):
# # Load weights if supplied
# if os.path.isfile(self.weights):
# # Load the pretrained weights
# model_dict = torch.load(
# self.weights,
# map_location=lambda storage, loc: storage
# )
# self.load_state_dict(model_dict["model_state_dict"])
# return
#
# for p in self.parameters():
# if p.dim() > 1:
# nn.init.xavier_uniform_(p)
#
# return
#Lightning Methods
def configure_optimizers(self):
return self.optimizer(self.parameters(), lr=self.lr)
def prepare_data(self):
# import our data
train, validate, weights = m.get_rawdata(self.datatype, 10, 5, round=8)
_train = train.copy()
_validate = validate.copy()
# Assigns labels for learning
_train["binary"] = _train["affinity"].apply(m.bi_labelM)
#print(_train[_train["binary"] == 1].count())
#print(_train[_train["binary"] == 0].count())
_validate["binary"] = _validate["affinity"].apply(m.bi_labelM)
#print(_validate[_validate["binary"] == 1].count())
#print(_validate[_validate["binary"] == 0].count())
_weights = torch.FloatTensor(weights)
# instantiate loss criterion, need weights so put this here
self.criterion = m.SmoothCrossEntropyLoss(weight=_weights, smoothing=0.01, reduction='sum')
self.training_data = _train
self.validation_data = _validate
def train_dataloader(self):
# Data Loading
train_reader = m.NAReader(self.training_data, shuffle=True)
train_loader = torch.utils.data.DataLoader(
train_reader,
batch_size=self.batch_size,
# batch_size=self.batch_size,
collate_fn=m.my_collate,
num_workers=4,
# pin_memory=True,
shuffle=True
)
return train_loader
def training_step(self, batch, batch_idx):
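        """One training step: the total loss mixes the classification loss with the VAE
        reconstruction/KL loss, weighted by an epsilon that decays with the epoch number."""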
seq, x, y = batch
# get output from the model, given the inputs
predictions, xp, mu, logvar = self(x)
xpp = torch.where(xp > 0.5, 1.0, 0.0)
recon_acc = (x == xpp).float().mean()
seq_acc = recon_acc.item()
loss = self.criterion(predictions, y)
vae_loss, bce, kld = self.train_criterion(x, xpp, mu, logvar)
_epoch = self.current_epoch+1 # lightning module member
_eps = self.eps / (1 + 0.06 * _epoch)
train_loss = (1 - _eps) * loss + _eps * vae_loss
# Convert to labels
preds = torch.argmax(predictions, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = list(y.detach().cpu().numpy())
train_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# perform logging
self.log("ptl/train_loss", train_loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/train_accuracy", train_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/train_seq_accuracy", seq_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return train_loss
def val_dataloader(self):
# Data Loading
# train_reader = m.NAContrast(_train, n=n, shuffle=True)
val_reader = m.NAReader(self.validation_data, shuffle=False)
val_loader = torch.utils.data.DataLoader(
val_reader,
batch_size=self.batch_size,
collate_fn=m.my_collate,
num_workers=4,
# pin_memory=True,
shuffle=False
)
return val_loader
def validation_step(self, batch, batch_idx):
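        """Validation step: average class predictions and reconstructions over
        `self.replicas` forward passes (each pass draws a new latent sample)."""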
seq, x, y = batch
seq_aves = []
pred_aves = []
for _ in range(self.replicas):
predictions, xp, mu, logvar = self(x)
seq_aves.append(xp)
pred_aves.append(predictions)
predictions = torch.mean(torch.stack(pred_aves, dim=0), dim=0)
xp = torch.mean(torch.stack(seq_aves, dim=0), dim=0)
xpp = torch.where(xp > 0.5, 1.0, 0.0)
recon_acc = (x == xpp).float().mean()
seq_acc = recon_acc.item()
# get loss for the predicted output
val_loss = torch.nn.CrossEntropyLoss(reduction='sum')(predictions, y)
vae_loss, bce, kld = self.train_criterion(x, xp, mu, logvar)
# Convert to labels
preds = torch.argmax(predictions, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = list(y.detach().cpu().numpy())
val_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# perform logging
self.log("ptl/val_loss", val_loss, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/val_accuracy", val_acc, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/val_seq_accuracy", seq_acc, on_epoch=True, prog_bar=True, logger=True)
return {"val_loss": val_loss, "val_acc": val_acc}
### Train the model
def train_vae(config, checkpoint_dir=None, num_epochs=10, num_gpus=0):
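    """Ray Tune trainable: builds a PyTorch Lightning Trainer that reports validation
    metrics back to Tune via TuneReportCheckpointCallback, optionally resuming the
    model from a Tune checkpoint."""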
trainer = pl.Trainer(
# default_root_dir="~/ray_results/",
max_epochs=num_epochs,
# If fractional GPUs passed in, convert to int.
gpus=num_gpus,
logger=TensorBoardLogger(
save_dir=tune.get_trial_dir(), name="", version="."),
progress_bar_refresh_rate=0,
callbacks=[
TuneReportCheckpointCallback(
metrics={
"loss": "ptl/val_loss",
"acc": "ptl/val_accuracy",
"train_seq_acc": "ptl/train_seq_accuracy",
"val_seq_acc": "ptl/val_seq_accuracy"
},
filename="checkpoint",
on="validation_end")
]
)
if checkpoint_dir:
# Workaround:
ckpt = pl_load(
os.path.join(checkpoint_dir, "checkpoint"),
map_location=lambda storage, loc: storage)
model = ATTENTION_VAE._load_model_state(
ckpt, config=config)
trainer.current_epoch = ckpt["epoch"]
else:
model = ATTENTION_VAE(config, True, 152, image_channels=1,
hidden_dims=[128, 128, 128, 128, 128], out_image_channels=1, output_size=2, fcl_layers=[])
trainer.fit(model)
def tune_asha(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):
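    """Random search over lr, batch_size, dropout and z_dim with the ASHA scheduler
    for early stopping, maximizing validation accuracy ("acc")."""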
config = {
"lr": tune.loguniform(1e-5, 1e-3),
"batch_size": tune.choice([32, 64, 128]),
"dr": tune.loguniform(0.005, 0.5),
"z_dim": tune.choice([10, 100, 200]),
"datatype": datatype
}
scheduler = ASHAScheduler(
max_t=num_epochs,
grace_period=5,
reduction_factor=2)
reporter = CLIReporter(
parameter_columns=["lr", "batch_size"],
metric_columns=["loss", "acc", "training_iteration", "train_seq_accuracy", "val_seq_accuracy"])
analysis = tune.run(
tune.with_parameters(
train_vae,
num_epochs=num_epochs,
num_gpus=gpus_per_trial),
resources_per_trial={
"cpu": cpus_per_trial,
"gpu": gpus_per_trial
},
metric="acc",
mode="max",
config=config,
num_samples=num_samples,
local_dir="./ray_results/",
scheduler=scheduler,
progress_reporter=reporter,
name="tune_vae_asha")
print("Best hyperparameters found were: ", analysis.best_config)
# analysis.to_csv('~/ray_results/' + config['datatype'])
def tune_asha_search(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):
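    """Bayesian-optimization search (BayesOptSearch) over lr and dropout with fixed
    batch_size and z_dim, combined with the ASHA scheduler."""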
config = {
"lr": tune.uniform(1e-5, 1e-3),
"batch_size": 64,
"dr": tune.uniform(0.005, 0.5),
"z_dim": 100,
"datatype": datatype
}
scheduler = ASHAScheduler(
max_t=num_epochs,
grace_period=5,
reduction_factor=2)
reporter = CLIReporter(
parameter_columns=["lr", "batch_size"],
metric_columns=["loss", "acc", "training_iteration", "train_seq_accuracy", "val_seq_accuracy"])
    bayesopt = BayesOptSearch(metric="acc", mode="max")
analysis = tune.run(
tune.with_parameters(
train_vae,
num_epochs=num_epochs,
num_gpus=gpus_per_trial),
resources_per_trial={
"cpu": cpus_per_trial,
"gpu": gpus_per_trial
},
metric="acc",
mode="max",
local_dir="./ray_results/",
config=config,
num_samples=num_samples,
search_alg=bayesopt,
scheduler=scheduler,
progress_reporter=reporter,
name="tune_vae_bayopt")
print("Best hyperparameters found were: ", analysis.best_config)
class CustomStopper(tune.Stopper):
def __init__(self):
self.should_stop = False
def __call__(self, trial_id, result):
max_iter = 100
if not self.should_stop and result["acc"] > 0.96:
self.should_stop = True
return self.should_stop or result["training_iteration"] >= max_iter
def stop_all(self):
return self.should_stop
def pbt_vae(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):
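    """Population Based Training over lr and dropout; trials stop early once validation
    accuracy exceeds 0.96 (see CustomStopper) or after 100 training iterations."""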
config = {
"lr": tune.uniform(1e-5, 1e-3),
"batch_size": 64,
"dr": tune.uniform(0.005, 0.5),
"z_dim": 100,
"datatype": datatype
}
scheduler = PopulationBasedTraining(
time_attr="training_iteration",
perturbation_interval=5,
hyperparam_mutations={
# distribution for resampling
"lr": lambda: np.random.uniform(0.00001, 0.001),
"dr": lambda: np.random.uniform(0.005, 0.5),
# allow perturbations within this set of categorical values
# "momentum": [0.8, 0.9, 0.99],
})
reporter = CLIReporter(
parameter_columns=["lr", "dr"],
metric_columns=["loss", "acc", "training_iteration"])
stopper = CustomStopper()
analysis = tune.run(
tune.with_parameters(
train_vae,
num_epochs=num_epochs,
num_gpus=gpus_per_trial),
resources_per_trial={
"cpu": cpus_per_trial,
"gpu": gpus_per_trial
},
metric="acc",
mode="max",
local_dir="./ray_results/",
config=config,
num_samples=num_samples,
name="tune_vae_pbt",
scheduler=scheduler,
progress_reporter=reporter,
verbose=1,
stop=stopper,
# export_formats=[ExportFormat.MODEL],
checkpoint_score_attr="acc",
keep_checkpoints_num=4)
print("Best hyperparameters found were: ", analysis.best_config)
def exp_results_check(checkpoint_path, result_path, title):
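    """Rebuild the model from a Tune checkpoint and the corresponding Tune results file,
    run it (averaged over 25 forward passes) on the held-out verification set
    m.test_set_corr, and save a confusion matrix plus a metrics text file."""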
# example
# checkpoint_file = './ray_results/tune_vae_asha/train_vae_a45d1_00000_0_batch_size=64,dr=0.029188,lr=0.0075796,z_dim=10_2021-07-13_12-50-57/checkpoints/epoch=28-step=15891.ckpt'
checkpoint_file = checkpoint_path
param_file = open(result_path, 'r')
check_epoch = int(checkpoint_file.split("epoch=", 1)[1].split('-', 1)[0])
resultjsons = param_file.read().split('\n')
results = json.loads(resultjsons[check_epoch+1])
params = results['config']
lr = params['lr']
dr = params['dr']
batch_size = params['batch_size']
datatype = params['datatype']
z_dim = params['z_dim']
con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'z_dim': z_dim, 'datatype': datatype}
model = ATTENTION_VAE(con, True, 152, image_channels=1, hidden_dims=[128, 128, 128, 128, 128], out_image_channels=1, output_size=2, fcl_layers=[])
checkpoint = torch.load(checkpoint_file)
model.criterion.weight = torch.tensor([0., 0.]) # need to add as this is saved by the checkpoint file
model.load_state_dict(checkpoint['state_dict'])
model.eval()
test_set = m.test_set_corr
verdict = {'sequence':list(test_set.keys()), 'binary':list(test_set.values())}
_verification = pd.DataFrame(verdict)
ver_reader = m.NAReader(_verification, shuffle=False)
ver_loader = torch.utils.data.DataLoader(
ver_reader,
batch_size=len(test_set.keys()),
collate_fn=m.my_collate,
# num_workers=4,
# pin_memory=True,
shuffle=False
)
for i, batch in enumerate(ver_loader):
seqs, ohe, labels = batch
seq_aves = []
pred_aves = []
replicas = 25
for _ in range(replicas):
predictions, xp, mu, logvar = model(ohe)
seq_aves.append(xp)
pred_aves.append(predictions)
predictions = torch.mean(torch.stack(pred_aves, dim=0), dim=0)
xp = torch.mean(torch.stack(seq_aves, dim=0), dim=0)
xpp = torch.where(xp > 0.5, 1.0, 0.0)
recon_acc = (ohe == xpp).float().mean()
ver_seq_acc = recon_acc.item()
preds = torch.argmax(predictions, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = labels
# ver_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# Make confusion Matrix
y_true = ycpu.detach().cpu().numpy().astype('bool').tolist()
        y_pred = np.asarray(predcpu, dtype=bool).tolist()
score = np.asarray([1 if x == y_pred[xid] else 0 for xid, x in enumerate(y_true)])
ver_acc = np.mean(score)
cm = sklearn.metrics.confusion_matrix(y_true, y_pred, normalize='true')
df_cm = pd.DataFrame(cm, index=[0, 1], columns=[0, 1])
plt.figure(figsize=(10, 7))
ax = plt.subplot()
seaborn.set(font_scale=3.0)
seaborn.heatmap(df_cm, annot=True, ax=ax)
label_font = {'size': '26'}
ax.tick_params(axis='both', which='major', labelsize=40)
ax.xaxis.set_ticklabels(["0", "1"])
ax.yaxis.set_ticklabels(["0", "1"])
# plt.title(title)
plt.savefig('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title)
o = open('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + 'results.txt', "w+")
print("Train Seq Acc", results['train_seq_acc'], file=o)
print("Validation Seq Acc", results['val_seq_acc'], file=o)
print("Validation Loss", results['loss'], file=o)
print("Validation Accuracy", results['acc'], file=o)
print("Verification Accuracy:", ver_acc, "of dataset size:", len(test_set.keys()), file=o)
print("Verification Sequence Accuracy:", ver_seq_acc, "of dataset size:", len(test_set.keys()), file=o)
o.close()
def val_results_check(checkpoint_path, result_path, title):
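    """Same as exp_results_check, but evaluates the restored model on the validation
    dataloader instead of the external verification set."""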
# example
# checkpoint_file = './ray_results/tune_vae_asha/train_vae_a45d1_00000_0_batch_size=64,dr=0.029188,lr=0.0075796,z_dim=10_2021-07-13_12-50-57/checkpoints/epoch=28-step=15891.ckpt'
checkpoint_file = checkpoint_path
param_file = open(result_path, 'r')
check_epoch = int(checkpoint_file.split("epoch=", 1)[1].split('-', 1)[0])
resultjsons = param_file.read().split('\n')
results = json.loads(resultjsons[check_epoch+1])
params = results['config']
lr = params['lr']
dr = params['dr']
batch_size = params['batch_size']
datatype = params['datatype']
z_dim = params['z_dim']
con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'z_dim': z_dim, 'datatype': datatype}
model = ATTENTION_VAE(con, True, 152, image_channels=1, hidden_dims=[128, 128, 128, 128, 128], out_image_channels=1, output_size=2, fcl_layers=[])
checkpoint = torch.load(checkpoint_file)
model.criterion.weight = torch.tensor([0., 0.]) # need to add as this is saved by the checkpoint file
model.load_state_dict(checkpoint['state_dict'])
model.eval()
model.prepare_data()
vd = model.val_dataloader()
yt, yp = [], []
for i, batch in enumerate(vd):
seqs, ohe, labels = batch
seq_aves = []
pred_aves = []
replicas = 25
for _ in range(replicas):
predictions, xp, mu, logvar = model(ohe)
seq_aves.append(xp)
pred_aves.append(predictions)
predictions = torch.mean(torch.stack(pred_aves, dim=0), dim=0)
xp = torch.mean(torch.stack(seq_aves, dim=0), dim=0)
xpp = torch.where(xp > 0.5, 1.0, 0.0)
recon_acc = (ohe == xpp).float().mean()
ver_seq_acc = recon_acc.item()
preds = torch.argmax(predictions, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = labels
ver_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# Make confusion Matrix
y_true = ycpu.detach().cpu().numpy().astype('bool').tolist()
        y_pred = np.asarray(predcpu, dtype=bool).tolist()
yt += y_true
yp += y_pred
cm = sklearn.metrics.confusion_matrix(yt, yp, normalize='true')
df_cm = | pd.DataFrame(cm, index=[0, 1], columns=[0, 1]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
from datetime import datetime
import plotly.graph_objects as go
import time
def timer():
return '['+datetime.now().strftime("%d/%m/%Y %H:%M:%S")+']'
# Cases where the client does not have access to the Cargo update links
try:
    from confidential.secrets import url_cargo, url_vessels
except ImportError:
    url_cargo=0
    url_vessels=0
    print('\033[1;31;48m'+timer()+'[WARNING] Cargo update links are not accessible. Did you set up the secrets.py file correctly? More information in the READ.ME \033[0m')
path="data/external/cargos/"
config = {'displayModeBar': False}
# Functions
def conversion(old):
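    """Convert a degrees/minutes/seconds coordinate string (suffixed N/S/E/W)
    to signed decimal degrees."""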
direction = {'N':1, 'S':-1, 'E': 1, 'W':-1}
new = old.replace(u'°',' ').replace('\'',' ').replace('"',' ')
new = new.split()
new_dir = new.pop()
new.extend([0,0,0])
return (int(new[0])+int(new[1])/60.0+int(new[2])/3600.0) * direction[new_dir]
####### First part : Update data
print(timer()+'[INFO] Check cargo files to update...')
ls=[path+"UpdateCargo",path+"UpdateVessels",path+"archives",path+"archives/HistoriqueMarchandises",path+"archives/HistoriqueNavires"]
for elem in ls:
if(os.path.isdir(elem)==0):
try:
os.mkdir(elem)
except OSError:
print('\033[1;31;48m'+timer()+'[ERROR] Creation of the directory failed \033[0m')
#Create files to avoid bugs in HistoriqueNavires
if len(os.listdir(path+"archives/HistoriqueNavires"))==0:
pd.DataFrame(data=[["03/01/2010 09:00","04/01/2010 04:00","HANJINAG - HANJIN","HANJINAG - HANJIN","ATP00014315","HNHL0049W","HNHL0049E","<NAME>","FOS","Validée","Validée"]], columns=["ARRIVEE","DEPART","Rep. Transp EM","Rep. Transp SM","id.AP+","Référence Arrivée","Référence Départ","<NAME>","<NAME>","Statut Arrivée","Statut Départ"]).to_csv(path+'archives/HistoriqueNavires/AMU_VESSEL_2010.txt', sep=' ', encoding = "ISO-8859-1")
## Update Cargo trafic
#Read files with "cargo" in title
folder="UpdateCargo/"
files=os.listdir(path+folder[:-1])
ls=[os.path.getmtime(path+folder+elem) for elem in files]
last_file=pd.to_datetime(files[ls.index(max(ls))][-10:-4], format='%d%m%y')
last_file_cargo=pd.to_datetime(files[ls.index(max(ls))][-10:-4], format='%d%m%y').strftime('%d/%m/%y')
today=pd.Timestamp.today()
#check if client can access to the links
if url_cargo != 0:
#check time of precedent file
if((last_file.year < today.year) | (last_file.week < (today.week-1))):
try:
cargo_update=pd.read_csv(url_cargo, encoding = "ISO-8859-1", sep=";")
new_file=pd.to_datetime(cargo_update["Date fin"][1], format="%d/%m/%Y").strftime(folder[:-1]+'S%W%d%m%y.csv')
except Exception as e:
            print('\033[1;31;48m'+timer()+'[WARNING] Cargo update failed with error:', e, '. Maybe the given file does not have the expected column structure? \033[0m')
else:
#Save if not exist
if new_file not in os.listdir(path+folder):
cargo_update.to_csv(path+folder+new_file, sep=";", encoding = "ISO-8859-1")
## Update vessels trafic
folder="UpdateVessels/"
files=os.listdir(path+folder[:-1])
ls=[os.path.getmtime(path+folder+elem) for elem in files]
last_file=pd.to_datetime(files[ls.index(max(ls))][-10:-4], format='%d%m%y')
last_file_vessels=pd.to_datetime(files[ls.index(max(ls))][-10:-4], format='%d%m%y').strftime('%d/%m/%y')
today=pd.Timestamp.today()
if url_vessels != 0:
if((last_file.year < today.year) | (last_file.week < today.week)):
try:
cargo_update=pd.read_csv(url_vessels, encoding = "ISO-8859-1", sep=";")
new_file=pd.Timestamp.today().strftime(folder[:-1]+'S%W%d%m%y.csv')
except Exception as e:
            print('\033[1;31;48m'+timer()+'[WARNING] Vessels update failed with error:', e, '. Maybe the given file does not have the expected column structure? \033[0m')
else:
#Save if not exist
if new_file not in os.listdir(path+folder):
#Remove previous file
os.remove(path+folder+files[ls.index(max(ls))])
#Save new file
cargo_update.to_csv(path+folder+new_file, sep=";", encoding = "ISO-8859-1")
#If the processed Excel file doesn't exist yet, use an old timestamp so the update condition below is forced to True
if os.path.isfile(path+'../../processed/CARGO_2010-2020.xlsx'):
datetime_cargos=datetime.fromtimestamp(os.path.getmtime(path+'../../processed/CARGO_2010-2020.xlsx'))
else:
datetime_cargos=datetime.fromtimestamp(1326244364)
## Update main file: Cargo
folder="UpdateCargo/"
## If the newest update file is more recent than the main Excel file, rebuild the main file
if datetime.fromtimestamp(max([os.path.getmtime(path+folder+elem) for elem in os.listdir(path+folder[:-1])])) > datetime_cargos:
#Read previous file
if os.path.isfile(path+'../../processed/CARGO_2010-2020.xlsx'):
cargo=pd.read_excel(path+'../../processed/CARGO_2010-2020.xlsx', encoding = "ISO-8859-1", index_col=0).reset_index(drop=True)
else:
cargo=pd.DataFrame()
#Read update files
files=os.listdir(path+folder)
ls=[i for i in os.listdir(path+folder)]
#concat update files
cargo_update=pd.DataFrame()
for elem in ls:
cargo_update_add=pd.read_csv(path+folder+elem, encoding = "ISO-8859-1", sep=";", index_col=0)
cargo_update_add["date"]= | pd.to_datetime(elem[-10:-4], format='%d%m%y') | pandas.to_datetime |
# =======================================
# PACKAGE IMPORTS
# =======================================
# python built-in packages
import bisect
import os
# 3rd party packages
import h5py
import numpy as np
import pandas as pd
# local
from custom_errors import (
MissingFilesException,
CorruptHDF5Exception,
MissingColumnException,
)
# =======================================
# DEFINE HELPER FUNCTIONS
# =======================================
# function that goes through an **already sorted** array-like
# data structure and finds the index of the element that
# is closest to a passed numeric value
def find_closest_index(sorted_arr, search_val):
# use the built-in library bisect's binary search function
# to find the index where the input value would be inserted
# in order to keep the array sorted
insert_index = bisect.bisect(sorted_arr, search_val)
# if the search value is greater than the highest value
# in the array, return the highest index of the array
if len(sorted_arr) == insert_index:
return insert_index - 1
# if the search value is less than the least value
# in the array, return index 0
if insert_index == 0:
return 0
# check which value is the closest to 'search_val' out of the value
# right before the 'insertion index slot', or right after
# it. if there is a tie, return the lower of the two indexes.
before_dist = abs(sorted_arr[insert_index-1] - search_val)
after_dist = abs(sorted_arr[insert_index] - search_val)
if after_dist < before_dist:
return insert_index
else:
return insert_index-1
def reformat_data(exp_data_dir, output_dir):
"""
Reformats PsychoPy data output, where each experiment run
produces one CSV file and one HDF5 file, named according to
the same logic, eg 'foobar_myexp_2021_Aug_09_1904.csv'
and 'foobar_myexp_2021_Aug_09_1904_hdf5.hdf5'.
:param exp_data_dir: Full path to raw experiment output directory.
:param output_dir: Full path to directory to output reformatted
data to.
"""
# =======================================
# FIND ALL DATA FILES
# =======================================
# list all files & directories in data input directory,
# excluding hidden files
    input_fnames = [fname for fname in os.listdir(exp_data_dir) if not fname.startswith('.')]
# find names of all CSV and HDF5 files in the
# data input directory
csv_fnames = [fname for fname in input_fnames if fname.endswith('csv')]
hdf5_fnames = [fname for fname in input_fnames if fname.endswith('hdf5')]
# if no CSV files, or no HDF5 files, were found
# (ie at least one of the lists is empty,
# producing False values below),
# then raise an error
if not (csv_fnames and hdf5_fnames):
raise MissingFilesException(
"The data directory doesn't contain both CSV and HDF5 files. "
"Please check to make sure that you've selected the correct "
"data directory."
)
# if there aren't as many CSV files as there are HDF5 files,
# raise an error (since this means that for some experiment run
# there are only 'core PsychoPy' data, or only 'eyetracker data')
if len(csv_fnames) != len(hdf5_fnames):
raise MissingFilesException(
"The selected data directory contains an unequal number of "
f"CSV files ({len(csv_fnames)}) and HDF5 files ({len(hdf5_fnames)}). "
"This means that for at least one experiment, there is only "
"'core PsychoPy' experiment output, or there is only eyetracker "
"recording output.\n\n"
"The most likely reason for this problem is that an experiment was "
"aborted/closed down before it could finish, meaning no HDF5 file was saved. "
"Please make sure that the data directory only includes complete "
"experiment data sets (ie where there is an HDF5 file and CSV file "
"corresponding to an experiment run), by moving incomplete data to "
"a separate folder.\n"
"When you're done, click the reformat button again."
)
# =======================================
# SORT DATA FILES BY NAMES
# =======================================
csv_fnames.sort()
hdf5_fnames.sort()
# =======================================
# DEFINE RELEVANT 'CORE PSYCHOPY' COLUMNS
# =======================================
# define list of 'core PsychoPy' experiment output data columns
# that are of interest for combining with/inserting into the
# eyetracker data frame (see below)
core_interesting_colnames = [
'att_grab_start_time_intended',
'gaze_to_audio_delay_intended',
'audio_to_visual_delay_intended',
'visual_duration_intended',
'end_blank_duration_intended',
'att_grab_start_time_actual',
'gaze_captured_time',
'audio_onset_time',
'visual_onset_time',
'visual_offset_time',
'trial_end_time',
'attention_sounds_played',
'visual_stimuli_duration_nframes',
'visual_social_prop',
'visual_geometric_prop',
'visual_manmade_prop',
'visual_natural_prop',
'visual_social_filepath',
'visual_social_pos_x',
'visual_social_pos_y',
'visual_geometric_filepath',
'visual_geometric_pos_x',
'visual_geometric_pos_y',
'visual_manmade_filepath',
'visual_manmade_pos_x',
'visual_manmade_pos_y',
'visual_natural_filepath',
'visual_natural_pos_x',
'visual_natural_pos_y',
'audio_filepath',
'audio_volume',
]
# =======================================
# START LOOPING THROUGH FILES
# =======================================
# loop through all of the CSV/HDF5 files and slightly correct
# and combine their data, then export results to one CSV
# file per set of experiment data
for csv_fname, et_hdf5_fname in zip(csv_fnames, hdf5_fnames):
et_hdf5_path = os.path.join(exp_data_dir, et_hdf5_fname)
csv_path = os.path.join(exp_data_dir, csv_fname)
# =======================================
# EXTRACT EYETRACKER DATA WITH H5PY
# =======================================
try:
            h5f = h5py.File(et_hdf5_path, 'r')
except OSError as e:
raise CorruptHDF5Exception(
f"HDF5 file '{et_hdf5_path}' appears to be corrupt and cannot "
'be processed.'
f'(original exception: {e})'
)
h5f_events = h5f['data_collection']['events']
# traverse hierarchical data structure to get at eye tracking data.
# check if 'mock' (using the mouse to simulate eyetracker gaze recording)
# data are used (in which case 'monocular' events
# have been registered), or if actual data are used
# (in which case 'BinocularEyeSampleEvent' have been registered,
# usually)
if 'BinocularEyeSampleEvent' in h5f_events['eyetracker'].keys():
eye_dataset = h5f_events['eyetracker']['BinocularEyeSampleEvent']
elif 'MonocularEyeSampleEvent' in h5f_events['eyetracker'].keys():
eye_dataset = h5f_events['eyetracker']['MonocularEyeSampleEvent']
else:
raise CorruptHDF5Exception(
f'HDF5 file {et_hdf5_path} appears to be corrupt and cannot '
'be processed.'
)
# traverse hierarchical data structure to get at data describing messages
# 'sent from PsychoPy', eg 'trial start' messages
message_dataset = h5f_events['experiment']['MessageEvent']
# convert eyetracker/message data to numpy arrays
eye_data_arr = np.array(eye_dataset)
message_data_arr = np.array(message_dataset)
# convert the eyetracker data numpy array to a pandas data frame
et_df = pd.DataFrame(eye_data_arr)
# convert 'messages' data numpy array to pandas data frame
msg_df = pd.DataFrame(message_data_arr)
# convert the 'messages' data frame's messages from 'byte' dtype
# to string literal dtype
msg_df['text'] = msg_df.text.str.decode('utf-8')
# add a 'message' column to the eyetracker data frame
et_df['message'] = np.nan
# for each recorded message, find the row in the 'et_df' which matches
# most closely with regard to time of recording, and
# insert the message into 'et_df'
for _, row in msg_df.iterrows():
closest_i = find_closest_index(et_df.time, row.time)
et_df.loc[closest_i, 'message'] = row.text
# close the connection to the hdf5 file
h5f.close()
# =======================================
# LOAD 'CORE PSYCHOPY' EXPERIMENT DATA
# =======================================
# import psychopy experiment output data
# (stimuli onset times/file names/positions et c.)
psyp_df = | pd.read_csv(csv_path) | pandas.read_csv |
import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
def log_return(list_stock_prices): # Stock prices are estimated through wap values
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
def compute_wap(book_pd):
wap = (book_pd['bid_price1'] * book_pd['ask_size1'] + book_pd['ask_price1'] * book_pd['bid_size1']) / (book_pd['bid_size1']+ book_pd['ask_size1'])
return wap
def realized_volatility_from_book_pd(book_stock_time):
wap = compute_wap(book_stock_time)
returns = log_return(wap)
volatility = realized_volatility(returns)
return volatility
def realized_volatility_per_time_id(file_path, prediction_column_name):
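    """Compute realized volatility of WAP log returns per time_id for one stock's
    book file and return a DataFrame with row_id and the prediction column."""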
df_book_data = pd.read_parquet(file_path)
# Estimate stock price per time point
df_book_data['wap'] = compute_wap(df_book_data)
# Compute log return from wap values per time_id
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
# Compute the square root of the sum of log return squared to get realized volatility
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
# Formatting
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
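    """Baseline: fit a RandomForest regressor on the naive past realized volatility
    (single feature) of the training books and predict the test targets; the merge
    assumes the naive prediction column is named 'pred'."""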
naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
X = np.array(df_joined_train['pred']).reshape(-1,1)
y = np.array(df_joined_train['target']).reshape(-1,)
regr = RandomForestRegressor(random_state=0)
regr.fit(X, y)
naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
updated_predictions = naive_predictions_test.copy()
updated_predictions['target'] = yhat
return updated_predictions
def garch_fit_predict_volatility(returns_series, N=10000):
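    """Fit a GARCH(1, 1) model to the (scaled) return series and forecast volatility
    over a 600-second horizon. Requires the `arch` package; its import is commented
    out at the top of this file."""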
model = arch_model(returns_series * N, p=1, q=1)
model_fit = model.fit(update_freq=0, disp='off')
yhat = model_fit.forecast(horizon=600, reindex=False)
pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
return pred_volatility
def garch_volatility_per_time_id(file_path, prediction_column_name):
# read the data
df_book_data = pd.read_parquet(file_path)
# calculate the midprice (not the WAP)
df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
# leave only WAP for now
df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
df_book_data = df_book_data.sort_values('seconds_in_bucket')
# make the book updates evenly spaced
df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
df_book_data_evenly = df_book_data_evenly.sort_values('second')
df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
df_book_data,
left_on='second',right_on='seconds_in_bucket',
by = 'time_id')
# Ordering for easier use
df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
# calculate log returns
df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
# fit GARCH(1, 1) and predict the volatility of returns
df_garch_vol_per_stock = \
pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
# add row_id column to the data
stock_id = file_path.split('=')[1]
df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
# return the result
return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
df_garch_predicted = pd.DataFrame()
for file in list_file:
df_garch_predicted = pd.concat([df_garch_predicted,
garch_volatility_per_time_id(file, prediction_column_name)])
return df_garch_predicted
def entropy_from_book(book_stock_time,last_min):
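    """Sample entropy of the WAP series over the last `last_min` minutes, after
    nearest-neighbour resampling onto a 1-second grid; returns 0 for short series."""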
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 3:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_wap(wap,seconds,last_seconds):
if last_seconds < 600:
idx = np.where(seconds >= last_seconds)[0]
if len(idx) < 3:
return 0
else:
wap = wap[idx]
seconds = seconds[idx]
# Closest neighbour interpolation (no changes in wap between lines)
t_new = np.arange(np.min(seconds),np.max(seconds))
nearest = interp1d(seconds, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
# sampleEntropy = ApEn_new(resampled_wap,3,0.001)
return sampleEntropy
def linearFit(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = np.array(compute_wap(book_stock_time))
t_init = book_stock_time['seconds_in_bucket']
return (wap[-1] - wap[0])/(np.max(t_init) - np.min(t_init))
def wapStat(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
return np.std(resampled_wap)
def entropy_Prediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test,all_stocks_ids,test_file):
# Compute features
book_features_encoded_test = computeFeatures_1(book_path_test,'test',test_file,all_stocks_ids)
book_features_encoded_train = computeFeatures_1(book_path_train,'train',train_targets_pd,all_stocks_ids)
X = book_features_encoded_train.drop(['row_id','target','stock_id'],axis=1)
y = book_features_encoded_train['target']
# Modeling
catboost_default = CatBoostRegressor(verbose=0)
catboost_default.fit(X,y)
# Predict
X_test = book_features_encoded_test.drop(['row_id','stock_id'],axis=1)
yhat = catboost_default.predict(X_test)
# Formatting
yhat_pd = pd.DataFrame(yhat,columns=['target'])
predictions = pd.concat([test_file,yhat_pd],axis=1)
return predictions
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
book_all_features = pd.DataFrame()
encoder = np.eye(len(all_stocks_ids))
stocks_id_list, row_id_list = [], []
volatility_list, entropy2_list = [], []
linearFit_list, linearFit5_list, linearFit2_list = [], [], []
wap_std_list, wap_std5_list, wap_std2_list = [], [], []
for file in book_path:
start = time.time()
book_stock = pd.read_parquet(file)
stock_id = file.split('=')[1]
print('stock id computing = ' + str(stock_id))
stock_time_ids = book_stock['time_id'].unique()
for time_id in stock_time_ids:
# Access book data at this time + stock
book_stock_time = book_stock[book_stock['time_id'] == time_id]
# Create feature matrix
stocks_id_list.append(stock_id)
row_id_list.append(str(f'{stock_id}-{time_id}'))
volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
# Merge targets
stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
# This line makes sure the predictions are aligned with the row_id in the submission file
book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
# Add encoded stock
encoded = list()
for i in range(book_all_features.shape[0]):
stock_id = book_all_features['stock_id'][i]
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
return book_all_features_encoded
def calc_wap(df):
return (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
return (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap3(df):
return (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
return (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
return df['bid_price1'] /2 + df['ask_price1'] / 2
def calc_rv_from_wap_numba(values, index):
log_return = np.diff(np.log(values))
realized_vol = np.sqrt(np.sum(np.square(log_return[1:])))
return realized_vol
def load_book_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'book_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def load_trades_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'trade_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def entropy_from_df(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df2(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap2'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df3(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap3'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def financial_metrics(df):
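    """Mean order-book features for one time_id: WAP imbalance, relative price spread,
    bid/ask spreads, total volume and volume imbalance."""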
wap_imbalance = np.mean(df['wap'] - df['wap2'])
price_spread = np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2))
bid_spread = np.mean(df['bid_price1'] - df['bid_price2'])
ask_spread = np.mean(df['ask_price1'] - df['ask_price2']) # Abs to take
total_volume = np.mean((df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2']))
volume_imbalance = np.mean(abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2'])))
return [wap_imbalance,price_spread,bid_spread,ask_spread,total_volume,volume_imbalance]
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
ask_spread = df['ask_price1'] - df['ask_price2'] # Abs to take
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std = np.std(volume_imbalance)
volume_imbalance_max = np.max(volume_imbalance)
volume_imbalance_min = np.min(volume_imbalance)
    return [wap_imbalance_mean, price_spread_mean, bid_spread_mean, ask_spread_mean, total_volume_mean, volume_imbalance_mean,
            wap_imbalance_sum, price_spread_sum, bid_spread_sum, ask_spread_sum, total_volume_sum, volume_imbalance_sum,
            wap_imbalance_std, price_spread_std, bid_spread_std, ask_spread_std, total_volume_std, volume_imbalance_std,
            wap_imbalance_max, price_spread_max, bid_spread_max, ask_spread_max, total_volume_max, volume_imbalance_max,
            wap_imbalance_min, price_spread_min, bid_spread_min, ask_spread_min, total_volume_min, volume_imbalance_min]
def other_metrics(df):
if df.shape[0] < 2:
linearFit = 0
linearFit2 = 0
linearFit3 = 0
std_1 = 0
std_2 = 0
std_3 = 0
else:
linearFit = (df['wap'].iloc[-1] - df['wap'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit2 = (df['wap2'].iloc[-1] - df['wap2'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit3 = (df['wap3'].iloc[-1] - df['wap3'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
# Resampling
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
nearest2 = interp1d(t_init, df['wap2'], kind='nearest')
nearest3 = interp1d(t_init, df['wap3'], kind='nearest')
std_1 = np.std(nearest(t_new))
std_2 = np.std(nearest2(t_new))
std_3 = np.std(nearest3(t_new))
return [linearFit, linearFit2, linearFit3, std_1, std_2, std_3]
def load_book_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/book_{train_test}.parquet/stock_id={stock_id}')
return df
def load_trades_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/trade_{train_test}.parquet/stock_id={stock_id}')
return df
def computeFeatures_wEntropy(machine, dataset, all_stocks_ids, datapath):
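    """Build the feature matrix per (stock_id, time_id): realized volatility for three
    WAP definitions over the full window and the last 5 / 2 minutes, sample entropy of
    the last 2 minutes, order-book financial metrics, linear-trend and WAP-std features,
    and a one-hot encoding of the stock id appended as extra columns."""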
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_5'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_5'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_2'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_2'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute entropy
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_ent = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df).to_frame().reset_index().fillna(0)
df_ent2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df2).to_frame().reset_index().fillna(0)
df_ent3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df3).to_frame().reset_index().fillna(0)
df_ent['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_ent['time_id']]
df_ent = df_ent.rename(columns={'time_id':'row_id',0:'entropy'})
df_ent2 = df_ent2.rename(columns={0:'entropy2'}).drop(['time_id'],axis=1)
df_ent3 = df_ent3.rename(columns={0:'entropy3'}).drop(['time_id'],axis=1)
df_ent = pd.concat([df_ent,df_ent2,df_ent3],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['entropy'])
temp2 = pd.DataFrame([0],columns=['entropy2'])
temp3 = pd.DataFrame([0],columns=['entropy3'])
df_ent = pd.concat([times_pd,temp,temp2,temp3],axis=1)
list_ent.append(df_ent)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_july(machine, dataset, all_stocks_ids, datapath):
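    """July variant of the feature computation; builds the same per-(stock_id, time_id)
    realized-volatility features for the three WAP definitions (full window, last 5 and
    last 2 minutes)."""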
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_5'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_5'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_2'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_2'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
# Entropy features are not computed in this variant, so list_ent stays empty;
# concatenating it would raise "No objects to concatenate" and is skipped here.
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
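# Illustrative usage sketch (not part of the original pipeline): one way computeFeatures_july
# could be driven for a local run. The datapath layout and the train-file name below are
# assumptions made only for this example.
def _example_run_july_features(datapath='data/'):
    train = pd.read_csv(datapath + 'train.csv')  # hypothetical file with stock_id / time_id / target
    all_stocks_ids = train['stock_id'].unique()
    # 'train' names the dataset folder convention expected by the loaders above
    return computeFeatures_july('local', 'train', all_stocks_ids, datapath)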
def computeFeatures_newTest_Laurent(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap4(book_stock)
book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_300'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_300'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_300'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_300'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_480'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_480'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_480'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_480'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
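# The one-hot encoding block above is repeated in several of these feature builders; the
# sketch below shows how it could be factored into a helper. It is an illustration only
# and is not called by the original code.
def encode_stock_id(df_book_features, all_stocks_ids):
    """Append a one-hot stock-id encoding (one column per known stock) to a feature frame."""
    encoder = np.eye(len(all_stocks_ids))
    encoded = []
    for row_id in df_book_features['row_id']:
        stock_id = int(row_id.split('-')[0])
        encoded.append(encoder[np.where(all_stocks_ids == stock_id)[0], :])
    encoded_pd = pd.DataFrame(
        np.array(encoded).reshape(df_book_features.shape[0], len(all_stocks_ids)))
    return pd.concat([df_book_features, encoded_pd], axis=1)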
def computeFeatures_newTest_Laurent_noCode(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap4(book_stock)
book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_300'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_300'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_300'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_300'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_480'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_480'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_480'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_480'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
return df_book_features
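# Unlike the variants above, this "_noCode" version returns the feature frame without the
# one-hot stock encoding. If the encoding is wanted afterwards it can be added separately,
# for example with the encode_stock_id sketch above:
#   df = computeFeatures_newTest_Laurent_noCode('local', 'train', all_stocks_ids, datapath)
#   df_encoded = encode_stock_id(df, all_stocks_ids)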
def computeFeatures_newTest_Laurent_wTrades(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
list_trades1, list_trades2 = [], []
list_vlad_book, list_vlad_trades = [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
trades_stock = load_trades_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
trades_stock = load_trades_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap4(book_stock)
book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_300'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_300'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_300'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_300'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_480'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_480'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_480'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_480'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance_mean','price_spread_mean','bid_spread_mean','ask_spread_mean','total_vol_mean','vol_imbalance_mean','wap_imbalance_sum','price_spread_sum','bid_spread_sum','ask_spread_sum','total_vol_sum','vol_imbalance_sum','wap_imbalance_std','price_spread_std','bid_spread_std','ask_spread_std','total_vol_std','vol_imbalance_std','wap_imbalance_max','price_spread_max','bid_spread_max','ask_spread_max','total_vol_max','vol_imbalance_max','wap_imbalance_min','price_spread_min','bid_spread_min','ask_spread_min','total_vol_min','vol_imbalance_min']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance_mean','price_spread_mean','bid_spread_mean','ask_spread_mean','total_vol_mean','vol_imbalance_mean','wap_imbalance_sum','price_spread_sum','bid_spread_sum','ask_spread_sum','total_vol_sum','vol_imbalance_sum','wap_imbalance_std','price_spread_std','bid_spread_std','ask_spread_std','total_vol_std','vol_imbalance_std','wap_imbalance_max','price_spread_max','bid_spread_max','ask_spread_max','total_vol_max','vol_imbalance_max','wap_imbalance_min','price_spread_min','bid_spread_min','ask_spread_min','total_vol_min','vol_imbalance_min']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
df_sub_book_feats_300 = df_sub_book_feats.copy()
for col in df_sub_book_feats_300.columns:
df_sub_book_feats_300[col].values[:] = 0
list_fin2.append(df_sub_book_feats_300)
# Trades features (sum, mean, std, max, min)
df_sub_trades_feats = trades_stock.groupby(['time_id'])[['price','size','order_count']].agg(['sum','mean','std','max','min']).reset_index()
df_sub_trades_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_trades_feats['time_id']]
df_sub_trades_feats = df_sub_trades_feats.rename(columns={'time_id':'row_id'})
list_trades1.append(df_sub_trades_feats)
# Query segments
bucketQuery300_trades = trades_stock.query(f'seconds_in_bucket >= 300')
isEmpty300_trades = bucketQuery300_trades.empty
if isEmpty300_trades == False:
df_sub_trades_300 = bucketQuery300_trades.groupby(['time_id'])[['price','size','order_count']].agg(['sum','mean','std','max','min']).reset_index()
df_sub_trades_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_trades_300['time_id']]
df_sub_trades_300 = df_sub_trades_300.rename(columns={'time_id':'row_id'})
else:
df_sub_trades_300 = df_sub_trades_feats.copy()
for col in df_sub_trades_300.columns:
df_sub_trades_300[col].values[:] = 0
list_trades2.append(df_sub_trades_300)
# Fin metrics book
df_fin_metrics_book = book_stock.groupby(['time_id']).apply(fin_metrics_book_data).to_frame().reset_index()
df_fin_metrics_book = df_fin_metrics_book.rename(columns={0:'embedding'})
df_fin_metrics_book[['spread','depth_imb']] = pd.DataFrame(df_fin_metrics_book.embedding.tolist(), index=df_fin_metrics_book.index)
df_fin_metrics_book['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_fin_metrics_book['time_id']]
df_fin_metrics_book = df_fin_metrics_book.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_vlad_book.append(df_fin_metrics_book)
# Fin metrics trades
df_fin_metrics_trades = trades_stock.groupby(['time_id']).apply(fin_metrics_trades_data).to_frame().reset_index()
df_fin_metrics_trades = df_fin_metrics_trades.rename(columns={0:'embedding'})
df_fin_metrics_trades[['roll_measure', 'roll_impact', 'mkt_impact', 'amihud']] = pd.DataFrame(df_fin_metrics_trades.embedding.tolist(), index=df_fin_metrics_trades.index)
df_fin_metrics_trades['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_fin_metrics_trades['time_id']]
df_fin_metrics_trades = df_fin_metrics_trades.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_vlad_trades.append(df_fin_metrics_trades)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_trades1 = | pd.concat(list_trades1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 12:05:22 2017
@author: rgryan
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
import datetime
import os
from decimal import Decimal
#from os import path
sh = True # Plotting the scale height info?
zc = False # Plotting the zero height concentration info?
re = False # Plotting the a.p. relative error info?
op = False # Plotting a.p. aerosol optical properties info?
path = 'E:\\Sciatran2\\TRACEGAS_RETRIEVAL_v-1-4\\Campaign\\'
testset = 'apElev'
# OPTIONS: aerGext aerSH tgSH tgGC aerAsy aerAng aerSSA apElev tgRErr
tg = 'HONO'
tg1 = 'HCHO'
#tg1 = tg
date = '20170307'
time = '130130'
startdate = datetime.datetime(2017, 3, 7, 6)
enddate = datetime.datetime(2017, 3, 7, 20)
tests = ['t131', 't102','t132']
dates = ['20170307','20170308', '20170309']
#scale_height = [0.2, 0.4, 0.6, 0.8, 1.0,1.2]
#ground extinction = [0.02,0.04, 0.06, 0.08, 0.1, 0.12]
#relerror = [0.1, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0]
#opprop = [0.3, 0.8, 1.29, 1.8]
#asy = [0.66, 0.69, 0.72, 0.75]
#ssa = [0.7, 0.8, 0.89, 1.0]
values = [-1.5, -1, -0.5]
colours = ['red', 'orange', 'yellow', 'green', 'lightseagreen',
'skyblue', 'mediumblue', 'midnightblue',
'darkviolet', 'darkmagenta', 'magenta', 'pink']
mm_rms = []
ppm_ave = []
vcd_ave = []
ppm_vals = pd.DataFrame()
ppm_err_vals = pd.DataFrame()
vcd_vals = | pd.DataFrame() | pandas.DataFrame |
"""
Functions for writing to .csv
September 2020
Written by <NAME>
"""
import os
import pandas as pd
import datetime
def define_deciles(regions):
"""
Allocate deciles to regions.
"""
regions = regions.sort_values(by='population_km2', ascending=True)
regions['decile'] = regions.groupby([
'GID_0',
'scenario',
'strategy',
'confidence'
], as_index=True).population_km2.apply( #cost_per_sp_user
pd.qcut, q=11, precision=0,
labels=[100,90,80,70,60,50,40,30,20,10,0],
duplicates='drop') # [0,10,20,30,40,50,60,70,80,90,100]
return regions
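# Minimal sketch of how define_deciles is meant to be used, on made-up data: within each
# GID_0/scenario/strategy/confidence group, regions are binned by population density with
# qcut; given the label order above, the least dense bin is labelled 100 and the densest 0.
def _example_define_deciles():
    regions = pd.DataFrame({
        'GID_0': ['A'] * 11,
        'scenario': ['baseline'] * 11,
        'strategy': ['s1'] * 11,
        'confidence': [50] * 11,
        'population_km2': list(range(1, 12)),
    })
    return define_deciles(regions)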
def write_demand(regional_annual_demand, folder):
"""
Write all annual demand results.
"""
print('Writing annual_mno_demand')
regional_annual_demand = pd.DataFrame(regional_annual_demand)
regional_annual_demand = regional_annual_demand.loc[
regional_annual_demand['scenario'] == 'baseline_10_10_10']
# regional_annual_mno_demand = regional_annual_demand[[
# 'GID_0', 'GID_id', 'scenario', 'strategy',
# 'confidence', 'year', 'population', 'area_km2', 'population_km2',
# 'geotype', 'arpu_discounted_monthly',
# 'penetration', 'population_with_phones','phones_on_network',
# 'smartphone_penetration', 'smartphones_on_network', 'revenue'
# ]]
# filename = 'regional_annual_mno_demand.csv'
# path = os.path.join(folder, filename)
# regional_annual_mno_demand.to_csv(path, index=False)
print('Writing annual_market_demand')
regional_annual_market_demand = regional_annual_demand[[
'GID_0', 'GID_id', 'scenario', 'strategy',
'confidence', 'year', 'population',
# 'population_f_over_10', 'population_m_over_10',
'area_km2', 'population_km2',
'geotype', 'arpu_discounted_monthly',
# 'penetration_female',
# 'penetration_male',
'penetration',
'population_with_phones',
# 'population_with_phones_f_over_10',
# 'population_with_phones_m_over_10',
'smartphone_penetration',
'population_with_smartphones',
# 'population_with_smartphones_f_over_10',
# 'population_with_smartphones_m_over_10',
'revenue'
]]
filename = 'regional_annual_market_demand.csv'
path = os.path.join(folder, filename)
regional_annual_market_demand.to_csv(path, index=False)
def write_results(regional_results, folder, metric):
"""
Write all results.
"""
print('Writing national MNO results')
national_results = pd.DataFrame(regional_results)
national_results = national_results[[
'GID_0', 'scenario', 'strategy', 'confidence', 'population_total', 'area_km2',
'phones_on_network', 'smartphones_on_network', 'total_estimated_sites',
'existing_mno_sites', 'upgraded_mno_sites', 'new_mno_sites',
'total_mno_revenue', 'total_mno_cost',
]]
national_results = national_results.drop_duplicates()
national_results = national_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence'], as_index=True).sum()
national_results['cost_per_network_user'] = (
national_results['total_mno_cost'] / national_results['phones_on_network'])
national_results['cost_per_smartphone_user'] = (
national_results['total_mno_cost'] / national_results['smartphones_on_network'])
path = os.path.join(folder,'national_mno_results_{}.csv'.format(metric))
national_results.to_csv(path, index=True)
print('Writing national cost composition results')
national_cost_results = pd.DataFrame(regional_results)
national_cost_results = national_cost_results[[
'GID_0', 'scenario', 'strategy', 'confidence', 'population_total',
'phones_on_network', 'smartphones_on_network', 'total_mno_revenue',
'ran', 'backhaul_fronthaul', 'civils', 'core_network',
'administration', 'spectrum_cost', 'tax', 'profit_margin',
'total_mno_cost', 'available_cross_subsidy', 'deficit',
'used_cross_subsidy', 'required_state_subsidy',
]]
national_cost_results = national_cost_results.drop_duplicates()
national_cost_results = national_cost_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence'], as_index=True).sum()
national_cost_results['cost_per_network_user'] = (
national_cost_results['total_mno_cost'] /
national_cost_results['phones_on_network'])
national_cost_results['cost_per_smartphone_user'] = (
national_cost_results['total_mno_cost'] /
national_cost_results['smartphones_on_network'])
#Calculate private, govt and societal costs
national_cost_results['private_cost'] = national_cost_results['total_mno_cost']
national_cost_results['government_cost'] = (
national_cost_results['required_state_subsidy'] -
(national_cost_results['spectrum_cost'] + national_cost_results['tax']))
national_cost_results['societal_cost'] = (
national_cost_results['private_cost'] + national_cost_results['government_cost'])
path = os.path.join(folder,'national_mno_cost_results_{}.csv'.format(metric))
national_cost_results.to_csv(path, index=True)
print('Writing general decile results')
decile_results = pd.DataFrame(regional_results)
decile_results = define_deciles(decile_results)
decile_results = decile_results[[
'GID_0', 'scenario', 'strategy', 'decile', 'confidence',
'population_total', 'area_km2', 'phones_on_network',
'smartphones_on_network', 'total_estimated_sites',
'existing_mno_sites', 'upgraded_mno_sites', 'new_mno_sites',
'total_mno_revenue', 'total_mno_cost',
]]
decile_results = decile_results.drop_duplicates()
decile_results = decile_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence', 'decile'], as_index=True).sum()
decile_results['population_km2'] = (
decile_results['population_total'] / decile_results['area_km2'])
decile_results['phone_density_on_network_km2'] = (
decile_results['phones_on_network'] / decile_results['area_km2'])
decile_results['sp_density_on_network_km2'] = (
decile_results['smartphones_on_network'] / decile_results['area_km2'])
decile_results['total_estimated_sites_km2'] = (
decile_results['total_estimated_sites'] / decile_results['area_km2'])
decile_results['existing_mno_sites_km2'] = (
decile_results['existing_mno_sites'] / decile_results['area_km2'])
decile_results['cost_per_network_user'] = (
decile_results['total_mno_cost'] / decile_results['phones_on_network'])
decile_results['cost_per_smartphone_user'] = (
decile_results['total_mno_cost'] / decile_results['smartphones_on_network'])
path = os.path.join(folder,'decile_mno_results_{}.csv'.format(metric))
decile_results.to_csv(path, index=True)
print('Writing cost decile results')
decile_cost_results = | pd.DataFrame(regional_results) | pandas.DataFrame |
import sys
sys.path.insert(0, "../")
import xalpha as xa
from xalpha.exceptions import FundTypeError
import pandas as pd
import pytest
ioconf = {"save": True, "fetch": True, "path": "pytest", "form": "csv"}
ca = xa.cashinfo(interest=0.0002, start="2015-01-01")
zzhb = xa.indexinfo("0000827", **ioconf)
hs300 = xa.fundinfo("000311")
zogqb = xa.mfundinfo("001211", **ioconf)
def test_fundreport():
# somehow fragile, to be checked
r = xa.FundReport("000827")
assert r.get_report()[0][:2] == "广发"
assert r.analyse_report(1)["bank"][:2] == "兴业"
assert r.show_report_list(type_=0)[0]["FUNDCODE"] == "000827"
assert r.get_report(id_="AN202003171376532533")[0][:2] == "广发"
def test_cash():
assert (
round(ca.price[ca.price["date"] == "2018-01-02"].iloc[0].netvalue, 4) == 1.2453
)
assert ca.code == "mf"
date, value, share = ca.shuhui(
300, "2018-01-01", [[pd.Timestamp("2017-01-03"), 200]]
)
assert date == pd.Timestamp("2018-01-02")
assert value == 249.06
assert share == -200
ca.bcmkset(ca)
assert ca.alpha() == 0
assert round(ca.total_annualized_returns("2018-01-01"), 4) == 0.0757
def test_index():
assert (
round(zzhb.price[zzhb.price["date"] == "2012-02-01"].iloc[0].totvalue, 3)
== 961.406
)
assert (
round(zzhb.price[zzhb.price["date"] == "2015-02-02"].iloc[0].netvalue, 2)
== 1.62
)
assert zzhb.name == "中证环保"
assert zzhb.shengou(100, "2018-01-02")[2] == 55.24
assert zzhb.shuhui(100, "2016-01-01", [[pd.Timestamp("2017-01-03"), 200]])[2] == 0
zzhb.info()
zzhb.ma(window=10)
zzhb.md()
zzhb.ema(col="totvalue")
zzhb.macd()
zzhb.mtm()
zzhb.roc()
zzhb.boll()
zzhb.bias()
zzhb.rsi()
zzhb.kdj()
zzhb.wnr()
zzhb.dma(col="totvalue")
zzhb.bbi()
zzhb.trix(col="totvalue")
zzhb.psy()
row = zzhb.price[zzhb.price["date"] == "2018-08-01"].iloc[0]
assert round(row["MD5"], 3) == 0.012
assert round(row["MA10"], 3) == 1.361
assert round(row["MACD_OSC_12_26"], 4) == 0.0076
assert round(row["EMA5"], 1) == 1318.8
assert round(row["MTM10"], 4) == 0.0078
assert round(row["ROC10"], 4) == 0.0058
assert round(row["BOLL_UPPER"], 3) == 1.398
assert round(row["BIAS10"], 3) == -0.012
assert round(row["RSI14"], 3) == 0.411
assert round(row["KDJ_J"], 4) == 0.0456
assert round(row["WNR14"], 2) == 0.27
assert round(row["AMA"], 2) == -87.71
assert round(row["BBI"], 3) == 1.356
assert round(row["TRIX10"], 4) == 0.0005
assert round(row["PSYMA12"], 2) == 0.47
zzhb.v_techindex(col=["TRIX10"])
def test_fund():
assert hs300.round_label == 1
assert hs300.name == "景顺长城沪深300指数增强" ## previously "景顺长城沪深300增强"; the fund was renamed for no obvious reason
assert hs300.fenhongdate[1] == pd.Timestamp("2017-08-15")
assert hs300.get_holdings(2019, 4).iloc[0]["name"] == "中国平安"
assert (
float(hs300.special[hs300.special["date"] == "2017-08-04"]["comment"]) == 0.19
)
hs300.rate = 0.12
hs300.segment = [[0, 7], [7, 365], [365, 730], [730]]
with pytest.raises(Exception) as excinfo:
hs300.shuhui(
100,
"2014-01-04",
[[pd.Timestamp("2014-01-03"), 200], [pd.Timestamp("2017-01-03"), 200]],
)
assert str(excinfo.value) == "One cannot move share before the lastest operation"
assert (
hs300.shuhui(
320,
"2018-01-01",
[[ | pd.Timestamp("2011-01-03") | pandas.Timestamp |
"""Tools for generating and forecasting with ensembles of models."""
import datetime
import numpy as np
import pandas as pd
import json
from autots.models.base import PredictionObject
from autots.models.model_list import no_shared
from autots.tools.impute import fill_median
horizontal_aliases = ['horizontal', 'probabilistic']
def summarize_series(df):
"""Summarize time series data. For now just df.describe()."""
df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
return df_sum
def mosaic_or_horizontal(all_series: dict):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
"""
first_value = all_series[next(iter(all_series))]
if isinstance(first_value, dict):
return "mosaic"
else:
return "horizontal"
def parse_horizontal(all_series: dict, model_id: str = None, series_id: str = None):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
model_id (str): name of model to find series for
series_id (str): name of series to find models for
Returns:
list
"""
if model_id is None and series_id is None:
raise ValueError(
"either series_id or model_id must be specified in parse_horizontal."
)
if mosaic_or_horizontal(all_series) == 'mosaic':
if model_id is not None:
return [ser for ser, mod in all_series.items() if model_id in mod.values()]
else:
return list(set(all_series[series_id].values()))
else:
if model_id is not None:
return [ser for ser, mod in all_series.items() if mod == model_id]
else:
# list(set([mod for ser, mod in all_series.items() if ser == series_id]))
return [all_series[series_id]]
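# Example of the two query directions on a horizontal dict (values are made up):
#     all_series = {'seriesA': 'model1', 'seriesB': 'model2', 'seriesC': 'model1'}
#     parse_horizontal(all_series, model_id='model1')    # -> ['seriesA', 'seriesC']
#     parse_horizontal(all_series, series_id='seriesB')  # -> ['model2']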
def BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime: dict,
prediction_interval: float = 0.9,
):
"""Generate mean forecast for ensemble of models.
Args:
ensemble_params (dict): BestN ensemble param dict
should have "model_weights": {model_id: weight} where 1 is default weight per model
forecasts (dict): {forecast_id: forecast dataframe} for all models
same for lower_forecasts, upper_forecasts
forecasts_runtime (dict): dictionary of {forecast_id: timedelta of runtime}
prediction_interval (float): metadata on interval
"""
startTime = datetime.datetime.now()
forecast_keys = list(forecasts.keys())
model_weights = dict(ensemble_params.get("model_weights", {}))
ensemble_params['model_weights'] = model_weights
ensemble_params['models'] = {
k: v
for k, v in dict(ensemble_params.get('models')).items()
if k in forecast_keys
}
model_count = len(forecast_keys)
if model_count < 1:
raise ValueError("BestN failed, no component models available.")
sample_df = next(iter(forecasts.values()))
columnz = sample_df.columns
indices = sample_df.index
model_divisor = 0
ens_df = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in forecasts.items():
current_weight = float(model_weights.get(idx, 1))
ens_df = ens_df + (x * current_weight)
# also .get(idx, 0)
ens_df_lower = ens_df_lower + (lower_forecasts[idx] * current_weight)
ens_df_upper = ens_df_upper + (upper_forecasts[idx] * current_weight)
model_divisor = model_divisor + current_weight
ens_df = ens_df / model_divisor
ens_df_lower = ens_df_lower / model_divisor
ens_df_upper = ens_df_upper / model_divisor
ens_runtime = datetime.timedelta(0)
for x in forecasts_runtime.values():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
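# Minimal sketch of calling BestNEnsemble with two toy component forecasts; the model ids,
# dates and values below are invented purely for illustration.
def _example_best_n_ensemble():
    idx = pd.date_range("2022-01-01", periods=3, freq="D")
    f1 = pd.DataFrame({'seriesA': [1.0, 2.0, 3.0]}, index=idx)
    f2 = pd.DataFrame({'seriesA': [3.0, 4.0, 5.0]}, index=idx)
    params = {
        'models': {'id1': {}, 'id2': {}},
        'model_weights': {'id1': 1, 'id2': 1},
    }
    runtimes = {
        'id1': datetime.timedelta(seconds=1),
        'id2': datetime.timedelta(seconds=2),
    }
    # the ensemble forecast is the weighted mean of f1 and f2; bounds are averaged the same way
    return BestNEnsemble(
        params,
        forecasts={'id1': f1, 'id2': f2},
        lower_forecasts={'id1': f1 - 1, 'id2': f2 - 1},
        upper_forecasts={'id1': f1 + 1, 'id2': f2 + 1},
        forecasts_runtime=runtimes,
        prediction_interval=0.9,
    )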
def DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for distance ensemble."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
first_model_index = forecasts_list.index(ensemble_params['FirstModel'])
second_model_index = forecasts_list.index(ensemble_params['SecondModel'])
forecast_length = forecasts[0].shape[0]
dis_frac = ensemble_params['dis_frac']
first_bit = int(np.ceil(forecast_length * dis_frac))
second_bit = int(np.floor(forecast_length * (1 - dis_frac)))
ens_df = pd.concat([
    forecasts[first_model_index].head(first_bit),
    forecasts[second_model_index].tail(second_bit),
])
ens_df_lower = pd.concat([
    lower_forecasts[first_model_index].head(first_bit),
    lower_forecasts[second_model_index].tail(second_bit),
])
ens_df_upper = pd.concat([
    upper_forecasts[first_model_index].head(first_bit),
    upper_forecasts[second_model_index].tail(second_bit),
])
id_list = list(ensemble_params['models'].keys())
model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list]
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in model_indexes:
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result_obj = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result_obj
def horizontal_classifier(df_train, known: dict, method: str = "whatever"):
"""
Classify unknown series with the appropriate model for horizontal ensembling.
Args:
df_train (pandas.DataFrame): historical data about the series. Columns = series_ids.
known (dict): dict of series_id: classifier outcome including some but not all series in df_train.
Returns:
dict.
"""
# known = {'EXUSEU': 'xx1', 'MCOILWTICO': 'xx2', 'CSUSHPISA': 'xx3'}
columnz = df_train.columns.tolist()
X = summarize_series(df_train).transpose()
X = fill_median(X)
known_l = list(known.keys())
unknown = list(set(columnz) - set(known_l))
Xt = X.loc[known_l]
Xf = X.loc[unknown]
Y = np.array(list(known.values()))
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(Xt, Y)
result = clf.predict(Xf)
result_d = dict(zip(Xf.index.tolist(), result))
# since this only has estimates, overwrite with known that includes more
final = {**result_d, **known}
# temp = pd.DataFrame({'series': list(final.keys()), 'model': list(final.values())})
# temp2 = temp.merge(X, left_on='series', right_index=True)
return final
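# Sketch of how horizontal_classifier fills in unlabeled series: given history for four
# series and known model picks for three, the remaining series gets the model predicted
# by a naive Bayes classifier fit on per-series summary statistics. The data below is
# random and purely illustrative.
def _example_horizontal_classifier():
    idx = pd.date_range("2022-01-01", periods=50, freq="D")
    rng = np.random.default_rng(0)
    df_train = pd.DataFrame(
        rng.normal(size=(50, 4)), index=idx, columns=['s1', 's2', 's3', 's4'])
    known = {'s1': 'model_a', 's2': 'model_b', 's3': 'model_a'}
    return horizontal_classifier(df_train, known)  # result also contains an estimate for 's4'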
def mosaic_classifier(df_train, known):
"""CLassify unknown series with the appropriate model for mosaic ensembles."""
known.index.name = "forecast_period"
upload = pd.melt(
known,
var_name="series_id",
value_name="model_id",
ignore_index=False,
).reset_index(drop=False)
upload['forecast_period'] = upload['forecast_period'].astype(int)
missing_cols = df_train.columns[
~df_train.columns.isin(upload['series_id'].unique())
]
if not missing_cols.empty:
forecast_p = np.arange(upload['forecast_period'].max() + 1)
p_full = np.tile(forecast_p, len(missing_cols))
missing_rows = pd.DataFrame(
{
'forecast_period': p_full,
'series_id': np.repeat(missing_cols.values, len(forecast_p)),
'model_id': np.nan,
},
index=None if len(p_full) > 1 else [0],
)
upload = pd.concat([upload, missing_rows])
X = fill_median(
(summarize_series(df_train).transpose()).merge(
upload, left_index=True, right_on="series_id"
)
)
X.set_index("series_id", inplace=True) # .drop(columns=['series_id'], inplace=True)
to_predict = X[X['model_id'].isna()].drop(columns=['model_id'])
X = X[~X['model_id'].isna()]
Y = X['model_id']
Xf = X.drop(columns=['model_id'])
# from sklearn.linear_model import RidgeClassifier
# from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(Xf, Y)
predicted = clf.predict(to_predict)
result = pd.concat(
[to_predict.reset_index(drop=False), pd.Series(predicted, name="model_id")],
axis=1,
)
cols_needed = ['model_id', 'series_id', 'forecast_period']
final = pd.concat(
[X.reset_index(drop=False)[cols_needed], result[cols_needed]], sort=True, axis=0
)
final['forecast_period'] = final['forecast_period'].astype(str)
final = final.pivot(values="model_id", columns="series_id", index="forecast_period")
try:
final = final[df_train.columns]
if final.isna().to_numpy().sum() > 0:
raise KeyError("NaN in mosaic generalization")
except KeyError as e:
raise ValueError(
f"mosaic_classifier failed to generalize for all columns: {repr(e)}"
)
return final
def generalize_horizontal(
df_train, known_matches: dict, available_models: list, full_models: list = None
):
"""generalize a horizontal model trained on a subset of all series
Args:
df_train (pd.DataFrame): time series data
known_matches (dict): series:model dictionary for some to all series
available_models (dict): list of models actually available
full_models (dict): models that are available for every single series
"""
org_idx = df_train.columns
org_list = org_idx.tolist()
# remove any unnecessary series
known_matches = {ser: mod for ser, mod in known_matches.items() if ser in org_list}
# here split for mosaic or horizontal
if mosaic_or_horizontal(known_matches) == "mosaic":
# make it a dataframe
mosaicy = pd.DataFrame.from_dict(known_matches)
# remove unavailable models
mosaicy = pd.DataFrame(mosaicy[mosaicy.isin(available_models)])
# so we can fill some missing by just using a forward fill, should be good enough
mosaicy.fillna(method='ffill', limit=5, inplace=True)
mosaicy.fillna(method='bfill', limit=5, inplace=True)
if mosaicy.isna().any().any() or mosaicy.shape[1] != df_train.shape[1]:
if full_models is not None:
k2 = pd.DataFrame(mosaicy[mosaicy.isin(full_models)])
else:
k2 = mosaicy.copy()
final = mosaic_classifier(df_train, known=k2)
return final.to_dict()
else:
return mosaicy.to_dict()
else:
# remove any unavailable models
k = {ser: mod for ser, mod in known_matches.items() if mod in available_models}
# check if any series are missing from model list
if not k:
raise ValueError("Horizontal template has no models matching this data!")
# test if generalization is needed
if len(set(org_list) - set(list(k.keys()))) > 0:
# filter down to only models available for all
# print(f"Models not available: {[ser for ser, mod in known_matches.items() if mod not in available_models]}")
# print(f"Series not available: {[ser for ser in df_train.columns if ser not in list(known_matches.keys())]}")
if full_models is not None:
k2 = {ser: mod for ser, mod in k.items() if mod in full_models}
else:
k2 = k.copy()
all_series_part = horizontal_classifier(df_train, k2)
# since this only has "full", overwrite with known that includes more
all_series = {**all_series_part, **k}
else:
all_series = known_matches
return all_series
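# Illustrative usage sketch for generalize_horizontal (not part of the original module);
# series and model ids are hypothetical. In practice known_matches comes from a stored
# horizontal template and available_models from the models that ran successfully.
def _example_generalize_horizontal_usage(df_train):
    known_matches = {"s1": "model_id_1", "s2": "model_id_2"}
    available_models = ["model_id_1", "model_id_2", "model_id_3"]
    full_models = ["model_id_1"]  # models with forecasts for every series
    return generalize_horizontal(df_train, known_matches, available_models, full_models)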
def HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Generate forecast for per_series ensembling."""
startTime = datetime.datetime.now()
# this is meant to fill in any failures
available_models = [mod for mod, fcs in forecasts.items() if fcs.shape[0] > 0]
train_size = df_train.shape
# print(f"running inner generalization with training size: {train_size}")
full_models = [
mod for mod, fcs in forecasts.items() if fcs.shape[1] == train_size[1]
]
if not full_models:
print("No full models available for horizontal generalization!")
full_models = available_models # hope it doesn't need to fill
# print(f"FULLMODEL {len(full_models)}: {full_models}")
if prematched_series is None:
prematched_series = ensemble_params['series']
all_series = generalize_horizontal(
df_train, prematched_series, available_models, full_models
)
# print(f"ALLSERIES {len(all_series.keys())}: {all_series}")
org_idx = df_train.columns
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in all_series.items():
try:
c_fore = forecasts[mod_id][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
print(f"Horizontal ensemble unable to add model {mod_id} {repr(e)}")
# upper
c_fore = upper_forecasts[mod_id][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[mod_id][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
# make sure columns align to original
forecast_df = forecast_df.reindex(columns=org_idx)
u_forecast_df = u_forecast_df.reindex(columns=org_idx)
l_forecast_df = l_forecast_df.reindex(columns=org_idx)
# combine runtimes
try:
ens_runtime = sum(list(forecasts_runtime.values()), datetime.timedelta())
except Exception:
ens_runtime = datetime.timedelta(0)
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
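# Sketch of the input shapes HorizontalEnsemble expects (assumptions, not original code):
# forecasts, lower_forecasts and upper_forecasts are dicts keyed by model id, each holding a
# DataFrame indexed by the forecast dates with one column per series that model forecast;
# forecasts_runtime maps the same model ids to datetime.timedelta objects.
def _example_horizontal_ensemble_inputs(forecast_index):
    import datetime
    import pandas as pd
    fc = pd.DataFrame(1.0, index=forecast_index, columns=["s1", "s2"])
    forecasts = {"model_id_1": fc}
    lower_forecasts = {"model_id_1": fc * 0.9}
    upper_forecasts = {"model_id_1": fc * 1.1}
    forecasts_runtime = {"model_id_1": datetime.timedelta(seconds=5)}
    return forecasts, lower_forecasts, upper_forecasts, forecasts_runtime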
def HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for per_series per distance ensembling."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
id_list = list(ensemble_params['models'].keys())
mod_dic = {x: idx for idx, x in enumerate(forecasts_list) if x in id_list}
forecast_length = forecasts[0].shape[0]
dist_n = int(np.ceil(ensemble_params['dis_frac'] * forecast_length))
dist_last = forecast_length - dist_n
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series1'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
forecast_df2, u_forecast_df2, l_forecast_df2 = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series2'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df2 = pd.concat([forecast_df2, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df2 = pd.concat([u_forecast_df2, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df2 = pd.concat([l_forecast_df2, c_fore], axis=1)
forecast_df = pd.concat(
[forecast_df.head(dist_n), forecast_df2.tail(dist_last)], axis=0
)
u_forecast_df = pd.concat(
[u_forecast_df.head(dist_n), u_forecast_df2.tail(dist_last)], axis=0
)
l_forecast_df = pd.concat(
[l_forecast_df.head(dist_n), l_forecast_df2.tail(dist_last)], axis=0
)
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in list(mod_dic.values()):
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def EnsembleForecast(
ensemble_str,
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Return PredictionObject for given ensemble method."""
ens_model_name = ensemble_params['model_name'].lower().strip()
s3list = ['best3', 'best3horizontal', 'bestn']
if ens_model_name in s3list:
ens_forecast = BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
elif ens_model_name == 'dist':
ens_forecast = DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
elif ens_model_name in horizontal_aliases:
ens_forecast = HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=df_train,
prematched_series=prematched_series,
)
return ens_forecast
elif ens_model_name == "mosaic":
ens_forecast = MosaicEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=df_train,
prematched_series=prematched_series,
)
return ens_forecast
elif ens_model_name == 'hdist':
ens_forecast = HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
else:
raise ValueError("Ensemble model type not recognized.")
def _generate_distance_ensemble(dis_frac, forecast_length, initial_results):
"""Constructs a distance ensemble dictionary."""
first_bit = int(np.ceil(forecast_length * dis_frac))
last_bit = int(np.floor(forecast_length * (1 - dis_frac)))
not_ens_list = initial_results.model_results[
initial_results.model_results['Ensemble'] == 0
]['ID'].tolist()
ens_per_ts = initial_results.per_timestamp_smape[
initial_results.per_timestamp_smape.index.isin(not_ens_list)
]
first_model = ens_per_ts.iloc[:, 0:first_bit].mean(axis=1).idxmin()
last_model = (
ens_per_ts.iloc[:, first_bit : (last_bit + first_bit)].mean(axis=1).idxmin()
)
ensemble_models = {}
best3 = (
initial_results.model_results[
initial_results.model_results['ID'].isin([first_model, last_model])
]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
)
ensemble_models = best3.to_dict(orient='index')
return {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': 'Dist',
'model_count': 2,
'model_metric': 'smape',
'models': ensemble_models,
'dis_frac': dis_frac,
'FirstModel': first_model,
'SecondModel': last_model,
}
),
'TransformationParameters': '{}',
'Ensemble': 1,
}
def _generate_bestn_dict(
best,
model_name: str = 'BestN',
model_metric: str = "best_score",
model_weights: dict = None,
):
ensemble_models = best.to_dict(orient='index')
model_parms = {
'model_name': model_name,
'model_count': best.shape[0],
'model_metric': model_metric,
'models': ensemble_models,
}
if model_weights is not None:
model_parms['model_weights'] = model_weights
return {
'Model': 'Ensemble',
'ModelParameters': json.dumps(model_parms),
'TransformationParameters': '{}',
'Ensemble': 1,
}
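# Illustrative usage sketch (not part of the original module): build a best-3 template row
# from an initial_results object, mirroring the selection done in EnsembleTemplateGenerator below.
def _example_bestn_template_row(initial_results):
    best3 = (
        initial_results.model_results.drop_duplicates(subset='ID')
        .nsmallest(3, columns=['Score'])
        .set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
    )
    return _generate_bestn_dict(best3, model_name='BestN', model_metric="best_score")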
def EnsembleTemplateGenerator(
initial_results,
forecast_length: int = 14,
ensemble: str = "simple",
score_per_series=None,
):
"""Generate class 1 (non-horizontal) ensemble templates given a table of results."""
ensemble_templates = pd.DataFrame()
ens_temp = initial_results.model_results.drop_duplicates(subset='ID')
# filter out horizontal ensembles
ens_temp = ens_temp[ens_temp['Ensemble'] <= 1]
if 'simple' in ensemble:
# best 3, all can be of same model type
best3nonunique = ens_temp.nsmallest(3, columns=['Score']).set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = best3nonunique.shape[0]
if n_models == 3:
best3nu_params = pd.DataFrame(
_generate_bestn_dict(
best3nonunique, model_name='BestN', model_metric="best_score"
),
index=[0],
)
ensemble_templates = pd.concat([ensemble_templates, best3nu_params], axis=0)
# best 3, by SMAPE, RMSE, SPL
bestsmape = ens_temp.nsmallest(1, columns=['smape_weighted'])
bestrmse = ens_temp.nsmallest(2, columns=['rmse_weighted'])
bestmae = ens_temp.nsmallest(3, columns=['spl_weighted'])
best3metric = pd.concat([bestsmape, bestrmse, bestmae], axis=0)
best3metric = (
best3metric.drop_duplicates()
.head(3)
.set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
)
n_models = best3metric.shape[0]
if n_models == 3:
best3m_params = pd.DataFrame(
_generate_bestn_dict(
best3metric, model_name='BestN', model_metric="mixed_metric"
),
index=[0],
)
ensemble_templates = pd.concat([ensemble_templates, best3m_params], axis=0)
# best 3, all must be of different model types
ens_temp = (
ens_temp.sort_values('Score', ascending=True, na_position='last')
.groupby('Model')
.head(1)
.reset_index(drop=True)
)
best3unique = ens_temp.nsmallest(3, columns=['Score']).set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = best3unique.shape[0]
if n_models == 3:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
best3unique, model_name='BestN', model_metric="best_score_unique"
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
if 'distance' in ensemble:
dis_frac = 0.2
distance_params = pd.DataFrame(
_generate_distance_ensemble(dis_frac, forecast_length, initial_results),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, distance_params], axis=0, ignore_index=True
)
dis_frac = 0.5
distance_params2 = pd.DataFrame(
_generate_distance_ensemble(dis_frac, forecast_length, initial_results),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, distance_params2], axis=0, ignore_index=True
)
# in previous versions per_series metrics were only captured if 'horizontal' was passed
if 'simple' in ensemble:
if score_per_series is None:
per_series = initial_results.per_series_mae
else:
per_series = score_per_series
per_series = per_series[per_series.index.isin(ens_temp['ID'].tolist())]
# make it ranking based! Need bigger=better for weighting
per_series_ranked = per_series.rank(ascending=False)
# choose best n based on score per series
n = 3
chosen_ones = per_series_ranked.sum(axis=1).nlargest(n)
bestn = ens_temp[ens_temp['ID'].isin(chosen_ones.index.tolist())].set_index(
"ID"
)[['Model', 'ModelParameters', 'TransformationParameters']]
n_models = bestn.shape[0]
if n_models == n:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
bestn,
model_name='BestN',
model_metric="bestn_horizontal",
model_weights=chosen_ones.to_dict(),
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
# cluster and then make best model per cluster
if per_series.shape[1] > 4:
try:
from sklearn.cluster import AgglomerativeClustering
max_clusters = 8
n_clusters = round(per_series.shape[1] / 3)
n_clusters = max_clusters if n_clusters > max_clusters else n_clusters
X = per_series_ranked.transpose()
clstr = AgglomerativeClustering(n_clusters=n_clusters).fit(X)
series_labels = clstr.labels_
for cluster in np.unique(series_labels).tolist():
current_ps = per_series_ranked[
per_series_ranked.columns[series_labels == cluster]
]
n = 3
chosen_ones = current_ps.sum(axis=1).nlargest(n)
bestn = ens_temp[
ens_temp['ID'].isin(chosen_ones.index.tolist())
].set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = bestn.shape[0]
if n_models == n:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
bestn,
model_name='BestN',
model_metric=f"cluster_{cluster}",
model_weights=chosen_ones.to_dict(),
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params],
axis=0,
ignore_index=True,
)
except Exception as e:
print(f"cluster-based simple ensemble failed with {repr(e)}")
        mods = pd.Series()
import numpy as np
import pandas as pd
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.losses import Loss
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
plt.close("all")
from scoringRules import es_sample, crps_sample
from igep_models_all_tem_noplot import igep
import tensorflow.compat.v1 as tfv
tfv.disable_v2_behavior()
DIM = 10 # dimension of target values
dist_samples = pd.read_csv('/home/chen_jieyu/IGEP/ws_dist_10samples.csv', header=None)
# Read data
path = '/home/chen_jieyu/IGEP/ECMWF_wind_data.feather'
t2m_ens_complete = pd.read_feather(path)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 21:12:17 2020
@author: sergiomarconi
"""
import numpy as np
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import BaggingClassifier
from mlxtend.classifier import StackingCVClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from mlxtend.classifier import SoftmaxRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.ensemble import RUSBoostClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import SVC
#define models
rf = RandomForestClassifier(random_state=0, oob_score = True, n_jobs = 1,
n_estimators = 500, max_features = 'sqrt', criterion = 'entropy')
knn = KNeighborsClassifier(n_jobs = 1, weights = 'uniform', n_neighbors = 1, p=2)
gb = HistGradientBoostingClassifier(random_state=0, max_iter = 1000, learning_rate = 0.1,
max_depth = 25, loss = 'categorical_crossentropy', l2_regularization = 0.5)
bsvc = BaggingClassifier(base_estimator=SVC(probability = True, C = 1000),
n_jobs = 1, random_state=0)
rf = CalibratedClassifierCV(rf, cv=2, method='sigmoid')
knn = CalibratedClassifierCV(knn, cv=2, method='sigmoid')
gb = CalibratedClassifierCV(gb, cv=2, method='sigmoid')
bsvc = CalibratedClassifierCV(bsvc, cv=2, method='sigmoid')
# StackingCVClassifier models
clf_bl = StackingCVClassifier(classifiers = [make_pipeline(StandardScaler(),rf), #StackingCVClassifier
make_pipeline(StandardScaler(),gb),
make_pipeline(StandardScaler(),bsvc),
make_pipeline(StandardScaler(),knn)],
use_probas=True,
#average_probas=False,
meta_classifier= LogisticRegressionCV(max_iter =10000, Cs = 5))
# params = {
# 'meta_classifier__Cs': [0.1, 5, 10]
# }
# grid = GridSearchCV(estimator=clf_bl,
# param_grid=params,
# cv=3,
# refit=True)
# grid.fit(X_res, y_res.taxonID.ravel())
clf_bl.fit(X_res, y_res.taxonID.ravel())
print(clf_bl.score(X_test, y_test['taxonID'].ravel()))
#rf_check = knn.fit(X_res, y_res.taxonID)
#rf_check.score(X_test, y_test['taxonID'].ravel())
# #hipertune imbalanced models
# params = {'kneighborsclassifier__n_neighbors': [1, 5],
# 'randomforestclassifier__n_estimators': [10, 50],
# 'meta_classifier__C': [0.1, 10.0]}
# grid = GridSearchCV(estimator=sclf,
# param_grid=params,
# cv=3,
# refit=True)
# grid.fit(X, y)
# cv_keys = ('mean_test_score', 'std_test_score', 'params')
predict_an = clf_bl.predict_proba(X_test)
predict_an = pd.DataFrame(predict_an)
import pandas as pd
import sys
import os
from functools import reduce
from utils.misc_utils import pandas_to_db
class TemporalFeatureFactory(object):
def __init__(self, time_granularity, start_date, end_date):
'''
        The level of aggregation in space depends on the mapping table.
        Guidelines for creating new features:
        - Each feature should be a new method.
        - The name of the method becomes the name of the feature.
        - Use the column_name decorator to map which column of hectopunten
          the feature applies to.
        - Each method receives a group of hectopunten rows and returns one value for it.
        - If a feature requires multiple columns, @column_name can be custom and, for
          our purposes, the same as the name of the eventual feature/method.
'''
        self.times = pd.DataFrame(pd.date_range(start_date, end_date, freq=time_granularity))
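        # A hypothetical feature method following the guidelines in the docstring above
        # (sketch only; the column_name decorator and the 'speed' column are assumptions
        # that are not defined in this file):
        #
        #     @column_name('speed')
        #     def avg_speed(self, group):
        #         return group.mean()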
from src.BandC.Parser import Parser
import arff
import pandas as pd
from pandas.core.frame import DataFrame
class Arff(Parser):
"""
An Arff Parser that can automatically detect the correct format.
"""
def parse_file(self):
column_names = [attribute[0] for attribute in self.attributes]
        return pd.DataFrame.from_records(self.data, columns=column_names)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: pandas.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        analysisFrame = analysisFrame.apply(pandas.to_numeric, errors='coerce')  # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces a infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomoly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomoly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": | pandas.StringDtype() | pandas.StringDtype |
from LIMBR import simulations
import pandas as pd
sims = {}
for i in range(1,21):
analysis = simulations.analyze('standard_' + str(i) + '_true_classes.txt')
analysis.add_data('standard_' + str(i) + '_LIMBR_processed__jtkout_GammaP.txt','LIMBR')
analysis.add_data('standard_' + str(i) + '_old_processed__jtkout_GammaP.txt','traditional')
analysis.add_data('standard_' + str(i) + '_baseline__jtkout_GammaP.txt','baseline')
analysis.add_data('standard_eigenMS_' + str(i) + '__jtkout_GammaP.txt','eigenMS')
analysis.calculate_auc()
sims[i] = analysis.roc_auc
data = pd.DataFrame(sims).T
data['Noise'] = 'standard'
sims = {}
for i in range(1,21):
analysis = simulations.analyze('double_noise_' + str(i) + '_true_classes.txt')
analysis.add_data('double_noise_' + str(i) + '_LIMBR_processed__jtkout_GammaP.txt','LIMBR')
analysis.add_data('double_noise_' + str(i) + '_old_processed__jtkout_GammaP.txt','traditional')
analysis.add_data('double_noise_' + str(i) + '_baseline__jtkout_GammaP.txt','baseline')
analysis.add_data('double_noise_eigenMS_' + str(i) + '__jtkout_GammaP.txt','eigenMS')
analysis.calculate_auc()
sims[i] = analysis.roc_auc
temp_data = pd.DataFrame(sims).T
temp_data['Noise'] = 'double'
data = pd.concat([data, temp_data])
#!/usr/bin/env python
import json
import os
import pandas as pd
from pandas import Series
try:
import requests
except ImportError:
requests = None
from . import find_pmag_dir
from . import data_model3 as data_model
from pmag_env import set_env
pmag_dir = find_pmag_dir.get_pmag_dir()
data_model_dir = os.path.join(pmag_dir, 'pmagpy', 'data_model')
# if using with py2app, the directory structure is flat,
# so check to see where the resource actually is
if not os.path.exists(data_model_dir):
data_model_dir = os.path.join(pmag_dir, 'data_model')
VOCAB = {}
class Vocabulary(object):
def __init__(self, dmodel=None):
global VOCAB
self.vocabularies = []
self.possible_vocabularies = []
self.all_codes = []
self.code_types = []
self.methods = []
self.age_methods = []
if len(VOCAB):
self.set_vocabularies()
else:
if isinstance(dmodel, data_model.DataModel):
self.data_model = dmodel
Vocabulary.dmodel = dmodel
else:
try:
self.data_model = Vocabulary.dmodel
except AttributeError:
Vocabulary.dmodel = data_model.DataModel()
self.data_model = Vocabulary.dmodel
self.get_all_vocabulary()
VOCAB['vocabularies'] = self.vocabularies
VOCAB['possible_vocabularies'] = self.possible_vocabularies
VOCAB['all_codes'] = self.all_codes
VOCAB['code_types'] = self.code_types
VOCAB['methods'] = self.methods
VOCAB['age_methods'] = self.age_methods
VOCAB['suggested'] = self.suggested
def set_vocabularies(self):
self.vocabularies = VOCAB['vocabularies']
self.possible_vocabularies = VOCAB['possible_vocabularies']
self.all_codes = VOCAB['all_codes']
self.code_types = VOCAB['code_types']
self.methods = VOCAB['methods']
self.age_methods = VOCAB['age_methods']
self.suggested = VOCAB['suggested']
## Get method codes
def get_json_online(self, url):
"""
Use requests module to json from Earthref.
If this fails or times out, return false.
Returns
---------
result : requests.models.Response, or [] if unsuccessful
"""
if not requests:
return False
try:
req = requests.get(url, timeout=3)
if not req.ok:
return []
return req
except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout):
return []
def get_meth_codes(self):
if len(VOCAB):
self.set_vocabularies()
return
raw_codes = []
# try to get meth codes online
if not set_env.OFFLINE:
try:
raw = self.get_json_online('https://www2.earthref.org/MagIC/method-codes.json')
raw_codes = pd.DataFrame(raw.json())
print('-I- Getting method codes from earthref.org')
except Exception as ex:
#print(ex, type(ex))
print("-I- Couldn't connect to earthref.org, using cached method codes")
# if you couldn't get them online, use the cache
if not len(raw_codes):
print("-I- Using cached method codes")
raw_codes = pd.io.json.read_json(os.path.join(data_model_dir, "method_codes.json"), encoding='utf-8-sig')
# parse codes
code_types = raw_codes.loc['label']
all_codes = []
for code_name in code_types.index:
if code_name == 'geoid':
continue
df = pd.DataFrame(raw_codes[code_name]['codes'])
# remake the dataframe with the code (i.e., 'SM_VAR') as the index
df.index = df['code']
del df['code']
# add a column with the code type (i.e., 'anisotropy_estimation')
df['dtype'] = code_name
little_series = df['definition']
big_series = Series()
if any(all_codes):
try: # retains pandas backwards compatibility
all_codes = pd.concat([all_codes, df], sort=True)
big_series = pd.concat([big_series, little_series], sort=True)
except TypeError:
all_codes = pd.concat([all_codes, df])
                    big_series = pd.concat([big_series, little_series])
import pytest
from constants import (
HISTONE_QC_FIELDS,
HISTONE_PEAK_FILES_QUERY,
EXPERIMENT_FIELDS_QUERY,
LIMIT_ALL_JSON,
REPORT_TYPES,
REPORT_TYPE_DETAILS
)
from general_qc_report import (
parse_json,
make_url,
get_data,
get_experiments_and_files,
build_rows_from_experiment,
build_rows_from_file,
get_dx_details_from_job_id,
get_job_id_from_file,
filter_related_files,
filter_related_experiments,
frip_in_output,
get_row_builder,
collapse_quality_metrics,
is_nonoverlapping,
process_qc,
build_url_from_accession,
calculate_read_depth,
contains_columns,
add_read_depth,
resolve_spikein_description
)
from mock import patch
@pytest.mark.parametrize(
'key, value', [
('F1', None),
('F2', None),
('Fp', None),
('Ft', 0.681145282904541),
('npeak_overlap', 94053),
('nreads', 30399406),
('nreads_in_peaks', 20706412),
('Ft', 0.681145282904541)
]
)
def test_parse_json(key, value, histone_qc):
parsed_qc = parse_json(histone_qc, HISTONE_QC_FIELDS)
assert parsed_qc[key] == value
def test_make_url(base_url):
assert make_url(base_url, HISTONE_PEAK_FILES_QUERY, '') == (
base_url + HISTONE_PEAK_FILES_QUERY
)
assert make_url(base_url, HISTONE_PEAK_FILES_QUERY) == (
base_url + HISTONE_PEAK_FILES_QUERY + LIMIT_ALL_JSON
)
assert make_url(base_url, HISTONE_PEAK_FILES_QUERY, [LIMIT_ALL_JSON]) == (
base_url + HISTONE_PEAK_FILES_QUERY + LIMIT_ALL_JSON
)
assert make_url(base_url, HISTONE_PEAK_FILES_QUERY, [EXPERIMENT_FIELDS_QUERY, LIMIT_ALL_JSON]) == (
base_url + HISTONE_PEAK_FILES_QUERY + EXPERIMENT_FIELDS_QUERY + LIMIT_ALL_JSON
)
@patch('common.encoded_get')
def test_get_data(mock_get, base_url, keypair):
mock_get.return_value = {'@graph': [{'test': 1}]}
results = get_data(base_url, keypair)
assert results[0]['test'] == 1
@patch('common.encoded_get')
def test_get_experiments_and_files(mock_get, base_url, keypair, test_args, file_query, experiment_query):
mock_get.side_effect = [file_query, experiment_query]
f, e = get_experiments_and_files(
base_url,
keypair,
test_args.report_type,
test_args.assembly
)
assert len(f) == len(e) == 2
@patch('dxpy.describe')
def test_get_dx_details_from_job_id(mock_dx, dx_describe, test_args):
mock_dx.return_value = dx_describe
dx_details = get_dx_details_from_job_id('123', skip_dnanexus=False)
assert dx_details.get('job_id') == '123'
assert 'frip' in dx_details.get('output')
@patch('dxpy.describe')
def test_get_dx_details_from_job_id_skip_dnanexus(mock_dx, dx_describe):
mock_dx.return_value = dx_describe
dx_details = get_dx_details_from_job_id('123', skip_dnanexus=True)
assert dx_details.get('job_id') == '123'
assert dx_details.get('output') == {}
assert dx_details.get('project') is None
def test_get_job_id_from_file(file_query):
job_id = get_job_id_from_file(file_query['@graph'][0])
assert job_id == 'job-123'
def test_frip_in_output(dx_describe):
output = dx_describe.pop('output', None)
assert frip_in_output(output) is True
def test_filter_related_files(experiment_query, file_query):
experiment_id = experiment_query['@graph'][0]['@id']
f = filter_related_files(experiment_id, file_query['@graph'])
assert len(f) == 1
assert f[0]['accession'] == 'ENCFF660DGD'
def test_filter_related_experiments(experiment_query, file_query):
dataset = file_query['@graph'][0]['dataset']
e = filter_related_experiments(dataset, experiment_query['@graph'])
assert len(e) == 1
assert e[0]['@id'] == '/experiments/ENCSR656SIB/'
@patch('dxpy.describe')
def test_build_rows(mock_dx, experiment_query, file_query,
references_query, test_args, base_url,
dx_describe):
mock_dx.return_value = dx_describe
rows = build_rows_from_experiment(
experiment_query['@graph'],
file_query['@graph'],
references_query['@graph'],
test_args.report_type,
base_url,
test_args
)
assert len(rows) == 2
@patch('dxpy.describe')
def test_build_rows_missing_file(mock_dx, experiment_query, file_query,
references_query, test_args, base_url,
dx_describe):
mock_dx.return_value = dx_describe
rows = build_rows_from_experiment(
experiment_query['@graph'],
file_query['@graph'][:1],
references_query['@graph'],
test_args.report_type,
base_url,
test_args
)
assert len(rows) == 1
@patch('dxpy.describe')
def test_build_rows_skip_multiple_qc(mock_dx, experiment_query, file_query,
references_query, histone_qc, test_args,
base_url, dx_describe):
mock_dx.return_value = dx_describe
file = file_query['@graph'][0]
file['quality_metrics'] = [histone_qc, histone_qc]
rows = build_rows_from_experiment(
experiment_query['@graph'],
[file],
references_query['@graph'],
test_args.report_type,
base_url,
test_args
)
assert len(rows) == 0
def test_report_type_constants():
assert 'histone_qc' in REPORT_TYPES
assert 'rna_replication' in REPORT_TYPES
def test_row_builder_returns_correct_function():
assert get_row_builder('rna_replication') is build_rows_from_experiment
assert get_row_builder('rna_mapping') is build_rows_from_file
def test_row_builder_raises_error():
with pytest.raises(KeyError):
get_row_builder('blah')
def test_collapse_quality_metrics():
assert collapse_quality_metrics([]) == {}
assert collapse_quality_metrics([{'a': 1}, {'b': 2}]) == {'a': 1, 'b': 2}
assert collapse_quality_metrics([{'a': 1}, {'a': 2}]) == {'a': 2}
@patch('constants.REPORT_TYPE_DETAILS')
def test_is_nonoverlapping(replaced_details):
REPORT_TYPE_DETAILS['rna_replication']['qc_fields'] = ['a', 'b', 'c']
replaced_details.return_value = REPORT_TYPE_DETAILS
is_nonoverlapping([{'a': 1}, {'b': 2}, {'c': 3}], 'rna_replication')
with pytest.raises(KeyError):
is_nonoverlapping({'a': 1}, 'rna_qc')
with pytest.raises(ValueError):
is_nonoverlapping([{'a': 1}, {'a': 2}, {'c': 3}], 'rna_replication')
def test_process_qc(base_url):
qc = process_qc(
base_url,
{
'@id': '/123/',
'attachment': {
'href': '@@download/abc'
}
},
'google_sheets'
)
qc1 = process_qc(base_url, {}, 'google_sheets')
assert qc['attachment'] == (
'=hyperlink("https://www.encodeproject.org//123/@@download/abc",'
' image("https://www.encodeproject.org//123/@@download/abc", 2))'
)
assert '@id' not in qc
assert qc1 == {}
def test_build_url_from_accession(base_url):
accession = 'ENC123'
url = build_url_from_accession(accession, base_url, 'tsv')
assert url == base_url + accession
link = build_url_from_accession(accession, base_url, 'google_sheets')
assert link == '=hyperlink("https://www.encodeproject.org/ENC123", "ENC123")'
def test_calculate_read_depth():
import pandas as pd
assert calculate_read_depth(20, 20) == 40
assert pd.isnull(calculate_read_depth('', 20))
def test_contains_columns(test_df):
assert contains_columns(test_df, ['a', 'b'])
assert not contains_columns(test_df, ['c'])
def test_add_read_depth(test_rna_mapping_df):
import pandas as pd
from pandas.util.testing import (
assert_frame_equal,
)
df = add_read_depth(test_rna_mapping_df)
assert 'read_depth' in df.columns
assert not | assert_frame_equal(test_rna_mapping_df, df) | pandas.util.testing.assert_frame_equal |
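For context, a small sketch of the frame-equality assertion the test above relies on. Note that `pandas.util.testing` was later deprecated in favour of `pandas.testing`; the import below assumes a reasonably recent pandas.

import pandas as pd
from pandas.testing import assert_frame_equal

left = pd.DataFrame({'a': [1, 2]})
right = pd.DataFrame({'a': [1, 2], 'read_depth': [10.0, 20.0]})
assert_frame_equal(left, left.copy())        # identical frames: passes silently
try:
    assert_frame_equal(left, right)          # extra column: raises AssertionError
except AssertionError as err:
    print('frames differ:', err)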
from os import listdir, sep
from os.path import isfile, join
import re
from bs4 import BeautifulSoup
# from DbManager import DatabaseManager
import json
from selenium import webdriver
# from SoccerMatch import SoccerMatch
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import time
import pandas as pd
import numpy as np
import sys
# url = 'https://www.oddsportal.com/soccer/ecuador/liga-pro/results/'
TIMEOUT = 100
from datetime import datetime as dt
from config import Config
from tqdm import tqdm
class OddsParser(Config):
def __init__(self, Config, last_update=True):
super().__init__()
self.last_update=last_update
if self.last_update:
self.EVENT_PAGE = [
'https://www.oddsportal.com/soccer/england/premier-league/',
'https://www.oddsportal.com/soccer/germany/bundesliga/',
'https://www.oddsportal.com/soccer/france/ligue-1/',
'https://www.oddsportal.com/soccer/russia/premier-league/',
'https://www.oddsportal.com/soccer/spain/laliga/',
'https://www.oddsportal.com/soccer/italy/serie-a/',
]
else:
self.EVENT_PAGE = [
'https://www.oddsportal.com/soccer/england/premier-league/results/',
'https://www.oddsportal.com/soccer/england/premier-league/',
'https://www.oddsportal.com/soccer/germany/bundesliga/results/',
'https://www.oddsportal.com/soccer/germany/bundesliga/',
'https://www.oddsportal.com/soccer/france/ligue-1/results/',
'https://www.oddsportal.com/soccer/france/ligue-1/',
'https://www.oddsportal.com/soccer/russia/premier-league/results/',
'https://www.oddsportal.com/soccer/russia/premier-league/',
'https://www.oddsportal.com/soccer/spain/laliga/',
'https://www.oddsportal.com/soccer/spain/laliga/results/',
'https://www.oddsportal.com/soccer/italy/serie-a/',
'https://www.oddsportal.com/soccer/italy/serie-a/results/',
]
def get_all_odds_2018(self, df):
df_odds = pd.read_pickle('./Debug/parser_20182019_all.pkl')
df_odds = df_odds[df_odds['bet_tab']!='European Handicap']
# combined measures for concat understat
df_odds['Targets_oddsportal'] = np.where(
df_odds['bet_tab'].isin(['1X2','Double Chance'])
, df_odds['bet_tab'] + ' ' + df_odds['bet_type']
, np.where(
df_odds['bet_tab'].isin(['Over/Under', 'European Handicap'])
, df_odds['bet_tab'] + ' ' + df_odds['bet_type'] + ' ' + df_odds['bet_set']
, ''
)
)
df_odds = pd.merge(
df_odds,
pd.read_excel('./data/info/targets.xlsx'),
on = ['Targets_oddsportal']
)
df_odds = pd.merge(
df_odds,
| pd.read_excel('./data/info/v_teams.xlsx') | pandas.read_excel |
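A hedged sketch of the merge-with-lookup-table pattern used above; the column and target names are made up and do not come from the real targets.xlsx or v_teams.xlsx files.

import pandas as pd

odds = pd.DataFrame({'Targets_oddsportal': ['1X2 1', 'Over/Under Over 2.5'],
                     'odds': [1.85, 2.10]})
targets = pd.DataFrame({'Targets_oddsportal': ['1X2 1', 'Over/Under Over 2.5'],
                        'target': ['home_win', 'over_2_5']})   # stand-in for targets.xlsx
merged = pd.merge(odds, targets, on=['Targets_oddsportal'])
print(merged)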
"""Linear electric grid models module."""
from multimethod import multimethod
import numpy as np
import pandas as pd
import pyomo.core
import pyomo.environ as pyo
import scipy.sparse
import scipy.sparse.linalg
import fledge.config
import fledge.electric_grid_models
import fledge.power_flow_solvers
import fledge.utils
logger = fledge.config.get_logger(__name__)
class LinearElectricGridModel(object):
"""Abstract linear electric model object, consisting of the sensitivity matrices for
voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
nodal delta power.
Note:
This abstract class only defines the expected variables of linear electric grid model objects,
but does not implement any functionality.
Attributes:
electric_grid_model (fledge.electric_grid_models.ElectricGridModel): Electric grid model object.
power_flow_solution (fledge.power_flow_solvers.PowerFlowSolution): Reference power flow solution object.
sensitivity_voltage_by_power_wye_active (scipy.sparse.spmatrix): Sensitivity matrix for complex voltage vector
by active wye power vector.
sensitivity_voltage_by_power_wye_reactive (scipy.sparse.spmatrix): Sensitivity matrix for complex voltage
vector by reactive wye power vector.
sensitivity_voltage_by_power_delta_active (scipy.sparse.spmatrix): Sensitivity matrix for complex voltage vector
by active delta power vector.
sensitivity_voltage_by_power_delta_reactive (scipy.sparse.spmatrix): Sensitivity matrix for complex voltage
vector by reactive delta power vector.
sensitivity_voltage_by_der_power_active (scipy.sparse.spmatrix): Sensitivity matrix for
complex voltage vector by DER active power vector.
sensitivity_voltage_by_der_power_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
complex voltage vector by DER reactive power vector.
sensitivity_voltage_magnitude_by_power_wye_active (scipy.sparse.spmatrix): Sensitivity matrix for voltage
magnitude vector by active wye power vector.
sensitivity_voltage_magnitude_by_power_wye_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive wye power vector.
sensitivity_voltage_magnitude_by_power_delta_active (scipy.sparse.spmatrix): Sensitivity matrix for
voltage magnitude vector by active delta power vector.
sensitivity_voltage_magnitude_by_power_delta_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive delta power vector.
sensitivity_voltage_magnitude_by_der_power_active (scipy.sparse.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER active power vector.
sensitivity_voltage_magnitude_by_der_power_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER reactive power vector.
sensitivity_branch_power_1_by_power_wye_active (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_by_power_wye_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_by_power_delta_active (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_by_power_delta_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_by_der_power_active (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER active power vector.
sensitivity_branch_power_1_by_der_power_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER reactive power vector.
sensitivity_branch_power_2_by_power_wye_active (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_by_power_wye_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_by_power_delta_active (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_by_power_delta_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_by_der_power_active (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER active power vector.
sensitivity_branch_power_2_by_der_power_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER reactive power vector.
sensitivity_loss_active_by_power_wye_active (scipy.sparse.spmatrix): Sensitivity matrix for
active loss by active wye power vector.
sensitivity_loss_active_by_power_wye_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
active loss by reactive wye power vector.
sensitivity_loss_active_by_power_delta_active (scipy.sparse.spmatrix): Sensitivity matrix for
active loss by active delta power vector.
sensitivity_loss_active_by_power_delta_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
active loss by reactive delta power vector.
sensitivity_loss_active_by_der_power_active (scipy.sparse.spmatrix): Sensitivity matrix for
active loss by DER active power vector.
sensitivity_loss_active_by_der_power_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
active loss by DER reactive power vector.
sensitivity_loss_reactive_by_power_wye_active (scipy.sparse.spmatrix): Sensitivity matrix for
reactive loss by active wye power vector.
sensitivity_loss_reactive_by_power_wye_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
reactive loss by reactive wye power vector.
sensitivity_loss_reactive_by_power_delta_active (scipy.sparse.spmatrix): Sensitivity matrix for
reactive loss by active delta power vector.
sensitivity_loss_reactive_by_power_delta_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
reactive loss by reactive delta power vector.
sensitivity_loss_reactive_by_der_power_active (scipy.sparse.spmatrix): Sensitivity matrix for
reactive loss by DER active power vector.
sensitivity_loss_reactive_by_der_power_reactive (scipy.sparse.spmatrix): Sensitivity matrix for
reactive loss by DER reactive power vector.
"""
electric_grid_model: fledge.electric_grid_models.ElectricGridModel
power_flow_solution: fledge.power_flow_solvers.PowerFlowSolution
sensitivity_voltage_by_power_wye_active: scipy.sparse.spmatrix
sensitivity_voltage_by_power_wye_reactive: scipy.sparse.spmatrix
sensitivity_voltage_by_power_delta_active: scipy.sparse.spmatrix
sensitivity_voltage_by_power_delta_reactive: scipy.sparse.spmatrix
sensitivity_voltage_by_der_power_active: scipy.sparse.spmatrix
sensitivity_voltage_by_der_power_reactive: scipy.sparse.spmatrix
sensitivity_voltage_magnitude_by_power_wye_active: scipy.sparse.spmatrix
sensitivity_voltage_magnitude_by_power_wye_reactive: scipy.sparse.spmatrix
sensitivity_voltage_magnitude_by_power_delta_active: scipy.sparse.spmatrix
sensitivity_voltage_magnitude_by_power_delta_reactive: scipy.sparse.spmatrix
sensitivity_voltage_magnitude_by_der_power_active: scipy.sparse.spmatrix
sensitivity_voltage_magnitude_by_der_power_reactive: scipy.sparse.spmatrix
sensitivity_branch_power_1_by_power_wye_active: scipy.sparse.spmatrix
sensitivity_branch_power_1_by_power_wye_reactive: scipy.sparse.spmatrix
sensitivity_branch_power_1_by_power_delta_active: scipy.sparse.spmatrix
sensitivity_branch_power_1_by_power_delta_reactive: scipy.sparse.spmatrix
sensitivity_branch_power_1_by_der_power_active: scipy.sparse.spmatrix
sensitivity_branch_power_1_by_der_power_reactive: scipy.sparse.spmatrix
sensitivity_branch_power_2_by_power_wye_active: scipy.sparse.spmatrix
sensitivity_branch_power_2_by_power_wye_reactive: scipy.sparse.spmatrix
sensitivity_branch_power_2_by_power_delta_active: scipy.sparse.spmatrix
sensitivity_branch_power_2_by_power_delta_reactive: scipy.sparse.spmatrix
sensitivity_branch_power_2_by_der_power_active: scipy.sparse.spmatrix
sensitivity_branch_power_2_by_der_power_reactive: scipy.sparse.spmatrix
sensitivity_loss_active_by_power_wye_active: scipy.sparse.spmatrix
sensitivity_loss_active_by_power_wye_reactive: scipy.sparse.spmatrix
sensitivity_loss_active_by_power_delta_active: scipy.sparse.spmatrix
sensitivity_loss_active_by_power_delta_reactive: scipy.sparse.spmatrix
sensitivity_loss_active_by_der_power_active: scipy.sparse.spmatrix
sensitivity_loss_active_by_der_power_reactive: scipy.sparse.spmatrix
sensitivity_loss_reactive_by_power_wye_active: scipy.sparse.spmatrix
sensitivity_loss_reactive_by_power_wye_reactive: scipy.sparse.spmatrix
sensitivity_loss_reactive_by_power_delta_active: scipy.sparse.spmatrix
sensitivity_loss_reactive_by_power_delta_reactive: scipy.sparse.spmatrix
sensitivity_loss_reactive_by_der_power_active: scipy.sparse.spmatrix
sensitivity_loss_reactive_by_der_power_reactive: scipy.sparse.spmatrix
def define_optimization_variables(
self,
optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
timesteps=pd.Index([0], name='timestep')
):
"""Define decision variables for given `optimization_problem`."""
# DERs.
optimization_problem.der_active_power_vector_change = (
pyo.Var(timesteps.to_list(), self.electric_grid_model.ders.to_list())
)
optimization_problem.der_reactive_power_vector_change = (
pyo.Var(timesteps.to_list(), self.electric_grid_model.ders.to_list())
)
# Voltage.
optimization_problem.voltage_magnitude_vector_change = (
pyo.Var(timesteps.to_list(), self.electric_grid_model.nodes.to_list())
)
# Branch flows.
optimization_problem.branch_power_vector_1_squared_change = (
pyo.Var(timesteps.to_list(), self.electric_grid_model.branches.to_list())
)
optimization_problem.branch_power_vector_2_squared_change = (
pyo.Var(timesteps.to_list(), self.electric_grid_model.branches.to_list())
)
# Loss.
optimization_problem.loss_active_change = pyo.Var(timesteps.to_list())
optimization_problem.loss_reactive_change = pyo.Var(timesteps.to_list())
def define_optimization_constraints(
self,
optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
timesteps=pd.Index([0], name='timestep')
):
"""Define constraints to express the linear electric grid model equations for given `optimization_problem`."""
# Instantiate constraint list.
optimization_problem.linear_electric_grid_model_constraints = pyo.ConstraintList()
for timestep in timesteps:
# Voltage.
for node_index, node in enumerate(self.electric_grid_model.nodes):
optimization_problem.linear_electric_grid_model_constraints.add(
optimization_problem.voltage_magnitude_vector_change[timestep, node]
==
sum(
self.sensitivity_voltage_magnitude_by_der_power_active[node_index, der_index]
* optimization_problem.der_active_power_vector_change[timestep, der]
+ self.sensitivity_voltage_magnitude_by_der_power_reactive[node_index, der_index]
* optimization_problem.der_reactive_power_vector_change[timestep, der]
for der_index, der in enumerate(self.electric_grid_model.ders)
)
)
# Branch flows.
for branch_index, branch in enumerate(self.electric_grid_model.branches):
optimization_problem.linear_electric_grid_model_constraints.add(
optimization_problem.branch_power_vector_1_squared_change[timestep, branch]
==
sum(
self.sensitivity_branch_power_1_by_der_power_active[branch_index, der_index]
* optimization_problem.der_active_power_vector_change[timestep, der]
+ self.sensitivity_branch_power_1_by_der_power_reactive[branch_index, der_index]
* optimization_problem.der_reactive_power_vector_change[timestep, der]
for der_index, der in enumerate(self.electric_grid_model.ders)
)
)
optimization_problem.linear_electric_grid_model_constraints.add(
optimization_problem.branch_power_vector_2_squared_change[timestep, branch]
==
sum(
self.sensitivity_branch_power_2_by_der_power_active[branch_index, der_index]
* optimization_problem.der_active_power_vector_change[timestep, der]
+ self.sensitivity_branch_power_2_by_der_power_reactive[branch_index, der_index]
* optimization_problem.der_reactive_power_vector_change[timestep, der]
for der_index, der in enumerate(self.electric_grid_model.ders)
)
)
# Loss.
optimization_problem.linear_electric_grid_model_constraints.add(
optimization_problem.loss_active_change[timestep]
==
sum(
self.sensitivity_loss_active_by_der_power_active[0, der_index]
* optimization_problem.der_active_power_vector_change[timestep, der]
+ self.sensitivity_loss_active_by_der_power_reactive[0, der_index]
* optimization_problem.der_reactive_power_vector_change[timestep, der]
for der_index, der in enumerate(self.electric_grid_model.ders)
)
)
optimization_problem.linear_electric_grid_model_constraints.add(
optimization_problem.loss_reactive_change[timestep]
==
sum(
self.sensitivity_loss_reactive_by_der_power_active[0, der_index]
* optimization_problem.der_active_power_vector_change[timestep, der]
+ self.sensitivity_loss_reactive_by_der_power_reactive[0, der_index]
* optimization_problem.der_reactive_power_vector_change[timestep, der]
for der_index, der in enumerate(self.electric_grid_model.ders)
)
)
def get_optimization_results(
self,
optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
power_flow_solution: fledge.power_flow_solvers.PowerFlowSolution = None,
timesteps=pd.Index([0], name='timestep'),
in_per_unit=False,
with_mean=False,
):
# Instantiate results variables.
# DER.
der_active_power_vector = (
pd.DataFrame(columns=self.electric_grid_model.ders, index=timesteps, dtype=np.float)
)
der_reactive_power_vector = (
pd.DataFrame(columns=self.electric_grid_model.ders, index=timesteps, dtype=np.float)
)
# Voltage.
voltage_magnitude_vector = (
| pd.DataFrame(columns=self.electric_grid_model.nodes, index=timesteps, dtype=np.float) | pandas.DataFrame |
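Roughly what the result containers above amount to: empty float frames indexed by the timesteps with one column per grid element, filled row by row later. The node labels and timesteps below are placeholders, and plain `float` is used instead of the deprecated `np.float` alias.

import numpy as np
import pandas as pd

timesteps = pd.date_range('2020-01-01', periods=3, freq='30T', name='timestep')
nodes = pd.Index(['node_1.phase_1', 'node_2.phase_1'], name='node')
voltage_magnitude_vector = pd.DataFrame(columns=nodes, index=timesteps, dtype=float)
voltage_magnitude_vector.loc[timesteps[0], :] = np.array([1.0, 0.98])   # fill one timestep
print(voltage_magnitude_vector)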
from __future__ import division
import numpy as np
import os.path
import sys
import pandas as pd
from base.uber_model import UberModel, ModelSharedInputs
from .therps_functions import TherpsFunctions
import time
from functools import wraps
def timefn(fn):
@wraps(fn)
def measure_time(*args, **kwargs):
t1 = time.time()
result = fn(*args, **kwargs)
t2 = time.time()
        print("therps_model_rest.py@timefn: " + fn.__name__ + " took " + "{:.6f}".format(t2 - t1) + " seconds")
return result
return measure_time
class TherpsInputs(ModelSharedInputs):
"""
Input class for Therps.
"""
def __init__(self):
"""Class representing the inputs for Therps"""
super(TherpsInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
"""
Therps constructor.
:param chem_name:
:param use:
:param formu_name:
:param percent_act_ing:
:param foliar_diss_hlife:
:param num_apps:
:param app_interval:
:param application_rate:
:param ld50_bird:
:param lc50_bird:
:param noaec_bird:
:param noael_bird:
:param species_of_the_tested_bird_avian_ld50:
:param species_of_the_tested_bird_avian_lc50:
:param species_of_the_tested_bird_avian_noaec:
:param species_of_the_tested_bird_avian_noael:
:param tw_bird_ld50:
:param tw_bird_lc50:
:param tw_bird_noaec:
:param tw_bird_noael:
:param mineau_sca_fact:
:param aw_herp_sm:
:param aw_herp_md:
:param aw_herp_slg:
:param awc_herp_sm:
:param awc_herp_md:
:param awc_herp_lg:
:param bw_frog_prey_mamm:
:param bw_frog_prey_herp:
:return:
"""
self.use = pd.Series([], dtype="object", name="use")
self.formu_name = pd.Series([], dtype="object", name="formu_name")
self.percent_act_ing = pd.Series([], dtype="float", name="percent_act_ing")
self.foliar_diss_hlife = pd.Series([], dtype="float64", name="foliar_diss_hlife")
self.num_apps = pd.Series([], dtype="int64", name="num_apps")
self.app_interval = pd.Series([], dtype="int", name="app_interval")
self.application_rate = pd.Series([], dtype="float", name="application_rate")
self.ld50_bird = pd.Series([], dtype="float", name="ld50_bird")
self.lc50_bird = pd.Series([], dtype="float", name="lc50_bird")
self.noaec_bird = pd.Series([], dtype="float", name="noaec_bird")
self.noael_bird = | pd.Series([], dtype="float", name="noael_bird") | pandas.Series |
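The input class above declares its schema as empty, typed Series; a tiny sketch of how one such field is typically populated later from a tabular input (the input table here is hypothetical).

import pandas as pd

ld50_bird = pd.Series([], dtype='float', name='ld50_bird')       # declared empty and typed
inputs = pd.DataFrame({'ld50_bird': ['12.5', '30.0']})            # hypothetical raw input table
ld50_bird = inputs['ld50_bird'].astype('float').rename('ld50_bird')
print(ld50_bird)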
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 21:01:59 2019
@author: innerm
"""
import json
import pandas as pd
file_en='stage41.csv'
file_ru='stage42.csv'
df=pd.DataFrame()
df1=pd.read_csv(file_en)
df2= | pd.read_csv(file_ru) | pandas.read_csv |
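The script above only loads the English and Russian files; a hedged guess at the kind of join that usually follows, with invented column names since the real stage41/stage42 schemas are not shown.

import pandas as pd

df1 = pd.DataFrame({'id': [1, 2], 'text_en': ['hello', 'world']})   # stand-in for stage41.csv
df2 = pd.DataFrame({'id': [1, 2], 'text_ru': ['privet', 'mir']})    # stand-in for stage42.csv
df = pd.merge(df1, df2, on='id', how='inner')
print(df)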
import pandas as pd
from fbprophet import Prophet
import matplotlib.pyplot as plt
from covid_data import country_list, feature_list, PANDAMIC_FORCAST_DIR_PATH, VACCINATION_WITH_PANDEMIC, DATA_FOR_LSP_PATH
def get_calculated_data(country: str, col_name: str):
filename = country + '.csv'
file_pandemic_bef_vacc = open(
'./covid_data/cache/pandemic_before/'+filename, 'r', encoding='utf-8')
df = pd.read_csv(file_pandemic_bef_vacc)
df['date'] = df['date'].apply(pd.to_datetime)
df.columns = df.columns.map(lambda x: "ds" if x == 'date' else (
"y" if x == col_name else x))
file_pandemic_aft_vacc = open(
'./covid_data/cache/pandemic_after/'+filename, 'r', encoding='utf-8')
df2 = pd.read_csv(file_pandemic_aft_vacc)
df2['date'] = df2['date'].apply(pd.to_datetime)
df2.columns = df2.columns.map(lambda x: "ds" if x == 'date' else (
"y" if x == col_name else x))
last_date_bef_vacc = df['ds'].max()
last_date_aft_vacc = df2['ds'].max()
periods = (last_date_aft_vacc - last_date_bef_vacc).days
prophet = Prophet()
prophet.fit(df)
future = prophet.make_future_dataframe(
periods=periods, include_history=False)
forecast = prophet.predict(future)
df3 = forecast[['ds', 'yhat']]
df3 = df3[df3['ds'] > last_date_bef_vacc]
return df, df2, df3
def draw_curve(country: str, col_name: str, description: str):
df, df2, df3 = get_calculated_data(country, col_name)
plt.figure(figsize=(12, 6))
plt.plot('ds', 'y', data=df, color='black',
label=f'{description} Before Vaccination')
plt.plot('ds', 'y', data=df2, color='blue',
label=f'Actual {description} After Vaccination')
plt.plot('ds', 'yhat', data=df3, color='orange',
label=f'Forecasted {description} After Vaccination')
plt.legend()
plt.show()
def draw_forecast():
for country in country_list:
draw_curve(country, 'cumulative_total_deaths',
'Cumulative Total Deaths')
draw_curve(country, 'active_cases', 'Active Cases')
draw_curve(country, 'cumulative_total_cases', 'Cumulative Total Cases')
draw_curve(country, 'daily_new_cases', 'Daily New Cases')
draw_curve(country, 'daily_new_deaths', 'Daily New Deaths')
def generate_forecast_data():
for country in country_list:
for idx, feature in enumerate(feature_list):
_, _, df = get_calculated_data(country, feature)
df.columns = df.columns.map(
lambda x: "date" if x == 'ds' else (feature if x == 'yhat' else x))
df_merge = df if idx == 0 else pd.merge(
left=df_merge, right=df, left_on='date', right_on='date')
df_merge.to_csv(PANDAMIC_FORCAST_DIR_PATH+country+'.csv', index=False)
def round_forecast():
for country in country_list:
df_file = open(
PANDAMIC_FORCAST_DIR_PATH+country+'.csv', 'r', encoding='utf-8')
df = pd.read_csv(df_file)
for feature in feature_list:
df[feature] = df[feature].map(lambda x: round(float(x)))
df.to_csv(PANDAMIC_FORCAST_DIR_PATH+country+'.csv', index=False)
def set_date():
for country in country_list:
vacc_with_pan_file = open(
VACCINATION_WITH_PANDEMIC+country+'.csv', 'r', encoding='utf-8')
df_vacc_with_pan = | pd.read_csv(vacc_with_pan_file) | pandas.read_csv |
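A short, hypothetical driver for the helpers defined above, assuming the cache directories and constants from `covid_data` are already in place; the call order mirrors how the functions depend on each other's output files.

if __name__ == '__main__':
    generate_forecast_data()   # write one Prophet forecast CSV per country
    round_forecast()           # round the forecasted counts to integers
    draw_forecast()            # plot actual vs. forecasted curves per country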
import logging as log
from datetime import datetime as dt
from multiprocessing import Pool
import gym
import numpy as np
import pandas as pd
from numpy.random import RandomState
class RandomPolicy(object):
def __init__(self, possible_actions=range(6), random_seed=0):
"""
        A policy that will take random actions.
Arguments:
----------
possible_actions: list
A list of possible actions to sample from.
random_seed: int
The seed for the random number generator
"""
self.random_state = RandomState(random_seed)
self.possible_actions = possible_actions
def get_action(self, observation, reward):
"""
sample a random action.
Arguments of this function are not used but exist to allow
easy switching to a policy that makes use of the obs.
Arguments:
----------
observation: Numpy Array
As returned by gym's env.step(...)
reward: float
As returned by gym's env.step(...)
Returns:
--------
action:
            One of possible_actions, uniformly sampled.
"""
randint = self.random_state.randint
action_id = randint(0, len(self.possible_actions))
action = self.possible_actions[action_id]
return action
class KerasModelPolicy(object):
def __init__(self,
possible_actions=range(6),
random_seed=0,
model=None,
probabilistic_mode=False):
"""
A policy that will use a Keras Model.
The observations will be given to the Keras Model.
The output of the model should match the shape of
possible actions.
Arguments:
----------
possible_actions: list
A list of possible actions to sample from.
random_seed: int
The seed for the random number generator
model: Keras model
            The actual model to translate observations
into actions. Is expected to take a 4d
array as (num, y, x, channel) to same shape
as possible_actions.
probabilistic_mode: bool
If true will interpret the output of the
model as probabilities of chosing the actions.
The next action will be sampled accordingly.
If false will choose the action with the highest
value.
"""
self.random_state = RandomState(random_seed)
self.possible_actions = possible_actions
self.model = model
self.probabilistic_mode = probabilistic_mode
self.black_bound_shape = model.input_shape[1:3]
def get_action(self, observation, reward):
"""
Compute the next action by using the keras model.
Arguments:
----------
observation: Numpy Array
As returned by gym's env.step(...)
reward: float
As returned by gym's env.step(...)
Returns:
--------
action: ?
Selected action of self.possible_actions
"""
# Process observations to match model input
create_black_boundary = ExtractWorker.create_black_boundary
observation = create_black_boundary([observation],
self.black_bound_shape)[0]
observation = np.expand_dims(observation, axis=0)
# Query the model for an action
model_output = self.model.predict(observation)[0]
# Choose the action that has the highest output value
if not self.probabilistic_mode:
action_id = model_output.argmax()
action = self.possible_actions[action_id]
# Select next action by sampling according to the
# probabilities predicted by the model.
else:
# Normalize to one, just in case.
probabs = model_output / model_output.sum()
choice = self.random_state.choice
action = choice(self.possible_actions, p=probabs)
return action
class ExtractWorker(object):
def __init__(self, env_name='SpaceInvaders-v4', custom_frame_skip_length=1,
observation_callback=None):
"""
The worker process that derives episodes from gym environments
Arguments:
----------
env_name: String
The name of the environment to use. As expected by gym.make
use_custom_frame_skip: bool
Applies self.custom_frame_skip function to custom_frame_skip_length
outputs of env.step while repeating the action. No effect for
custom_frame_skip_length = 1.
observation_callback: None or fuction
If not None: Will be called as observation_callback(observation)
directly after env.step returns the observations. Will be applied
before custom_frame_skip functions gets into action.
"""
self.env = gym.make(env_name)
self.custom_frame_skip_length = custom_frame_skip_length
self.observation_callback = observation_callback
@staticmethod
def custom_frame_skip(observations, dones, rewards, infos):
"""
Improves visibility of laser beams.
"""
observation = np.max(np.stack(observations, axis=0), axis=0)
done = max(dones)
reward = sum(rewards)
info = infos[-1]
return observation, reward, done, info
def extract_episode(self, policy, max_steps=-1):
"""
Extract one episode of the environment and return it.
Arguments:
----------
policy: object
            Will be called as policy.get_action(observation, reward) to derive
            the action for the next steps.
        max_steps: int
If int the maximum number of steps one episode should contain,
including the initial state before the game starts.
If -1 the episode will be run until it terminates i.e.
env.step returns done=True
Returns:
--------
observations: list
list of objects returned at each call of env.step
actions: list
list of objects returned at each call of env.step
rewards: list
list of objects returned at each call of env.step
infos: list
list of objects returned at each call of env.step
"""
observations = []
actions = []
rewards = []
infos = []
current_step = 0
# This is the observation before the episode starts.
observation = self.env.reset()
# Initialise to None to make clear that this is no output
# of gym.env or anything we computed (w.r.t. the action)
reward = None
action = None
info = None
# To let the first step run trough.
done = False
while True:
# Store the latest env output in the prepared lists.
observations.append(observation)
actions.append(action)
rewards.append(reward)
infos.append(info)
# Abort if the episode terminated or max_steps
# has been reached
current_step += 1
if done or current_step == max_steps:
break
action = policy.get_action(observation=observation, reward=reward)
if self.custom_frame_skip_length <= 1:
observation, reward, done, info = self.env.step(action)
if self.observation_callback is not None:
observation = self.observation_callback(observation)
continue
observations_step = []
rewards_steps = []
            dones_steps = []
infos_steps = []
for i in range(self.custom_frame_skip_length):
observation, reward, done, info = self.env.step(action)
if self.observation_callback is not None:
observation = self.observation_callback(observation)
observations_step.append(observation)
rewards_steps.append(reward)
                dones_steps.append(done)
infos_steps.append(info)
if done:
break
            ordi = self.custom_frame_skip(observations=observations_step,
                                          rewards=rewards_steps,
                                          dones=dones_steps,
                                          infos=infos_steps)
observation, reward, done, info = ordi
return observations, actions, rewards, infos
def extract_episode_statistics(self,
policy_class,
n_episodes,
start_seed=0,
max_steps=-1,
policy_kw_args=None,
return_observations=False):
"""
        Compute episode information for many episodes,
        e.g. total return and number of frames until done.
        Arguments:
-----------
policy_class: object
The class of the policy. Will be initiated with a
seed that is the episode number, counted from
start_seed to n_episodes+start_seed.
n_episodes: int
How many episodes shall be produced
start_seed: int
The first seed, see also policy_class.
        max_steps: int
If int the maximum number of steps one episode should contain,
including the initial state before the game starts.
If -1 the episode will be run until it terminates i.e.
env.step returns done=True
policy_kw_args: dict or None
Additional keyword arguments passed to policy_class init.
return_observations: bool
if True also returns the observations of the episodes
Returns:
--------
episodes_df: Pandas Dataframe
with seed as index and columns: [number of
steps until done or max_steps, total reward]
all_observations: dict
Only if return_observations.
Keys are the seeds, values the lists of observed arrays.
"""
start_time = dt.utcnow()
number_of_steps = []
total_reward = []
seeds = []
if policy_kw_args is None:
policy_kw_args = {}
if return_observations:
all_observations = {}
for random_seed in range(start_seed, n_episodes + start_seed):
policy = policy_class(random_seed=random_seed, **policy_kw_args)
episode_data = self.extract_episode(policy=policy,
max_steps=max_steps)
observations, actions, rewards, infos = episode_data
if return_observations:
all_observations[random_seed] = observations
number_of_steps.append(len(observations))
total_reward.append(sum(rewards[1:]))
seeds.append(random_seed)
df_data = {'total_reward': total_reward,
'number_of_steps': number_of_steps}
episode_df = | pd.DataFrame(index=seeds, data=df_data) | pandas.DataFrame |
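A compact sketch of the bookkeeping pattern in `extract_episode_statistics`: accumulate per-episode scalars in plain lists, then build a frame indexed by the seed. The numbers are invented.

import pandas as pd

seeds = [0, 1, 2]
total_reward = [210.0, 155.0, 305.0]     # hypothetical episode returns
number_of_steps = [843, 610, 1002]       # hypothetical episode lengths
episode_df = pd.DataFrame(index=seeds,
                          data={'total_reward': total_reward,
                                'number_of_steps': number_of_steps})
print(episode_df.describe())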
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import sklearn as sk
import matplotlib.pyplot as plt
import gc
train = pd.read_csv("train.csv",parse_dates=["activation_date"])
test = pd.read_csv("test.csv",parse_dates=["activation_date"])
y_psudo_labels = train["deal_probability"] > 0
ytrain = train["deal_probability"].values
aggregated_features = pd.read_csv("aggregated_features.csv")
lda_features = | pd.read_csv("lda_features.csv") | pandas.read_csv |
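A hedged sketch of how engineered feature tables like the aggregated and LDA features loaded above are usually joined back onto train/test; the key column `item_id` is an assumption, not read from the files.

import pandas as pd

train = pd.DataFrame({'item_id': ['a1', 'a2'], 'price': [100.0, 250.0]})
aggregated_features = pd.DataFrame({'item_id': ['a1', 'a2'], 'user_item_count': [3, 7]})
train = train.merge(aggregated_features, on='item_id', how='left')
print(train.head())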
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sbn
from datetime import date
import scipy.stats as stats
import math
from clean import clean_df
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, train_test_split
from sklearn.metrics import roc_auc_score, roc_curve, auc, accuracy_score,f1_score, accuracy_score, precision_score, recall_score, classification_report
from sklearn.tree import DecisionTreeClassifier
import pickle
def optimize_model2_randomCV(model, grid_params, X_train, y_train, scoring):
"""[Takes a model in, grid parameters, X and y values returns best model found using the input parameters]
Args:
model ([Classifcation model sklearn]): [Logistic Regression, Random Forrest, Gradient Boost, and others]
grid_params ([dictionary]): [keys are strings of parater, values are list of values to try]
X_train ([Pandas dataframe]): [training feature data]
y_train ([numpy array]): [array of target values]
scoring ([scoring type to measure]): [sklearn scoring options for given model]
Returns:
[type]: [description]
"""
model_search = RandomizedSearchCV(model
,grid_params
,n_jobs=-1
,verbose=False
,scoring=scoring)
model_search.fit(X_train, y_train)
print(f"Best Parameters for {model}: {model_search.best_params_}")
print(f"Best Model for {model}: {model_search.best_estimator_}")
print(f"Best Score for {model}: {model_search.best_score_:.4f}")
return model_search.best_estimator_
def best_model_predictor(model, X_test, y_test):
"""[returns Analysis of model on test set or valudation set]
Args:
model ([sklearn classifer model]): [Logistic regression, Random Forrest, Gradient Boosting, etc]
X_test ([Pandas dataframe]): [Test feature data]
y_test ([numpy array]): [target valudation data]
"""
y_hats = model.predict(X_test)
print(f"{model} ROC Score = {roc_auc_score(y_test, y_hats):.3f}")
print(f"{model} F1 Score = {f1_score(y_test, y_hats):.3f}")
print(f"{model} Accuracy Score = {accuracy_score(y_test, y_hats):.3f}")
print(classification_report(y_test, y_hats))
def roc_curve_grapher(model, X_test ,y_test):
"""[Makes ROC curve graph given model and data]
Args:
        model ([sklearn classifier model]): [Logistic Regression, Random Forest, Gradient Boosting, etc.]
        X_test ([Pandas dataframe]): [test feature data]
        y_test ([numpy array]): [target validation data]
"""
yhat = model.predict_proba(X_test)
yhat = yhat[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, yhat)
plt.plot([0,1], [0,1], linestyle='--', label='Random guess')
plt.plot(fpr, tpr, marker='.', label=f'Model')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.suptitle('Model ROC curve', fontsize=20)
plt.legend()
plt.show()
def numerical_df_maker(df):
"""[Turns airBnB cleaned dataframe from clean_df into numerial for modeling]
Args:
df ([pandas dataframe]): [Pandas Dataframe cleaned from clean_df function]
Returns:
[dataframe]: [df numerical values for modeling]
"""
# df5 = combined_df3.copy()
col_list = ['id_guest_anon', 'id_host_anon', 'id_listing_anon','contact_channel_first','length_of_stay',
'ts_interaction_first', 'ts_reply_at_first', 'ts_accepted_at_first','ds_checkin_first', 'ds_checkout_first','id_user_anon',
'country','booked', 'date_interaction_first', 'response_time','listing_neighborhood']
df.drop(col_list, axis = 1,inplace= True)
d2 = {
'past_booker':1
,'new':0}
df['guest_user_stage_first'].replace(d2, inplace= True)
df = pd.get_dummies(df, prefix=['A'], columns=['room_type'])
return df
if __name__ == "__main__":
listings_df = | pd.read_csv('~/Downloads/2018 DA Take Home Challenge/listings.csv') | pandas.read_csv |
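A small usage sketch for the helper functions above on a synthetic binary-classification dataset; the parameter grid and scoring choice are illustrative assumptions, not values from the original analysis.

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=300, n_features=8, random_state=0)
X = pd.DataFrame(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
grid_params = {'n_estimators': [50, 100, 200], 'max_depth': [3, 5, 8, None]}
best_rf = optimize_model2_randomCV(RandomForestClassifier(random_state=0),
                                   grid_params, X_train, y_train, scoring='roc_auc')
best_model_predictor(best_rf, X_test, y_test)
roc_curve_grapher(best_rf, X_test, y_test)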
import pyproj
import numpy as np
import pandas as pd
def find_closest_node(G, point):
"""find the closest node on the graph from a given point"""
distance = np.full((len(G.nodes)), fill_value=np.nan)
for ii, n in enumerate(G.nodes):
distance[ii] = point.distance(G.nodes[n]['geometry'])
name_node = list(G.nodes)[np.argmin(distance)]
distance_node = np.min(distance)
return name_node, distance_node
class EnergyCalculation:
"""
Add information on energy use and effects on energy use.
"""
def __init__(self, FG, vessel, *args, **kwargs):
super().__init__(*args, **kwargs)
"""Initialization"""
self.FG = FG
self.vessel = vessel
self.energy_use = {"time_start": [],
"time_stop": [],
"edge_start": [],
"edge_stop": [],
"total_energy": [],
"total_emission_CO2": [],
"total_emission_PM10": [],
"total_emission_NOX": [],
"stationary": [],
"water depth": [],
"distance": [],
"delta_t": []}
self.co2_footprint = {"total_footprint": 0, "stationary": 0}
self.mki_footprint = {"total_footprint": 0, "stationary": 0}
def calculate_energy_consumption(self):
"""Calculation of energy consumption based on total time in system and properties"""
def calculate_distance(geom_start, geom_stop):
"""method to calculate the distance in meters between two geometries"""
wgs84 = pyproj.Geod(ellps='WGS84')
# distance between two points
return float(wgs84.inv(geom_start.x, geom_start.y,
geom_stop.x, geom_stop.y) [2])
def calculate_depth(geom_start, geom_stop):
"""method to calculate the depth of the waterway in meters between two geometries"""
depth = 0
#The node on the graph of vaarweginformatie.nl closest to geom_start and geom_stop
node_start = find_closest_node(self.FG, geom_start)[0]
node_stop = find_closest_node(self.FG, geom_stop)[0]
#Read from the FG data from vaarweginformatie.nl the General depth of each edge
            try:  # if node_start != node_stop:
                depth = self.FG.get_edge_data(node_start, node_stop)["Info"]["GeneralDepth"]
            except Exception:
                depth = np.nan  # no depth data available for this edge, so fall back to NaN
h = depth
# depth of waterway between two points
return h
# log messages that are related to locking
stationary_phase_indicator = [
"Waiting to enter waiting area stop",
"Waiting in waiting area stop",
"Waiting in line-up area stop",
"Passing lock stop",
]
# extract relevant elements from the vessel log
times = self.vessel.log["Timestamp"]
messages = self.vessel.log["Message"]
geometries = self.vessel.log["Geometry"]
# now walk past each logged event (each 'time interval' in the log corresponds to an event)
for i in range(len(times) - 1):
# determine the time associated with the logged event (how long did it last)
delta_t = (times[i + 1] - times[i]).total_seconds()
if delta_t != 0:
# append time information to the variables for the dataframe
self.energy_use["time_start"].append(times[i])
self.energy_use["time_stop"].append(times[i + 1])
# append geometry information to the variables for the dataframe
self.energy_use["edge_start"].append(geometries[i])
self.energy_use["edge_stop"].append(geometries[i + 1])
# calculate the distance travelled and the associated velocity
distance = calculate_distance(geometries[i], geometries[i + 1])
V_0 = distance / delta_t
self.energy_use["distance"].append(distance)
# calculate the delta t
self.energy_use["delta_t"].append(delta_t)
# calculate the water depth
h = calculate_depth(geometries[i], geometries[i + 1])
# printstatements to check the output (can be removed later)
print('delta_t: {:.4f} s'. format(delta_t))
print('distance: {:.4f} m'. format(distance))
print('velocity: {:.4f} m/s'. format(V_0))
# we use the calculated velocity to determine the resistance and power required
self.vessel.calculate_total_resistance(V_0, h)
self.vessel.calculate_total_power_required()
self.vessel.calculate_emission_factors_total()
if messages[i + 1] in stationary_phase_indicator: # if we are in a stationary stage only log P_hotel
#Energy consumed per time step delta_t in the stationary stage
energy_delta = self.vessel.P_hotel * delta_t / 3600 # kJ/3600 = kWh
#Emissions CO2, PM10 and NOX, in gram - emitted in the stationary stage per time step delta_t, consuming 'energy_delta' kWh
emission_delta_CO2 = self.vessel.Emf_CO2 * energy_delta # in g
emission_delta_PM10 = self.vessel.Emf_PM10 * energy_delta # in g
emission_delta_NOX = self.vessel.Emf_NOX * energy_delta # in g
self.energy_use["total_energy"].append(energy_delta)
self.energy_use["stationary"].append(energy_delta)
self.energy_use["total_emission_CO2"].append(emission_delta_CO2)
self.energy_use["total_emission_PM10"].append(emission_delta_PM10)
self.energy_use["total_emission_NOX"].append(emission_delta_NOX)
if not np.isnan(h):
self.energy_use["water depth"].append(h)
else:
self.energy_use["water depth"].append(self.energy_use["water depth"].iloc[i])
else: # otherwise log P_tot
#Energy consumed per time step delta_t in the propulsion stage
energy_delta = self.vessel.P_tot * delta_t / 3600 # kJ/3600 = kWh
#Emissions CO2, PM10 and NOX, in gram - emitted in the propulsion stage per time step delta_t, consuming 'energy_delta' kWh
emission_delta_CO2 = self.vessel.Emf_CO2 * energy_delta #Energy consumed per time step delta_t in the stationary phase # in g
emission_delta_PM10 = self.vessel.Emf_PM10 * energy_delta # in g
emission_delta_NOX = self.vessel.Emf_NOX * energy_delta # in g
self.energy_use["total_energy"].append(energy_delta)
self.energy_use["stationary"].append(0)
self.energy_use["total_emission_CO2"].append(emission_delta_CO2)
self.energy_use["total_emission_PM10"].append(emission_delta_PM10)
self.energy_use["total_emission_NOX"].append(emission_delta_NOX)
self.energy_use["water depth"].append(h)
#self.energy_use["water depth info from vaarweginformatie.nl"].append(depth)
        # TODO: quite a few things here still need to be filled in better
        # - the cruising speed is currently 1 m/s by default (see the Movable mixin). The vessel
        #   database should really also get a speed_loaded and a speed_unloaded.
        # - it should be looked into again what can be done around coastal/hydraulic structures
        # - and there is still something wrong with the speed around a lock
def plot(self):
import folium
df = | pd.DataFrame.from_dict(self.energy_use) | pandas.DataFrame.from_dict |
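A minimal sketch of what `pd.DataFrame.from_dict` does with a log dictionary like `energy_use`; only a few of the real keys are shown and the values are invented.

import pandas as pd

energy_use = {'total_energy': [1.2, 0.8],             # kWh per logged interval (made up)
              'total_emission_CO2': [950.0, 640.0],   # grams per interval (made up)
              'distance': [1500.0, 900.0]}            # metres per interval (made up)
df = pd.DataFrame.from_dict(energy_use)
print(df[['total_energy', 'total_emission_CO2']].sum())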
import torch
import numpy as np
import pandas as pd
import os
import sys
from torchsummary import summary
import torch.nn as nn
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
from matplotlib import cm
import seaborn as sns
sns.set(
font_scale=1.5,
style="whitegrid",
rc={
'text.usetex' : False,
'lines.linewidth': 2
}
)
# sns.set_theme()
# sns.set_style('whitegrid')
import glob
import copy
import math
import models
import random
import torch.optim
import torch
import argparse
import utils
from sklearn.linear_model import LogisticRegression
try:
from tqdm import tqdm
except:
def tqdm(x): return x
"""
Plot the data contained in quant (keys: the names of the experiments), against the reference (contained in stats_ref)
dirname: the output directory name
"""
def process_df(quant, dirname, stats_ref=None, args=None, args_model=None, save=True, split=False):
global table_format
col_names = ["experiment", "stat", "set", "layer"]
quant = utils.assert_col_order(quant, col_names, id_vars="var")
keys = list(quant.columns.levels[0].sort_values())
output_root = os.path.join(dirname, f"merge_" + "_".join(keys))
os.makedirs(output_root, exist_ok=True)
    idx = pd.IndexSlice
    Idx = pd.IndexSlice  # capitalised alias used by the slicing expressions below
cols_error = idx[:, 'error', :, :]
N_L = len(quant.columns.unique(level="layer")) # number of hidden layers
# errors = quant["error"]
# losses = quant["loss"]
quant.drop("val", axis=1,level="set", inplace=True, errors='ignore')
quant.drop(("test", "loss"), axis=1, inplace=True, errors='ignore')
if save:
quant.to_csv(os.path.join(output_root, 'merge.csv'))
if stats_ref is not None:
stats_ref.to_csv(os.path.join(output_root, 'stats_ref.csv'))
quant.sort_index(axis=1, inplace=True)
quant.loc[:, cols_error] *= 100 # in %
quant.groupby(level=["experiment", "stat", "set"], axis=1, group_keys=False).describe().to_csv(os.path.join(output_root, 'describe.csv'))
quant_ref = None
Ts = { -1: 0, 0: 0, 1: 12.71, 2: 4.303, 3: 3.182, 4: 2.776, 9: 2.262}
# quant.where(quant != 0, 6.1*10**(-5), inplace=True)
if args.yscale == "log":
quant_log = np.log10(quant)
# quant_log.loc[:, Idx['B', "loss", :, 10]]
if stats_ref is not None: # the reference to plot against
N_S = len(stats_ref.columns)
quant_ref_merge = pd.DataFrame()
stats_ref.loc[:, "error"] = stats_ref["error"].values * 100
if "layer" in stats_ref.columns.names:
stats_ref.columns = stats_ref.columns.droplevel('layer')
# confidence intervals for the reference loss
quant_ref = stats_ref.agg(['mean', 'count', 'std'])
quant_ref.loc['se'] = quant_ref.loc['std'] / np.sqrt(quant_ref.loc['count']) # standard error
quant_ref.loc['ci95'] = [ Ts[n-1] * se for (n, se) in zip(quant_ref.loc['count'], quant_ref.loc['se']) ] # 95% CI
if args.yscale == "log":
quant_ref_log = np.log10(stats_ref).agg(['mean', 'count', 'std'])
quant_ref_log.loc['se'] = quant_ref_log.loc['std'] / np.sqrt(quant_ref_log.loc['count'])
quant_ref_log.loc['ci95'] = [ Ts[n-1] * se for (n, se) in zip(quant_ref_log.loc['count'], quant_ref_log.loc['se']) ] # 95% CI
# if args_model is not None:
# else:
xlabels=[str(i) for i in range(N_L)]
logstr = "_log" if args.yscale == "log" else ""
has_ref = quant_ref is not None
# if len(keys) <= 2:
palette=sns.color_palette(n_colors=len(keys))
if not split:
fig, axes = plt.subplots(2, 1, figsize=(4, 8), sharex=False)
# sns.set(font_scale=1,rc={"lines.linewidth":3})
k = 0
# the confidence intervals
df_ci = quant.describe()
df_ci.loc["ymax", :] = [mean + Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci.loc["mean", :], df_ci.loc["std", :], df_ci.loc["count", :])]
df_ci.loc["ymin", :] = [mean - Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci.loc["mean", :], df_ci.loc["std", :], df_ci.loc["count", :])]
#confidence intervals for the log plot
if args.yscale == "log":
df_ci_log = quant_log.describe()
df_ci_log.loc["ymax", :] = [mean + Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci_log.loc["mean", :], df_ci_log.loc["std", :], df_ci_log.loc["count", :])]
df_ci_log.loc["ymin", :] = [mean - Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci_log.loc["mean", :], df_ci_log.loc["std", :], df_ci_log.loc["count", :])]
#rp.set_axis_labels("layer", "Loss", labelpad=10)
#quant.loc[1, Idx["loss", :, 0]].lineplot(x="layer_ids", y="value", hue="")
for i, stat in enumerate(["loss","error" ]):
for j, setn in enumerate(["train","test"]):
if stat == "loss" and setn=="test":
continue
if stat == "error" and setn=="train":
continue
# axes[k] = rp.axes[j,i]
log_plot = args.yscale == "log" and setn == "train"
if split:
fig, ax = plt.subplots(1, 1, figsize=(4, 4), sharex=False)
else:
ax = axes.flatten()[k]
if log_plot:
df_plot = quant_log.loc[:, Idx[:, stat, setn, :]]
df_ci_plot = df_ci_log
else:
df_plot = quant.loc[:, Idx[:, stat, setn, :]]#.min(axis=0).to_frame(name="value")
df_ci_plot = df_ci
df_plot = pd.melt(df_plot.reset_index(), id_vars="var")
lp = sns.lineplot(
#data=rel_losses.min(axis=0).to_frame(name="loss"),
data=df_plot,
#hue="width",
hue="experiment",
hue_order=keys,
x="layer",
y="value",
legend=None,
# style='set',
ci=None,
palette=palette,
#style='layer',
markers=False,
ax=ax,
dashes=True,
# linewidth=3.,
#legend_out=True,
#y="value",
)
lp.set(xticks=range(0, len(xlabels)))
# rp.set_xticklabels(xlabels)
# rp.axes[0,0].locator_params(axis='x', nbins=len(xlabels))
lp.set_xticklabels(xlabels)#, rotation=40*(is_vgg))
for j, exp in enumerate(keys):
xs =quant.loc[:, Idx[exp, stat, setn, :]].columns.get_level_values('layer').unique()
df_ci_pplot = df_ci_plot.loc[:, Idx[exp, stat, setn, xs]]
ax.fill_between(xs, df_ci_pplot.loc["ymax",:].values, df_ci_pplot.loc["ymin", :].values, color=ax.lines[j].get_color(), alpha=0.3)
# else:
# lp.set_xticklabels(len(xlabels)*[None])
if not split:
ax.set_title("{} {}{}".format(setn.title()+(setn=="train")*"ing", stat.title(), " (%)" if stat=="error" else ''))
# ylabel = stat if stat == "loss" else "error (%)"
ax.set_xlabel("layer index l")
ax.set_ylabel(None)
if setn == "test":
ax.set_ylim(df_plot["value"].min(), df_plot["value"].max())
if log_plot: # set the axis in power of 10 values
ax.get_yaxis().get_major_formatter().set_useMathText(True)
ax.get_yaxis().set_major_formatter(lambda x, pos: "$10^{" + f"{int(x)}" + "}$")
if has_ref:
# data_ref = quant_ref[stat, setn].reset_index()
if not log_plot:
ax.axline((0,quant_ref[stat, setn][0]), (1, quant_ref[stat, setn][0]), ls=":", zorder=2, c='g') # for the mean
y1 = quant_ref.loc['mean', (stat, setn)] + quant_ref.loc['ci95', (stat, setn)]#quant_ref.loc['std', (stat, setn)] #
y2 = quant_ref.loc['mean', (stat, setn)] - quant_ref.loc['ci95', (stat, setn)] #quant_ref.loc['ci95', (stat, setn)]
ax.axhspan(y1, y2, facecolor='g', alpha=0.5)
else:
ax.axline((0,quant_ref_log[stat, setn][0]), (1, quant_ref_log[stat, setn][0]), ls=":", zorder=2, c='g') # for the mean
y1 = quant_ref_log.loc['mean', (stat, setn)] + quant_ref_log.loc['ci95', (stat, setn)]#quant_ref_log.loc['std', (stat, setn)] #
y2 = quant_ref_log.loc['mean', (stat, setn)] - quant_ref_log.loc['ci95', (stat, setn)] #quant_ref_log.loc['ci95', (stat, setn)]
ax.axhspan(y1, y2, facecolor='g', alpha=0.5)
# data_ref.index = pd.Index(range(len(data_ref)))
# ax=ax,
# if setn == "train":
# ax.set_yscale(args.yscale)
if split:
# if k == 1:
labels=keys + has_ref*["ref."]
if setn == "test": # reset the name (not log)
logstr = ""
fig.legend(handles=ax.lines, labels=labels,
# title="Exp.",
loc="upper right", borderaxespad=0, bbox_to_anchor=(0.9,0.9))#, bbox_transform=fig.transFigure)
# fig.tight_layout()
plt.margins()
plt.savefig(fname=os.path.join(output_root, f"{setn}_{stat}{logstr}.pdf"), bbox_inches='tight')
k += 1
# fig.subplots_adjust(top=0.85)
# if is_vgg:
if not split:
labels=keys + has_ref*["ref."]
fig.legend(handles=ax.lines, labels=labels,
# title="Exp.",
loc="upper right", borderaxespad=0, bbox_to_anchor=(0.9,0.9))#, bbox_transform=fig.transFigure)
fig.tight_layout()
# plt.margins()
fig.savefig(fname=os.path.join(output_root, f"train_loss_test_error{logstr}.pdf"), bbox_inches='tight')
k=0
# sns.set(font_scale=1,rc={"lines.linewidth":3})
fig, axes = plt.subplots(1, 1, figsize=(4, 4), sharex=False)
for i, stat in enumerate(["error"]):
for j, setn in enumerate(["train"]):
if stat == "loss" and setn=="test":
continue
if stat=="error" and setn=="test":
continue
# axes[k] = rp.axes[j,i]
ax = axes
# df_plot = quant.loc[:, Idx[:, stat, setn, :]].min(axis=0).to_frame(name="value")
df_plot = quant.loc[:, Idx[:, stat, setn, :]]#.min(axis=0).to_frame(name="value")
df_plot = pd.melt(df_plot.reset_index(), id_vars="var")
lp = sns.lineplot(
#data=rel_losses.min(axis=0).to_frame(name="loss"),
data=df_plot,
#hue="width",
hue="experiment",
hue_order=keys,
x="layer",
y="value",
legend=None,
# style='set',
ci=95,
palette=palette,
#style='layer',
markers=False,
ax=ax,
dashes=True,
#legend_out=True,
#y="value",
)
lp.set(xticks=range(0, len(xlabels)))
# rp.set_xticklabels(xlabels)
# rp.axes[0,0].locator_params(axis='x', nbins=len(xlabels))
# rp.axes[0,1].locator_params(axis='x', nbins=len(xlabels))
lp.set_xticklabels(xlabels)#, rotation=40*(is_vgg))
if not split:
ax.set_title("{} {}{}".format(setn.title()+(setn=="train")*'ing', stat.title(), " (%)" if stat=="error" else ''))
# ylabel = stat if stat == "loss" else "error (%)"
ax.set_xlabel("layer index l")
ax.set_ylabel(None)
if setn == "train":
ax.set_yscale(args.yscale)
if quant_ref is not None:
# data_ref = quant_ref[stat, setn].reset_index()
ax.axline((0,quant_ref[stat, setn][0]), (1,quant_ref[stat, setn][0]), ls=":", zorder=2, c='g')
# data_ref.index = pd.Index(range(len(data_ref)))
# sns.lineplot(
# data=data_ref, # repeat the datasaet N_L times
# ax=ax,
# # x=range(len(data_ref)),
# # y="value",
# # xc np.tile(np.linspace(1, N_L, num=N_L), 2),
# # x='',
# # hue='r',
# # color='red',
# palette=['red'],
# # style='set',
# # x='index',
# # dashes=True,
# legend=False,
# # y="value"
# )
# for ax in ax.lines[-1:]: # the last two
# ax.set_linestyle('--')
k += 1
# fig.subplots_adjust(top=0.85)
# if is_vgg:
    labels = keys + (quant_ref is not None)*["ref."]
    fig.legend(handles=ax.lines, labels=labels,
#title="Exp.",
loc="upper right", bbox_to_anchor=(0.9,0.9),borderaxespad=0)#, bbox_transform=fig.transFigure)
plt.margins()
plt.savefig(fname=os.path.join(output_root, f"error_train{logstr}.pdf"), bbox_inches='tight')
if "B" in keys:
df_B = quant["B"]
elif "B2" in keys:
df_B = quant["B2"]
else:
return
n_draws = len(df_B.index)
# vary_draw=copy.deepcopy(df_B)
df_B_plot = pd.melt(df_B.reset_index(), id_vars="var")
cp = sns.FacetGrid(
data=df_B_plot,
# hue="experiment",
# hue_order=["A", "B"],
col="stat",
col_order=["loss", "error"],
row="set",
row_order=["train", "test"],
# x="layer",
# y="value",
# kind='line',
# legend="full",
# style='set',
# ci='sd',
# palette=palette,
#style='layer',
# markers=False,
# dashes=True,
#legend_out=True,
# facet_kws={
sharey= False,
sharex= True,
#y="value",
)
styles=['dotted', 'dashed', 'dashdot', 'solid']
# for i_k, k in enumerate([10, 50, 100, 200]):
draws = len(df_B.index)
df_bound = pd.DataFrame(columns=df_B.columns)
# df_bound.columns = df_B.columns
# for k in range(1, draws+1):
# # df_cut = pd.melt(df_B[:k].reset_index(), id_vars="draw")
# df_bound.loc[k, :] = df_B[:k].min(axis=0)
# # idx_min = df_cut.query('stat=="loss"idxmin")
# fig, axes= plt.subplots(2,2,figsize=(12,12), sharex=True)
# for i, stat in enumerate(["loss", "error"]):
# for j, setn in enumerate(["train", "test"]):
# df_bound_plot = df_bound[stat,setn].max(axis=1)
# ax=axes[i,j]
# ax.set_title("{} {}".format(setn.title(), stat.title()))
# sns.lineplot(
# data=df_bound_plot,
# ax=ax,
# )
# # cp.axes[j,i].set_title("{} {}".format(setn.title(), stat.title()))
# plt.savefig(fname=os.path.join(output_root, "comp_draws.pdf"), bbox_inches='tight')
plt.close('all')
# ylabel = stat if stat == "loss" else "error (%)"
# cp.axes[j,i].set_ylabel(ylabel)
# cp.axes[j,i].set_xlabel("layer index l")
# df_cut_plot = pd.melt(df_cut_min.query(f'stat=="{stat}" & set=="{setn}"'))
# if quant_ref is not None:
# data_ref = quant_ref[stat, setn].reset_index()
# data_ref.index = pd.Index(range(len(data_ref)))
# sns.lineplot(
# data=df_cut_plot, repeat the datasaet N_L times
# ax=cp.axes[j,i],
# x=range(len(data_ref)),
# y="value",
# xc np.tile(np.linspace(1, N_L, num=N_L), 2),
# x='layer',
# hue='r',
# color='red',
# palette=['red'],
# style='set',
# x='index',
# dashes=True,
# legend=False,
# y="value"
# )
# for ax in cp.axes[j,i].lines[-1:]: the last two
# ax.set_linestyle(styles[i_k])
def process_csv(file_csv):
'''Read and process a previously computed result stored inside a checkpoint'''
idx = pd.IndexSlice
quant = pd.read_csv(file_csv, header=[0,1], index_col=0)
process_df(quant, os.path.dirname(file_csv))
return
if __name__ == '__main__':
torch.autograd.set_detect_anomaly(True)
parser = argparse.ArgumentParser('Evaluating a copy of a classifier with removed units')
parser_device = parser.add_mutually_exclusive_group()
parser_device.add_argument('--cpu', action='store_true', dest='cpu', help='force the cpu model')
parser_device.add_argument('--cuda', action='store_false', dest='cpu')
#parser.add_argument('--end_layer', type=int, help='if set the maximum layer for which to compute the separation (forward indexing)')
parser.add_argument('--table_format', choices=["wide", "long"], default="long")
parser.add_argument('--experiments', nargs='*', default=['A', 'B'], help='whitelist for the experiments to cat')
parser.add_argument('--yscale', choices=["linear", "log"], default='linear', help='the scale for the y axis')
parser.add_argument('dirs', nargs='*', help='the directories to process')
parser.add_argument('--split', default=True, action='store_true', help='split the err/loss figures in two')
parser.set_defaults(cpu=False)
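    # Example invocation (illustrative; the script name and results layout are assumptions):
    #   python plot_min_results.py --experiments A B --yscale log results/run1 results/run2
    # Each positional directory is searched recursively for `min.csv` checkpoints below.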
args = parser.parse_args()
table_format = args.table_format
device = torch.device('cuda' if torch.cuda.is_available() and not args.cpu else 'cpu')
#device = torch.device('cpu')
dtype = torch.float
num_gpus = torch.cuda.device_count()
def get_parent(path):
return os.path.basename(os.path.dirname(path))
def get_grand_parent(path):
return os.path.dirname(os.path.dirname(path.rstrip(os.sep)))
for directory in args.dirs:
lst_file = glob.glob(os.path.join(directory, "**", "min.csv"), recursive=True) # all the saved results
roots = set(map(get_grand_parent, lst_file))
for root in roots:
id_lst_file = glob.glob(os.path.join(root, "**", "min.csv"), recursive=True)
            df_bundle = pd.DataFrame()
import asyncio
import datetime
import logging
from typing import List, Tuple, Union
import pandas as pd
import pytest
import core.signal_processing as csigproc
import helpers.hasyncio as hasynci
import helpers.hdbg as hdbg
import helpers.hunit_test as hunitest
import market_data as mdata
import oms.oms_db as oomsdb
import oms.order_processor as oordproc
import oms.portfolio as omportfo
import oms.portfolio_example as oporexam
import oms.process_forecasts as oprofore
import oms.test.oms_db_helper as omtodh
_LOG = logging.getLogger(__name__)
class TestSimulatedProcessForecasts1(hunitest.TestCase):
def test_initialization1(self) -> None:
with hasynci.solipsism_context() as event_loop:
hasynci.run(
self._test_simulated_system1(event_loop), event_loop=event_loop
)
async def _test_simulated_system1(
self, event_loop: asyncio.AbstractEventLoop
) -> None:
"""
Run `process_forecasts()` logic with a given prediction df to update a
Portfolio.
"""
config = {}
(
market_data,
get_wall_clock_time,
) = mdata.get_ReplayedTimeMarketData_example3(event_loop)
# Build predictions.
index = [
pd.Timestamp("2000-01-01 09:35:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:40:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:45:00-05:00", tz="America/New_York"),
]
columns = [101, 202]
prediction_data = [
[0.1, 0.2],
[-0.1, 0.3],
[-0.3, 0.0],
]
predictions = pd.DataFrame(prediction_data, index, columns)
volatility_data = [
[1, 1],
[1, 1],
[1, 1],
]
volatility = pd.DataFrame(volatility_data, index, columns)
# Build a Portfolio.
portfolio = oporexam.get_simulated_portfolio_example1(
event_loop,
market_data=market_data,
asset_ids=[101, 202],
)
config["order_type"] = "price@twap"
config["order_duration"] = 5
config["ath_start_time"] = datetime.time(9, 30)
config["trading_start_time"] = datetime.time(9, 35)
config["ath_end_time"] = datetime.time(16, 00)
config["trading_end_time"] = datetime.time(15, 55)
config["execution_mode"] = "batch"
# Run.
await oprofore.process_forecasts(
predictions,
volatility,
portfolio,
config,
)
actual = str(portfolio)
expected = r"""# historical holdings=
asset_id 101 202 -1
2000-01-01 09:35:00-05:00 0.0 0.0 1000000.00
2000-01-01 09:35:01-05:00 0.0 0.0 1000000.00
2000-01-01 09:40:01-05:00 33.32 66.65 900039.56
2000-01-01 09:45:01-05:00 -24.99 74.98 950024.38
# historical holdings marked to market=
asset_id 101 202 -1
2000-01-01 09:35:00-05:00 0.0 0.0 1000000.00
2000-01-01 09:35:01-05:00 0.0 0.0 1000000.00
2000-01-01 09:40:01-05:00 33329.65 66659.3 900039.56
2000-01-01 09:45:01-05:00 -24992.93 74978.79 950024.38
# historical statistics=
net_asset_holdings cash net_wealth gross_exposure leverage pnl realized_pnl unrealized_pnl
2000-01-01 09:35:00-05:00 0.00 1000000.00 1.00e+06 0.00 0.0 NaN NaN NaN
2000-01-01 09:35:01-05:00 0.00 1000000.00 1.00e+06 0.00 0.0 0.00 0.00 0.00
2000-01-01 09:40:01-05:00 99988.95 900039.56 1.00e+06 99988.95 0.1 28.51 -99960.44 99988.95
2000-01-01 09:45:01-05:00 49985.86 950024.38 1.00e+06 99971.72 0.1 -18.28 49984.81 -50003.09"""
self.assert_equal(actual, expected, fuzzy_match=True)
class TestMockedProcessForecasts1(omtodh.TestOmsDbHelper):
def test_mocked_system1(self) -> None:
with hasynci.solipsism_context() as event_loop:
# Build a Portfolio.
db_connection = self.connection
table_name = oomsdb.CURRENT_POSITIONS_TABLE_NAME
#
oomsdb.create_oms_tables(self.connection, incremental=False)
#
portfolio = oporexam.get_mocked_portfolio_example1(
event_loop,
db_connection,
table_name,
asset_ids=[101, 202],
)
# Build OrderProcessor.
get_wall_clock_time = portfolio._get_wall_clock_time
poll_kwargs = hasynci.get_poll_kwargs(get_wall_clock_time)
# poll_kwargs["sleep_in_secs"] = 1
poll_kwargs["timeout_in_secs"] = 60 * 10
delay_to_accept_in_secs = 3
delay_to_fill_in_secs = 10
broker = portfolio.broker
termination_condition = 3
order_processor = oordproc.OrderProcessor(
db_connection,
delay_to_accept_in_secs,
delay_to_fill_in_secs,
broker,
poll_kwargs=poll_kwargs,
)
order_processor_coroutine = order_processor.run_loop(
termination_condition
)
coroutines = [
self._test_mocked_system1(portfolio),
order_processor_coroutine,
]
hasynci.run(asyncio.gather(*coroutines), event_loop=event_loop)
async def _test_mocked_system1(
self,
portfolio,
) -> None:
"""
Run process_forecasts() logic with a given prediction df to update a
Portfolio.
"""
config = {}
# Build predictions.
index = [
pd.Timestamp("2000-01-01 09:35:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:40:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:45:00-05:00", tz="America/New_York"),
]
columns = [101, 202]
prediction_data = [
[0.1, 0.2],
[-0.1, 0.3],
[-0.3, 0.0],
]
predictions = pd.DataFrame(prediction_data, index, columns)
volatility_data = [
[1, 1],
[1, 1],
[1, 1],
]
volatility = pd.DataFrame(volatility_data, index, columns)
config["order_type"] = "price@twap"
config["order_duration"] = 5
config["ath_start_time"] = datetime.time(9, 30)
config["trading_start_time"] = datetime.time(9, 35)
config["ath_end_time"] = datetime.time(16, 00)
config["trading_end_time"] = datetime.time(15, 55)
config["execution_mode"] = "batch"
# Run.
await oprofore.process_forecasts(
predictions,
volatility,
portfolio,
config,
)
# TODO(Paul): Re-check the correctness after fixing the issue with
# pricing assets not currently in the portfolio.
actual = str(portfolio)
# TODO(Paul): Get this and the simulated test output to agree perfectly.
expected = r"""# historical holdings=
asset_id 101 202 -1
2000-01-01 09:35:00-05:00 0.0 0.0 1000000.00
2000-01-01 09:35:01-05:00 NaN NaN 1000000.00
2000-01-01 09:40:01-05:00 33.32 66.65 900039.56
2000-01-01 09:45:01-05:00 -24.99 74.98 950024.38
# historical holdings marked to market=
101 202 -1
2000-01-01 09:35:00-05:00 0.0 0.0 1000000.00
2000-01-01 09:35:01-05:00 NaN NaN 1000000.00
2000-01-01 09:40:01-05:00 33329.65 66659.3 900039.56
2000-01-01 09:45:01-05:00 -24992.93 74978.79 950024.38
# historical statistics=
net_asset_holdings cash net_wealth gross_exposure leverage pnl realized_pnl unrealized_pnl
2000-01-01 09:35:00-05:00 0.00 1000000.00 1.00e+06 0.00 0.0 NaN NaN NaN
2000-01-01 09:35:01-05:00 0.00 1000000.00 1.00e+06 0.00 0.0 0.00 0.00 0.00
2000-01-01 09:40:01-05:00 99988.95 900039.56 1.00e+06 99988.95 0.1 28.51 -99960.44 99988.95
2000-01-01 09:45:01-05:00 49985.86 950024.38 1.00e+06 99971.72 0.1 -18.28 49984.81 -50003.09"""
self.assert_equal(actual, expected, fuzzy_match=True)
class TestMockedProcessForecasts2(omtodh.TestOmsDbHelper):
def test_mocked_system1(self) -> None:
data = self._get_market_data_df1()
predictions, volatility = self._get_predictions_and_volatility1(data)
self._run_coroutines(data, predictions, volatility)
def test_mocked_system2(self) -> None:
data = self._get_market_data_df2()
predictions, volatility = self._get_predictions_and_volatility1(data)
self._run_coroutines(data, predictions, volatility)
def test_mocked_system3(self) -> None:
data = self._get_market_data_df1()
predictions, volatility = self._get_predictions_and_volatility2(data)
self._run_coroutines(data, predictions, volatility)
@pytest.mark.skip(
"This test times out because nothing interesting happens after the first set of orders."
)
def test_mocked_system4(self) -> None:
data = self._get_market_data_df2()
predictions, volatility = self._get_predictions_and_volatility2(data)
self._run_coroutines(data, predictions, volatility)
def _run_coroutines(self, data, predictions, volatility):
with hasynci.solipsism_context() as event_loop:
# Build MarketData.
initial_replayed_delay = 5
asset_id = [data["asset_id"][0]]
market_data, _ = mdata.get_ReplayedTimeMarketData_from_df(
event_loop,
initial_replayed_delay,
data,
)
# Create a portfolio with one asset (and cash).
db_connection = self.connection
table_name = oomsdb.CURRENT_POSITIONS_TABLE_NAME
oomsdb.create_oms_tables(self.connection, incremental=False)
portfolio = oporexam.get_mocked_portfolio_example1(
event_loop,
db_connection,
table_name,
market_data=market_data,
asset_ids=asset_id,
)
# Build OrderProcessor.
delay_to_accept_in_secs = 3
delay_to_fill_in_secs = 10
broker = portfolio.broker
poll_kwargs = hasynci.get_poll_kwargs(portfolio._get_wall_clock_time)
poll_kwargs["timeout_in_secs"] = 60 * 10
order_processor = oordproc.OrderProcessor(
db_connection,
delay_to_accept_in_secs,
delay_to_fill_in_secs,
broker,
poll_kwargs=poll_kwargs,
)
# Build order process coroutine.
termination_condition = 4
order_processor_coroutine = order_processor.run_loop(
termination_condition
)
coroutines = [
self._test_mocked_system1(predictions, volatility, portfolio),
order_processor_coroutine,
]
hasynci.run(asyncio.gather(*coroutines), event_loop=event_loop)
@staticmethod
def _get_market_data_df1() -> pd.DataFrame:
"""
Generate price series that alternates every 5 minutes.
"""
idx = pd.date_range(
start=pd.Timestamp(
"2000-01-01 09:31:00-05:00", tz="America/New_York"
),
end=pd.Timestamp("2000-01-01 09:55:00-05:00", tz="America/New_York"),
freq="T",
)
bar_duration = "1T"
bar_delay = "0T"
data = mdata.build_timestamp_df(idx, bar_duration, bar_delay)
price_pattern = [101.0] * 5 + [100.0] * 5
price = price_pattern * 2 + [101.0] * 5
data["price"] = price
data["asset_id"] = 101
return data
@staticmethod
def _get_market_data_df2() -> pd.DataFrame:
idx = pd.date_range(
start=pd.Timestamp(
"2000-01-01 09:31:00-05:00", tz="America/New_York"
),
end=pd.Timestamp("2000-01-01 09:55:00-05:00", tz="America/New_York"),
freq="T",
)
bar_duration = "1T"
bar_delay = "0T"
data = mdata.build_timestamp_df(idx, bar_duration, bar_delay)
data["price"] = 100
data["asset_id"] = 101
return data
@staticmethod
def _get_predictions_and_volatility1(
market_data_df,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Generate a signal that alternates every 5 minutes.
"""
# Build predictions.
asset_id = market_data_df["asset_id"][0]
index = [
pd.Timestamp("2000-01-01 09:35:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:40:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:45:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:50:00-05:00", tz="America/New_York"),
]
# Sanity check the index (e.g., in case we update the test).
hdbg.dassert_is_subset(index, market_data_df["end_datetime"].to_list())
columns = [asset_id]
prediction_data = [
[1],
[-1],
[1],
[-1],
]
predictions = pd.DataFrame(prediction_data, index, columns)
volatility_data = [
[1],
[1],
[1],
[1],
]
volatility = pd.DataFrame(volatility_data, index, columns)
return predictions, volatility
@staticmethod
def _get_predictions_and_volatility2(
market_data_df,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Generate a signal that is only long.
"""
# Build predictions.
asset_id = market_data_df["asset_id"][0]
index = [
pd.Timestamp("2000-01-01 09:35:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:40:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:45:00-05:00", tz="America/New_York"),
            pd.Timestamp("2000-01-01 09:50:00-05:00", tz="America/New_York")
# -*- coding: utf-8 -*-
"""Generator capacity factor plots .
This module contain methods that are related to the capacity factor
of generators and average output plots
"""
import logging
import numpy as np
import pandas as pd
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""capacity_factor MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
    The capacity_factor.py module contains methods that are
related to the capacity factor of generators.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.x = mconfig.parser("figure_size","xdimension")
self.y = mconfig.parser("figure_size","ydimension")
def avg_output_when_committed(self,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates barplots of the percentage average generation output when committed by technology type.
Each scenario is plotted by a different colored grouped bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
            end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Generation",self.Scenarios),
(True,"generator_Installed_Capacity",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
CF_all_scenarios = pd.DataFrame()
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {str(scenario)}")
Gen = self["generator_Generation"].get(scenario)
try: #Check for regions missing all generation.
Gen = Gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.warning(f'No data in {zone_input}')
continue
Gen = Gen.reset_index()
Gen = self.rename_gen_techs(Gen)
Gen.tech = Gen.tech.astype("category")
Gen.tech.cat.set_categories(self.ordered_gen, inplace=True)
Gen = Gen.rename(columns = {0:"Output (MWh)"})
# techs = list(Gen['tech'].unique())
Gen = Gen[Gen['tech'].isin(self.thermal_gen_cat)]
Cap = self["generator_Installed_Capacity"].get(scenario)
Cap = Cap.xs(zone_input,level = self.AGG_BY)
Cap = Cap.reset_index()
Cap = Cap.drop(columns = ['timestamp','tech'])
Cap = Cap.rename(columns = {0:"Installed Capacity (MW)"})
Gen = pd.merge(Gen,Cap, on = 'gen_name')
Gen.set_index('timestamp',inplace=True)
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
# sort_index added see https://github.com/pandas-dev/pandas/issues/35509
Gen = Gen.sort_index()[start_date_range : end_date_range]
if Gen.empty is True:
self.logger.warning('No data in selected Date Range')
continue
#Calculate CF individually for each plant, since we need to take out all zero rows.
tech_names = Gen['tech'].unique()
CF = pd.DataFrame(columns = tech_names,index = [scenario])
for tech_name in tech_names:
stt = Gen.loc[Gen['tech'] == tech_name]
if not all(stt['Output (MWh)'] == 0):
gen_names = stt['gen_name'].unique()
cfs = []
caps = []
for gen in gen_names:
sgt = stt.loc[stt['gen_name'] == gen]
if not all(sgt['Output (MWh)'] == 0):
# Calculates interval step to correct for MWh of generation
time_delta = sgt.index[1] - sgt.index[0]
duration = sgt.index[len(sgt)-1] - sgt.index[0]
duration = duration + time_delta #Account for last timestep.
# Finds intervals in 60 minute period
interval_count = 60/(time_delta/np.timedelta64(1, 'm'))
duration_hours = duration/np.timedelta64(1,'h') #Get length of time series in hours for CF calculation.
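                                # Worked example (illustrative): with 5-minute data, time_delta = 5 min,
                                # so interval_count = 60/5 = 12 and a one-day window gives duration_hours = 24;
                                # summing the interval outputs and dividing by interval_count converts the MW
                                # samples to MWh before the capacity-factor-style ratio below.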
sgt = sgt[sgt['Output (MWh)'] !=0] #Remove time intervals when output is zero.
total_gen = sgt['Output (MWh)'].sum()/interval_count
cap = sgt['Installed Capacity (MW)'].mean()
#Calculate CF
cf = total_gen/(cap * duration_hours)
cfs.append(cf)
caps.append(cap)
#Find average "CF" (average output when committed) for this technology, weighted by capacity.
cf = np.average(cfs,weights = caps)
CF[tech_name] = cf
CF_all_scenarios = CF_all_scenarios.append(CF)
CF_all_scenarios.index = CF_all_scenarios.index.str.replace('_',' ')
if CF_all_scenarios.empty == True:
outputs[zone_input] = MissingZoneData()
continue
Data_Table_Out = CF_all_scenarios.T
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
mplt.barplot(CF_all_scenarios.T, color=self.color_list,
custom_tick_labels=list(CF_all_scenarios.columns),
ytick_major_fmt='percent')
ax.set_ylabel('Average Output When Committed', color='black', rotation='vertical')
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
# Add legend
mplt.add_legend()
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def cf(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates barplots of generator capacity factors by technology type.
Each scenario is plotted by a different colored grouped bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
            end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Generation",self.Scenarios),
(True,"generator_Installed_Capacity",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
cf_scen_chunks = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {str(scenario)}")
Gen = self["generator_Generation"].get(scenario)
try: #Check for regions missing all generation.
Gen = Gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.warning(f'No data in {zone_input}')
continue
Gen = self.df_process_gen_inputs(Gen)
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
Gen = Gen[start_date_range : end_date_range]
if Gen.empty is True:
self.logger.warning('No data in selected Date Range')
continue
# Calculates interval step to correct for MWh of generation
time_delta = Gen.index[1] - Gen.index[0]
duration = Gen.index[len(Gen)-1] - Gen.index[0]
duration = duration + time_delta #Account for last timestep.
# Finds intervals in 60 minute period
interval_count = 60/(time_delta/np.timedelta64(1, 'm'))
duration_hours = duration/np.timedelta64(1,'h') #Get length of time series in hours for CF calculation.
Gen = Gen/interval_count
Total_Gen = Gen.sum(axis=0)
Total_Gen.rename(scenario, inplace = True)
Cap = self["generator_Installed_Capacity"].get(scenario)
Cap = Cap.xs(zone_input,level = self.AGG_BY)
Cap = self.df_process_gen_inputs(Cap)
Cap = Cap.T.sum(axis = 1) #Rotate and force capacity to a series.
Cap.rename(scenario, inplace = True)
#Calculate CF
CF = Total_Gen/(Cap * duration_hours)
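                # Worked example (illustrative): a 100 MW unit producing 350,400 MWh over 8,760 h
                # gives CF = 350400 / (100 * 8760) = 0.40.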
CF.rename(scenario, inplace = True)
cf_scen_chunks.append(CF)
CF_all_scenarios = pd.concat(cf_scen_chunks, axis=1, sort=False)
CF_all_scenarios = CF_all_scenarios.fillna(0, axis = 0)
if CF_all_scenarios.empty == True:
outputs[zone_input] = MissingZoneData()
continue
Data_Table_Out = CF_all_scenarios.T
mplt = PlotLibrary(figsize=(self.x*1.5, self.y*1.5))
fig, ax = mplt.get_figure()
mplt.barplot(CF_all_scenarios, color=self.color_list,
ytick_major_fmt='percent')
ax.set_ylabel('Capacity Factor', color='black', rotation='vertical')
# Add legend
mplt.add_legend()
# Add title
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def time_at_min_gen(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates barplots of generator percentage time at min-gen by technology type.
Each scenario is plotted by a different colored grouped bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
            end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Generation",self.Scenarios),
(True,"generator_Installed_Capacity",self.Scenarios),
(True,"generator_Hours_at_Minimum",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
            time_at_min = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 11:38:55 2019
@author: <NAME>
"""
import numpy as np
import rdkit
from rdkit import Chem
from rdkit.Chem import Descriptors
import pandas as pd
def generate(smiles, verbose=False):
moldata= []
for elem in smiles:
mol=Chem.MolFromSmiles(elem)
moldata.append(mol)
baseData= np.arange(1,1)
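    # np.arange(1, 1) is just an empty placeholder: the first descriptor row overwrites it
    # (i == 0 branch below) and subsequent rows are stacked with np.vstack.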
i=0
for mol in moldata:
desc_MolWt = Descriptors.MolWt(mol)
desc_MolLogP = Descriptors.MolLogP(mol)
desc_MolMR = Descriptors.MolMR(mol)
desc_HeavyAtomCount = Descriptors.HeavyAtomCount(mol)
desc_NumHAcceptors = Descriptors.NumHAcceptors(mol)
desc_NumHDonors = Descriptors.NumHDonors(mol)
desc_NumHeteroatoms = Descriptors.NumHeteroatoms(mol)
desc_NumRotatableBonds = Descriptors.NumRotatableBonds(mol)
desc_NumValenceElectrons = Descriptors.NumValenceElectrons(mol)
desc_NumAromaticRings = Descriptors.NumAromaticRings(mol)
desc_NumSaturatedRings = Descriptors.NumSaturatedRings(mol)
desc_NumAliphaticRings = Descriptors.NumAliphaticRings(mol)
desc_RingCount = Descriptors.RingCount(mol)
desc_TPSA = Descriptors.TPSA(mol)
desc_LabuteASA = Descriptors.LabuteASA(mol)
desc_BalabanJ = Descriptors.BalabanJ(mol)
desc_BertzCT = Descriptors.BertzCT(mol)
row = np.array([desc_MolWt,desc_MolLogP,desc_MolMR,desc_HeavyAtomCount,desc_NumHAcceptors,desc_NumHDonors,desc_NumHeteroatoms,
desc_NumRotatableBonds,desc_NumValenceElectrons,desc_NumAromaticRings,desc_NumSaturatedRings,
desc_NumAliphaticRings,desc_RingCount,desc_TPSA,desc_LabuteASA,desc_BalabanJ,desc_BertzCT])
if(i==0):
baseData=row
else:
baseData=np.vstack([baseData, row])
i=i+1
columnNames=["MolWt","MolLogP","MolMR","HeavyAtomCount","NumHAcceptors","NumHDonors","NumHeteroatoms",
"NumRotatableBonds","NumValenceElectrons","NumAromaticRings","NumSaturatedRings",
"NumAliphaticRings","RingCount","TPSA","LabuteASA","BalabanJ","BertzCT"]
    descriptors = pd.DataFrame(data=baseData,columns=columnNames)
    # The original text is truncated at this point; returning the assembled descriptor
    # table is the assumed completion of the function.
    return descriptors
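# Minimal usage sketch (illustrative SMILES strings, not part of the original script):
#   smiles = ["CCO", "c1ccccc1O", "CC(=O)Nc1ccc(O)cc1"]
#   descriptors_df = generate(smiles)
#   print(descriptors_df[["MolWt", "MolLogP", "TPSA"]].head())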
from strategy.rebalance import get_relative_to_expiry_rebalance_dates, \
get_fixed_frequency_rebalance_dates, \
get_relative_to_expiry_instrument_weights
from strategy.calendar import get_mtm_dates
import pandas as pd
import pytest
from pandas.util.testing import assert_index_equal, assert_frame_equal
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key], check_names=False)
def test_tradeables_dates():
    # no CME holidays in this date range
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
exchanges = ["CME"]
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.date_range(
"2015-01-02", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with an adhoc holiday
holidays = [pd.Timestamp("2015-01-02")]
tradeable_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
exp_tradeable_dates = pd.date_range(
"2015-01-03", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with CME holiday (New Years day)
sd = pd.Timestamp("2015-01-01")
ed = pd.Timestamp("2015-01-02")
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.DatetimeIndex([pd.Timestamp("2015-01-02")])
assert_index_equal(tradeable_dates, exp_tradeable_dates)
def test_relative_to_expiry_rebalance_dates():
# each contract rolling individually, same offset
# change to ES and TY
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015TYH", "2015-02-27", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"]],
columns=["contract", "first_notice", "last_trade"]
)
offsets = -3
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-24", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling all monthly contracts together, same offset
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=True, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(["2015-01-02", "2015-02-24"])
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling each contract individually, different offset
offsets = {"ES": -3, "TY": -4}
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-23", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
def test_relative_to_expiry_weights():
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015ESU", "2015-09-18", "2015-09-18"],
["2015TYH", "2015-03-16", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"],
["2015TYU", "2015-08-31", "2015-09-21"]],
columns=["contract", "first_notice", "last_trade"]
)
# one generic and one product
dts = pd.date_range("2015-03-17", "2015-03-18", freq="B")
offsets = -3
root_gnrcs = {"ES": ["ES1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame(
[1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-17"), "2015ESH"),
(pd.Timestamp("2015-03-18"), "2015ESM")],
names=("date", "contract")),
columns=["ES1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# multiple products
dts = pd.date_range("2015-03-13", "2015-03-20", freq="B")
offsets = -1
root_gnrcs = {"ES": ["ES1"], "TY": ["TY1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015ESH"),
(pd.Timestamp("2015-03-16"), "2015ESH"),
(pd.Timestamp("2015-03-17"), "2015ESH"),
(pd.Timestamp("2015-03-18"), "2015ESH"),
(pd.Timestamp("2015-03-19"), "2015ESH"),
(pd.Timestamp("2015-03-20"), "2015ESM"),],
names=("date", "contract")),
columns=["ES1"]
),
"TY": pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015TYH"),
(pd.Timestamp("2015-03-16"), "2015TYM"),
(pd.Timestamp("2015-03-17"), "2015TYM"),
(pd.Timestamp("2015-03-18"), "2015TYM"),
(pd.Timestamp("2015-03-19"), "2015TYM"),
(pd.Timestamp("2015-03-20"), "2015TYM"),],
names=("date", "contract")),
columns=["TY1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# multiple generics
offsets = -1
dts = pd.date_range("2015-03-19", "2015-03-20", freq="B")
root_gnrcs = {"ES": ["ES1", "ES2"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-19"), "2015ESH"),
(pd.Timestamp("2015-03-19"), "2015ESM"),
(pd.Timestamp("2015-03-20"), "2015ESM"),
(pd.Timestamp("2015-03-20"), "2015ESU")],
names=("date", "contract")),
columns=["ES1", "ES2"]
)
}
assert_dict_of_frames(wts, exp_wts)
# with dict of offsets
offsets = {"ES": -4, "TY": -1}
root_gnrcs = {"ES": ["ES1"], "TY": ["TY1"]}
dts = pd.date_range("2015-03-13", "2015-03-17", freq="B")
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015ESH"),
(pd.Timestamp("2015-03-16"), "2015ESH"),
(pd.Timestamp("2015-03-17"), "2015ESM")],
names=("date", "contract")),
columns=["ES1"]
),
"TY": pd.DataFrame([1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015TYH"),
(pd.Timestamp("2015-03-16"), "2015TYM"),
(pd.Timestamp("2015-03-17"), "2015TYM")],
names=("date", "contract")),
columns=["TY1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# with holidays for relative roll
offsets = -1
root_gnrcs = {"ES": ["ES1"]}
holidays = [pd.Timestamp("2015-03-19").date()]
dts = pd.date_range("2015-03-18", "2015-03-19", freq="B")
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets, holidays=holidays
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0],
index=pd.MultiIndex.from_tuples(
                               [(pd.Timestamp("2015-03-18")
import calendar
from ..utils import search_quote
from datetime import datetime, timedelta
from ..utils import process_dataframe_and_series
import rich
from jsonpath import jsonpath
from retry import retry
import pandas as pd
import requests
import multitasking
import signal
from tqdm import tqdm
from typing import (Dict,
List,
Union)
from ..shared import session
from ..common import get_quote_history as get_quote_history_for_stock
from ..common import get_history_bill as get_history_bill_for_stock
from ..common import get_today_bill as get_today_bill_for_stock
from ..common import get_realtime_quotes_by_fs
from ..utils import (to_numeric,
get_quote_id)
from .config import EASTMONEY_STOCK_DAILY_BILL_BOARD_FIELDS, EASTMONEY_STOCK_BASE_INFO_FIELDS
from ..common.config import (
FS_DICT,
MARKET_NUMBER_DICT,
EASTMONEY_REQUEST_HEADERS,
EASTMONEY_QUOTE_FIELDS
)
signal.signal(signal.SIGINT, multitasking.killall)
@to_numeric
def get_base_info_single(stock_code: str) -> pd.Series:
"""
    Get basic information for a single stock
    Parameters
    ----------
    stock_code : str
        Stock code
    Returns
    -------
    Series
        Basic information of the single stock
"""
fields = ",".join(EASTMONEY_STOCK_BASE_INFO_FIELDS.keys())
secid = get_quote_id(stock_code)
if not secid:
return pd.Series(index=EASTMONEY_STOCK_BASE_INFO_FIELDS.values())
params = (
('ut', 'fa5fd1943c7b386f172d6893dbfba10b'),
('invt', '2'),
('fltt', '2'),
('fields', fields),
('secid', secid),
)
url = 'http://push2.eastmoney.com/api/qt/stock/get'
json_response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
s = pd.Series(json_response['data']).rename(
index=EASTMONEY_STOCK_BASE_INFO_FIELDS)
return s[EASTMONEY_STOCK_BASE_INFO_FIELDS.values()]
def get_base_info_muliti(stock_codes: List[str]) -> pd.DataFrame:
"""
    Get basic information for multiple stocks
    Parameters
    ----------
    stock_codes : List[str]
        List of stock codes
    Returns
    -------
    DataFrame
        Basic information of the stocks
"""
@multitasking.task
@retry(tries=3, delay=1)
def start(stock_code: str):
s = get_base_info_single(stock_code)
dfs.append(s)
pbar.update()
pbar.set_description(f'Processing => {stock_code}')
dfs: List[pd.DataFrame] = []
pbar = tqdm(total=len(stock_codes))
for stock_code in stock_codes:
start(stock_code)
multitasking.wait_for_tasks()
df = pd.DataFrame(dfs)
df = df.dropna(subset=['股票代码'])
return df
@to_numeric
def get_base_info(stock_codes: Union[str, List[str]]) -> Union[pd.Series, pd.DataFrame]:
"""
Parameters
----------
stock_codes : Union[str, List[str]]
        Stock code or a list of stock codes
    Returns
    -------
    Union[Series, DataFrame]
        - ``Series`` : basic information of a single stock (when ``stock_codes`` is a string)
        - ``DataFrame`` : basic information of multiple stocks (when ``stock_codes`` is a list of strings)
    Raises
    ------
    TypeError
        When the type of ``stock_codes`` does not meet the requirements
Examples
--------
>>> import efinance as ef
    >>> # Get information for a single stock
>>> ef.stock.get_base_info('600519')
股票代码 600519
股票名称 贵州茅台
市盈率(动) 39.38
市净率 12.54
所处行业 酿酒行业
总市值 2198082348462.0
流通市值 2198082348462.0
板块编号 BK0477
ROE 8.29
净利率 54.1678
净利润 13954462085.610001
毛利率 91.6763
dtype: object
>>> # 获取多只股票信息
>>> ef.stock.get_base_info(['600519','300715'])
股票代码 股票名称 市盈率(动) 市净率 所处行业 总市值 流通市值 板块编号 ROE 净利率 净利润 毛利率
0 300715 凯伦股份 42.29 3.12 水泥建材 9.160864e+09 6.397043e+09 BK0424 3.97 12.1659 5.415488e+07 32.8765
1 600519 贵州茅台 39.38 12.54 酿酒行业 2.198082e+12 2.198082e+12 BK0477 8.29 54.1678 1.395446e+10 91.6763
"""
if isinstance(stock_codes, str):
return get_base_info_single(stock_codes)
elif hasattr(stock_codes, '__iter__'):
return get_base_info_muliti(stock_codes)
raise TypeError(f'所给的 {stock_codes} 不符合参数要求')
def get_quote_history(stock_codes: Union[str, List[str]],
beg: str = '19000101',
end: str = '20500101',
klt: int = 101,
fqt: int = 1,
**kwargs) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""
    Get candlestick (K-line) data for stocks
    Parameters
    ----------
    stock_codes : Union[str,List[str]]
        Stock code or name, or a list of stock codes/names
    beg : str, optional
        Start date, defaults to ``'19000101'``, i.e. January 1, 1900
    end : str, optional
        End date, defaults to ``'20500101'``, i.e. January 1, 2050
    klt : int, optional
        Time interval between quotes, defaults to ``101``; options include
        - ``1`` : 1 minute
        - ``5`` : 5 minutes
        - ``15`` : 15 minutes
        - ``30`` : 30 minutes
        - ``60`` : 60 minutes
        - ``101`` : daily
        - ``102`` : weekly
        - ``103`` : monthly
    fqt : int, optional
        Price adjustment method, defaults to ``1``; options include
        - ``0`` : no adjustment
        - ``1`` : forward-adjusted
        - ``2`` : backward-adjusted
    Returns
    -------
    Union[DataFrame, Dict[str, DataFrame]]
        Candlestick data of the stocks
        - ``DataFrame`` : when ``stock_codes`` is a ``str``
        - ``Dict[str, DataFrame]`` : when ``stock_codes`` is a ``List[str]``
Examples
--------
>>> import efinance as ef
    >>> # Get daily candlestick data for a single stock
>>> ef.stock.get_quote_history('600519')
股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率
0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83
1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13
2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45
3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72
4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25
... ... ... ... ... ... ... ... ... ... ... ... ... ...
4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38
4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79
4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69
4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68
4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51
    >>> # Get historical quotes for multiple stocks
>>> stock_df = ef.stock.get_quote_history(['600519','300750'])
>>> type(stock_df)
<class 'dict'>
>>> stock_df.keys()
dict_keys(['300750', '600519'])
>>> stock_df['600519']
股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率
0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83
1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13
2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45
3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72
4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25
... ... ... ... ... ... ... ... ... ... ... ... ... ...
4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38
4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79
4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69
4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68
4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51
"""
df = get_quote_history_for_stock(
stock_codes,
beg=beg,
end=end,
klt=klt,
fqt=fqt
)
if isinstance(df, pd.DataFrame):
df.rename(columns={'代码': '股票代码',
'名称': '股票名称'
},
inplace=True)
elif isinstance(df, dict):
for stock_code in df.keys():
df[stock_code].rename(columns={'代码': '股票代码',
'名称': '股票名称'
},
inplace=True)
    # NOTE extension hook: setting this keyword returns a DataFrame instead of a dict
if kwargs.get('return_df'):
df: pd.DataFrame = pd.concat(df, axis=0, ignore_index=True)
return df
@process_dataframe_and_series(remove_columns_and_indexes=['市场编号'])
@to_numeric
def get_realtime_quotes(fs: Union[str, List[str]] = None) -> pd.DataFrame:
"""
    Get the latest snapshot of one or more market quote boards
    Parameters
    ----------
    fs : Union[str, List[str]], optional
        Market name, or a list of market names; available values include
        - ``None`` Shanghai/Shenzhen/Beijing A-share quotes
        - ``'沪深A股'`` Shanghai & Shenzhen A-share quotes
        - ``'沪A'`` Shanghai A-share quotes
        - ``'深A'`` Shenzhen A-share quotes
        - ``北A`` Beijing Stock Exchange A-share quotes
        - ``'可转债'`` Shanghai & Shenzhen convertible bond quotes
        - ``'期货'`` futures quotes
        - ``'创业板'`` ChiNext board quotes
        - ``'美股'`` US stock quotes
        - ``'港股'`` Hong Kong stock quotes
        - ``'中概股'`` Chinese concept stock quotes
        - ``'新股'`` Shanghai & Shenzhen new-issue quotes
        - ``'科创板'`` STAR Market quotes
        - ``'沪股通'`` Shanghai Stock Connect quotes
        - ``'深股通'`` Shenzhen Stock Connect quotes
        - ``'行业板块'`` industry sector quotes
        - ``'概念板块'`` concept sector quotes
        - ``'沪深系列指数'`` Shanghai & Shenzhen index series quotes
        - ``'上证系列指数'`` SSE index series quotes
        - ``'深证系列指数'`` SZSE index series quotes
        - ``'ETF'`` ETF fund quotes
        - ``'LOF'`` LOF fund quotes
    Returns
    -------
    DataFrame
        Latest snapshot of the selected market quote board(s)
    Raises
    ------
    KeyError
        Raised when ``fs`` contains an invalid market type
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_realtime_quotes()
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 688787 N海天 277.59 139.48 172.39 139.25 171.66 102.54 85.62 - 78.93 74519 1110318832.0 36.94 5969744000 1213908667 1.688787 沪A
1 301045 N天禄 149.34 39.42 48.95 39.2 48.95 23.61 66.66 - 37.81 163061 683878656.0 15.81 4066344240 964237089 0.301045 深A
2 300532 今天国际 20.04 12.16 12.16 10.69 10.69 2.03 8.85 3.02 -22.72 144795 171535181.0 10.13 3322510580 1989333440 0.300532 深A
3 300600 国瑞科技 20.02 13.19 13.19 11.11 11.41 2.2 18.61 2.82 218.75 423779 541164432.0 10.99 3915421427 3003665117 0.300600 深A
4 300985 致远新能 20.01 47.08 47.08 36.8 39.4 7.85 66.65 2.17 58.37 210697 897370992.0 39.23 6277336472 1488300116 0.300985 深A
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
4598 603186 华正新材 -10.0 43.27 44.09 43.27 43.99 -4.81 1.98 0.48 25.24 27697 120486294.0 48.08 6146300650 6063519472 1.603186 沪A
4599 688185 康希诺-U -10.11 476.4 534.94 460.13 530.0 -53.6 6.02 2.74 -2088.07 40239 1960540832.0 530.0 117885131884 31831479215 1.688185 沪A
4600 688148 芳源股份 -10.57 31.3 34.39 31.3 33.9 -3.7 26.07 0.56 220.01 188415 620632512.0 35.0 15923562000 2261706043 1.688148 沪A
4601 300034 钢研高纳 -10.96 43.12 46.81 42.88 46.5 -5.31 7.45 1.77 59.49 323226 1441101824.0 48.43 20959281094 18706911861 0.300034 深A
4602 300712 永福股份 -13.71 96.9 110.94 95.4 109.0 -15.4 6.96 1.26 511.21 126705 1265152928.0 112.3 17645877600 17645877600 0.300712 深A
>>> ef.stock.get_realtime_quotes(['创业板','港股'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 00859 中昌国际控股 49.02 0.38 0.38 0.26 0.26 0.125 0.08 86.85 -2.83 938000 262860.0 0.255 427510287 427510287 128.00859 None
1 01058 粤海制革 41.05 1.34 1.51 0.9 0.93 0.39 8.34 1.61 249.89 44878000 57662440.0 0.95 720945460 720945460 128.01058 None
2 00713 世界(集团) 27.94 0.87 0.9 0.68 0.68 0.19 1.22 33.28 3.64 9372000 7585400.0 0.68 670785156 670785156 128.00713 None
3 08668 瀛海集团 24.65 0.177 0.179 0.145 0.145 0.035 0.0 10.0 -9.78 20000 3240.0 0.142 212400000 212400000 128.08668 None
4 08413 亚洲杂货 24.44 0.28 0.28 0.25 0.25 0.055 0.01 3.48 -20.76 160000 41300.0 0.225 325360000 325360000 128.08413 None
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
5632 08429 冰雪集团 -16.75 0.174 0.2 0.166 0.2 -0.035 2.48 3.52 -21.58 11895000 2074645.0 0.209 83520000 83520000 128.08429 None
5633 00524 长城天下 -17.56 0.108 0.118 0.103 0.118 -0.023 0.45 15.43 -6.55 5961200 649171.0 0.131 141787800 141787800 128.00524 None
5634 08377 申酉控股 -17.71 0.395 0.46 0.39 0.46 -0.085 0.07 8.06 -5.07 290000 123200.0 0.48 161611035 161611035 128.08377 None
5635 00108 国锐地产 -19.01 1.15 1.42 1.15 1.42 -0.27 0.07 0.78 23.94 2376000 3012080.0 1.42 3679280084 3679280084 128.00108 None
5636 08237 华星控股 -25.0 0.024 0.031 0.023 0.031 -0.008 0.43 8.74 -2.01 15008000 364188.0 0.032 83760000 83760000 128.08237 None
>>> ef.stock.get_realtime_quotes(['ETF'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 513050 中概互联网ETF 4.49 1.444 1.455 1.433 1.452 0.062 6.71 0.92 - 12961671 1870845984.0 1.382 27895816917 27895816917 1.513050 沪A
1 513360 教育ETF 4.38 0.5 0.502 0.486 0.487 0.021 16.89 1.7 - 1104254 54634387.0 0.479 326856952 326856952 1.513360 沪A
2 159766 旅游ETF 3.84 0.974 0.988 0.95 0.95 0.036 14.46 1.97 - 463730 45254947.0 0.938 312304295 312304295 0.159766 深A
3 159865 养殖ETF 3.8 0.819 0.828 0.785 0.791 0.03 12.13 0.89 - 1405871 114254714.0 0.789 949594189 949594189 0.159865 深A
4 516670 畜牧养殖ETF 3.76 0.856 0.864 0.825 0.835 0.031 24.08 0.98 - 292027 24924513.0 0.825 103803953 103803953 1.516670 沪A
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
549 513060 恒生医疗ETF -4.12 0.861 0.905 0.86 0.902 -0.037 47.96 1.57 - 1620502 141454355.0 0.898 290926128 290926128 1.513060 沪A
550 515220 煤炭ETF -4.46 2.226 2.394 2.194 2.378 -0.104 14.39 0.98 - 2178176 487720560.0 2.330 3369247992 3369247992 1.515220 沪A
551 513000 日经225ETF易方达 -4.49 1.212 1.269 1.21 1.269 -0.057 5.02 2.49 - 25819 3152848.0 1.269 62310617 62310617 1.513000 沪A
552 513880 日经225ETF -4.59 1.163 1.224 1.162 1.217 -0.056 16.93 0.94 - 71058 8336846.0 1.219 48811110 48811110 1.513880 沪A
553 513520 日经ETF -4.76 1.2 1.217 1.196 1.217 -0.06 27.7 1.79 - 146520 17645828.0 1.260 63464640 63464640 1.513520 沪A
Notes
-----
    Whether for stocks, convertible bonds, futures or funds, the first column header is always ``股票代码``
"""
fs_list: List[str] = []
if fs is None:
fs_list.append(FS_DICT['stock'])
if isinstance(fs, str):
fs = [fs]
if isinstance(fs, list):
for f in fs:
if not FS_DICT.get(f):
raise KeyError(f'指定的行情参数 `{fs}` 不正确')
fs_list.append(FS_DICT[f])
    # when an empty list is given, fall back to Shanghai & Shenzhen A-share quotes
if not fs_list:
fs_list.append(FS_DICT['stock'])
fs_str = ','.join(fs_list)
df = get_realtime_quotes_by_fs(fs_str)
df.rename(columns={'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_history_bill(stock_code: str) -> pd.DataFrame:
"""
    Get historical order (money) inflow/outflow data for a single stock
    Parameters
    ----------
    stock_code : str
        Stock code
    Returns
    -------
    DataFrame
        Historical order inflow/outflow data of a single Shanghai/Shenzhen stock
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_history_bill('600519')
股票名称 股票代码 日期 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入 主力净流入占比 小单流入净占比 中单流入净占比 大单流入净占比 超大单流入净占比 收盘价 涨跌幅
0 贵州茅台 600519 2021-03-04 -3.670272e+06 -2282056.0 5.952143e+06 1.461528e+09 -1.465199e+09 -0.03 -0.02 0.04 10.99 -11.02 2013.71 -5.05
1 贵州茅台 600519 2021-03-05 -1.514880e+07 -1319066.0 1.646793e+07 -2.528896e+07 1.014016e+07 -0.12 -0.01 0.13 -0.19 0.08 2040.82 1.35
2 贵州茅台 600519 2021-03-08 -8.001702e+08 -877074.0 8.010473e+08 5.670671e+08 -1.367237e+09 -6.29 -0.01 6.30 4.46 -10.75 1940.71 -4.91
3 贵州茅台 600519 2021-03-09 -2.237770e+08 -6391767.0 2.301686e+08 -1.795013e+08 -4.427571e+07 -1.39 -0.04 1.43 -1.11 -0.27 1917.70 -1.19
4 贵州茅台 600519 2021-03-10 -2.044173e+08 -1551798.0 2.059690e+08 -2.378506e+08 3.343331e+07 -2.02 -0.02 2.03 -2.35 0.33 1950.72 1.72
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
97 贵州茅台 600519 2021-07-26 -1.564233e+09 13142211.0 1.551091e+09 -1.270400e+08 -1.437193e+09 -8.74 0.07 8.67 -0.71 -8.03 1804.11 -5.05
98 贵州茅台 600519 2021-07-27 -7.803296e+08 -10424715.0 7.907544e+08 6.725104e+07 -8.475807e+08 -5.12 -0.07 5.19 0.44 -5.56 1712.89 -5.06
99 贵州茅台 600519 2021-07-28 3.997645e+08 2603511.0 -4.023677e+08 2.315648e+08 1.681997e+08 2.70 0.02 -2.72 1.57 1.14 1768.90 3.27
100 贵州茅台 600519 2021-07-29 -9.209842e+08 -2312235.0 9.232964e+08 -3.959741e+08 -5.250101e+08 -8.15 -0.02 8.17 -3.50 -4.65 1749.79 -1.08
101 贵州茅台 600519 2021-07-30 -1.524740e+09 -6020099.0 1.530761e+09 1.147248e+08 -1.639465e+09 -11.63 -0.05 11.68 0.88 -12.51 1678.99 -4.05
"""
df = get_history_bill_for_stock(stock_code)
df.rename(columns={
'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_today_bill(stock_code: str) -> pd.DataFrame:
"""
    Get intraday, minute-level order inflow/outflow data of a single stock for the latest trading day
    Parameters
    ----------
    stock_code : str
        Stock code
    Returns
    -------
    DataFrame
        Minute-level intraday order inflow/outflow data of the single stock for the latest trading day
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_today_bill('600519')
股票代码 时间 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入
0 600519 2021-07-29 09:31 -3261705.0 -389320.0 3651025.0 -12529658.0 9267953.0
1 600519 2021-07-29 09:32 6437999.0 -606994.0 -5831006.0 -42615994.0 49053993.0
2 600519 2021-07-29 09:33 13179707.0 -606994.0 -12572715.0 -85059118.0 98238825.0
3 600519 2021-07-29 09:34 15385244.0 -970615.0 -14414632.0 -86865209.0 102250453.0
4 600519 2021-07-29 09:35 7853716.0 -970615.0 -6883104.0 -75692436.0 83546152.0
.. ... ... ... ... ... ... ...
235 600519 2021-07-29 14:56 -918956019.0 -1299630.0 920255661.0 -397127393.0 -521828626.0
236 600519 2021-07-29 14:57 -920977761.0 -2319213.0 923296987.0 -397014702.0 -523963059.0
237 600519 2021-07-29 14:58 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
238 600519 2021-07-29 14:59 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
239 600519 2021-07-29 15:00 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
"""
df = get_today_bill_for_stock(stock_code)
df.rename(columns={
'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_latest_quote(stock_codes: List[str]) -> pd.DataFrame:
"""
    Get real-time quote changes for multiple stocks
    Parameters
    ----------
    stock_codes : List[str]
        List of stock codes
    Returns
    -------
    DataFrame
        Real-time quote changes of multiple Shanghai/Shenzhen, Hong Kong and US stocks
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_latest_quote(['600519','300750'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 市场类型
0 600519 贵州茅台 0.59 1700.04 1713.0 1679.0 1690.0 10.04 0.30 0.72 43.31 37905 6.418413e+09 1690.0 2135586507912 2135586507912 沪A
1 300750 宁德时代 0.01 502.05 529.9 480.0 480.0 0.05 1.37 1.75 149.57 277258 1.408545e+10 502.0 1169278366994 1019031580505 深A
Notes
-----
    When you need real-time quotes for many Shanghai/Shenzhen A shares, prefer ``efinance.stock.get_realtime_quotes``
"""
if isinstance(stock_codes, str):
stock_codes = [stock_codes]
secids: List[str] = [get_quote_id(stock_code)
for stock_code in stock_codes]
columns = EASTMONEY_QUOTE_FIELDS
fields = ",".join(columns.keys())
params = (
('OSVersion', '14.3'),
('appVersion', '6.3.8'),
('fields', fields),
('fltt', '2'),
('plat', 'Iphone'),
('product', 'EFund'),
('secids', ",".join(secids)),
('serverVersion', '6.3.6'),
('version', '6.3.8'),
)
url = 'https://push2.eastmoney.com/api/qt/ulist.np/get'
json_response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
rows = jsonpath(json_response, '$..diff[:]')
if rows is None:
        return pd.DataFrame(columns=columns.values()).rename(columns={
            '市场编号': '市场类型'
        })
df = pd.DataFrame(rows)[columns.keys()].rename(columns=columns)
df['市场类型'] = df['市场编号'].apply(lambda x: MARKET_NUMBER_DICT.get(str(x)))
del df['市场编号']
return df
@to_numeric
def get_top10_stock_holder_info(stock_code: str,
top: int = 4) -> pd.DataFrame:
"""
    Get top-10 shareholder information for a given Shanghai/Shenzhen stock
    Parameters
    ----------
    stock_code : str
        Stock code
    top : int, optional
        Number of the most recent top-10 tradable-shareholder disclosures to return, defaults to ``4``
    Returns
    -------
    DataFrame
        Information on the 10 largest shareholders of the stock
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_top10_stock_holder_info('600519',top = 1)
股票代码 更新日期 股东代码 股东名称 持股数 持股比例 增减 变动率
0 600519 2021-03-31 80010298 中国贵州茅台酒厂(集团)有限责任公司 6.783亿 54.00% 不变 --
1 600519 2021-03-31 80637337 香港中央结算有限公司 9594万 7.64% -841.1万 -8.06%
2 600519 2021-03-31 80732941 贵州省国有资本运营有限责任公司 5700万 4.54% -182.7万 -3.11%
3 600519 2021-03-31 80010302 贵州茅台酒厂集团技术开发公司 2781万 2.21% 不变 --
4 600519 2021-03-31 80475097 中央汇金资产管理有限责任公司 1079万 0.86% 不变 --
5 600519 2021-03-31 80188285 中国证券金融股份有限公司 803.9万 0.64% -91 0.00%
6 600519 2021-03-31 78043999 深圳市金汇荣盛财富管理有限公司-金汇荣盛三号私募证券投资基金 502.1万 0.40% 不变 --
7 600519 2021-03-31 70400207 中国人寿保险股份有限公司-传统-普通保险产品-005L-CT001沪 434.1万 0.35% 44.72万 11.48%
8 600519 2021-03-31 005827 中国银行股份有限公司-易方达蓝筹精选混合型证券投资基金 432万 0.34% 新进 --
9 600519 2021-03-31 78083830 珠海市瑞丰汇邦资产管理有限公司-瑞丰汇邦三号私募证券投资基金 416.1万 0.33% 不变 --
"""
def gen_fc(stock_code: str) -> str:
"""
Parameters
----------
stock_code : str
            Stock code
Returns
-------
str
            String in the required format
"""
_type, stock_code = get_quote_id(stock_code).split('.')
_type = int(_type)
        # Shenzhen market
if _type == 0:
return f'{stock_code}02'
        # Shanghai market
return f'{stock_code}01'
def get_public_dates(stock_code: str) -> List[str]:
"""
        Get the dates on which shareholder information was disclosed for the given stock
        Parameters
        ----------
        stock_code : str
            Stock code
        Returns
        -------
        List[str]
            List of disclosure dates
"""
quote_id = get_quote_id(stock_code)
stock_code = quote_id.split('.')[-1]
fc = gen_fc(stock_code)
data = {"fc": fc}
url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetFirstRequest2Data'
json_response = requests.post(
url, json=data).json()
dates = jsonpath(json_response, f'$..BaoGaoQi')
if not dates:
return []
return dates
fields = {
'GuDongDaiMa': '股东代码',
'GuDongMingCheng': '股东名称',
'ChiGuShu': '持股数',
'ChiGuBiLi': '持股比例',
'ZengJian': '增减',
'BianDongBiLi': '变动率',
}
quote_id = get_quote_id(stock_code)
stock_code = quote_id.split('.')[-1]
fc = gen_fc(stock_code)
dates = get_public_dates(stock_code)
dfs: List[pd.DataFrame] = []
empty_df = pd.DataFrame(columns=['股票代码', '日期']+list(fields.values()))
for date in dates[:top]:
data = {"fc": fc, "BaoGaoQi": date}
url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetShiDaLiuTongGuDong'
response = requests.post(url, json=data)
response.encoding = 'utf-8'
items: List[dict] = jsonpath(
response.json(), f'$..ShiDaLiuTongGuDongList[:]')
if not items:
continue
df = pd.DataFrame(items)
df.rename(columns=fields, inplace=True)
df.insert(0, '股票代码', [stock_code for _ in range(len(df))])
df.insert(1, '更新日期', [date for _ in range(len(df))])
del df['IsLink']
dfs.append(df)
if len(dfs) == 0:
return empty_df
return pd.concat(dfs, axis=0, ignore_index=True)
def get_all_report_dates() -> pd.DataFrame:
"""
    Get all report-period information for Shanghai/Shenzhen market stocks
    Returns
    -------
    DataFrame
        Report-period information for all Shanghai/Shenzhen market stocks
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_all_report_dates()
报告日期 季报名称
0 2021-06-30 2021年 半年报
1 2021-03-31 2021年 一季报
2 2020-12-31 2020年 年报
3 2020-09-30 2020年 三季报
4 2020-06-30 2020年 半年报
5 2020-03-31 2020年 一季报
6 2019-12-31 2019年 年报
7 2019-09-30 2019年 三季报
8 2019-06-30 2019年 半年报
9 2019-03-31 2019年 一季报
10 2018-12-31 2018年 年报
11 2018-09-30 2018年 三季报
12 2018-06-30 2018年 半年报
13 2018-03-31 2018年 一季报
14 2017-12-31 2017年 年报
15 2017-09-30 2017年 三季报
16 2017-06-30 2017年 半年报
17 2017-03-31 2017年 一季报
18 2016-12-31 2016年 年报
19 2016-09-30 2016年 三季报
20 2016-06-30 2016年 半年报
21 2016-03-31 2016年 一季报
22 2015-12-31 2015年 年报
24 2015-06-30 2015年 半年报
25 2015-03-31 2015年 一季报
26 2014-12-31 2014年 年报
27 2014-09-30 2014年 三季报
28 2014-06-30 2014年 半年报
29 2014-03-31 2014年 一季报
30 2013-12-31 2013年 年报
31 2013-09-30 2013年 三季报
32 2013-06-30 2013年 半年报
33 2013-03-31 2013年 一季报
34 2012-12-31 2012年 年报
35 2012-09-30 2012年 三季报
36 2012-06-30 2012年 半年报
37 2012-03-31 2012年 一季报
38 2011-12-31 2011年 年报
39 2011-09-30 2011年 三季报
"""
fields = {
'REPORT_DATE': '报告日期',
'DATATYPE': '季报名称'
}
params = (
('type', 'RPT_LICO_FN_CPD_BBBQ'),
('sty', ','.join(fields.keys())),
('p', '1'),
('ps', '2000'),
)
url = 'https://datacenter.eastmoney.com/securities/api/data/get'
response = requests.get(
url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
        return pd.DataFrame(columns=fields.values())
df = pd.DataFrame(items)
df = df.rename(columns=fields)
df['报告日期'] = df['报告日期'].apply(lambda x: x.split()[0])
return df
@to_numeric
def get_all_company_performance(date: str = None) -> pd.DataFrame:
"""
Get the quarterly performance of stocks in the Shanghai and Shenzhen markets
Parameters
----------
date : str, optional
Report release date; some valid examples are listed below (defaults to ``None``)
- ``None`` : latest quarterly report
- ``'2021-06-30'`` : 2021 Q2 report
- ``'2021-03-31'`` : 2021 Q1 report
Returns
-------
DataFrame
Quarterly performance of stocks in the Shanghai and Shenzhen markets
Examples
---------
>>> import efinance as ef
>>> # Fetch the latest quarterly results
>>> ef.stock.get_all_company_performance()
股票代码 股票简称 公告日期 营业收入 营业收入同比增长 营业收入季度环比 净利润 净利润同比增长 净利润季度环比 每股收益 每股净资产 净资产收益率 销售毛利率 每股经营现金流量
0 688981 中芯国际 2021-08-28 00:00:00 1.609039e+10 22.253453 20.6593 5.241321e+09 278.100000 307.8042 0.6600 11.949525 5.20 26.665642 1.182556
1 688819 天能股份 2021-08-28 00:00:00 1.625468e+10 9.343279 23.9092 6.719446e+08 -14.890000 -36.8779 0.7100 11.902912 6.15 17.323263 -1.562187
2 688789 宏华数科 2021-08-28 00:00:00 4.555604e+08 56.418441 6.5505 1.076986e+08 49.360000 -7.3013 1.8900 14.926761 13.51 43.011243 1.421272
3 688681 科汇股份 2021-08-28 00:00:00 1.503343e+08 17.706987 121.9407 1.664509e+07 -13.100000 383.3331 0.2100 5.232517 4.84 47.455511 -0.232395
4 688670 金迪克 2021-08-28 00:00:00 3.209423e+07 -63.282413 -93.1788 -2.330505e+07 -242.275001 -240.1554 -0.3500 3.332254 -10.10 85.308531 1.050348
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
3720 600131 国网信通 2021-07-16 00:00:00 2.880378e+09 6.787087 69.5794 2.171389e+08 29.570000 296.2051 0.1800 4.063260 4.57 19.137437 -0.798689
3721 600644 乐山电力 2021-07-15 00:00:00 1.257030e+09 18.079648 5.7300 8.379727e+07 -14.300000 25.0007 0.1556 3.112413 5.13 23.645137 0.200906
3722 002261 拓维信息 2021-07-15 00:00:00 8.901777e+08 47.505282 24.0732 6.071063e+07 68.320000 30.0596 0.0550 2.351598 2.37 37.047968 -0.131873
3723 601952 苏垦农发 2021-07-13 00:00:00 4.544138e+09 11.754570 47.8758 3.288132e+08 1.460000 83.1486 0.2400 3.888046 6.05 15.491684 -0.173772
3724 601568 北元集团 2021-07-09 00:00:00 6.031506e+09 32.543303 30.6352 1.167989e+09 61.050000 40.8165 0.3200 3.541533 9.01 27.879243 0.389860
>>> # Fetch quarterly results for a specific report date
>>> ef.stock.get_all_company_performance('2020-03-31')
股票代码 股票简称 公告日期 营业收入 营业收入同比增长 营业收入季度环比 净利润 净利润同比增长 净利润季度环比 每股收益 每股净资产 净资产收益率 销售毛利率 每股经营现金流量
0 605033 美邦股份 2021-08-25 00:00:00 2.178208e+08 NaN NaN 4.319814e+07 NaN NaN 0.4300 NaN NaN 37.250416 NaN
1 301048 金鹰重工 2021-07-30 00:00:00 9.165528e+07 NaN NaN -2.189989e+07 NaN NaN NaN NaN -1.91 20.227118 NaN
2 001213 中铁特货 2021-07-29 00:00:00 1.343454e+09 NaN NaN -3.753634e+07 NaN NaN -0.0100 NaN NaN -1.400708 NaN
3 605588 冠石科技 2021-07-28 00:00:00 1.960175e+08 NaN NaN 1.906751e+07 NaN NaN 0.3500 NaN NaN 16.324650 NaN
4 688798 艾为电子 2021-07-27 00:00:00 2.469943e+08 NaN NaN 2.707568e+07 NaN NaN 0.3300 NaN 8.16 33.641934 NaN
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
4440 603186 华正新材 2020-04-09 00:00:00 4.117502e+08 -6.844813 -23.2633 1.763252e+07 18.870055 -26.3345 0.1400 5.878423 2.35 18.861255 0.094249
4441 002838 道恩股份 2020-04-09 00:00:00 6.191659e+08 -8.019810 -16.5445 6.939886e+07 91.601624 76.7419 0.1700 2.840665 6.20 22.575224 0.186421
4442 600396 金山股份 2020-04-08 00:00:00 2.023133e+09 0.518504 -3.0629 1.878432e+08 114.304022 61.2733 0.1275 1.511012 8.81 21.422393 0.085698
4443 002913 奥士康 2020-04-08 00:00:00 4.898977e+08 -3.883035 -23.2268 2.524717e+07 -47.239162 -58.8136 0.1700 16.666749 1.03 22.470020 0.552624
4444 002007 华兰生物 2020-04-08 00:00:00 6.775414e+08 -2.622289 -36.1714 2.472864e+08 -4.708821 -22.6345 0.1354 4.842456 3.71 61.408522 0.068341
Notes
-----
If the given date is invalid, the list of valid dates is printed.
You can also call ``efinance.stock.get_all_report_dates`` to obtain the valid dates.
"""
# TODO: speed this up
fields = {
'SECURITY_CODE': '股票代码',
'SECURITY_NAME_ABBR': '股票简称',
'NOTICE_DATE': '公告日期',
'TOTAL_OPERATE_INCOME': '营业收入',
'YSTZ': '营业收入同比增长',
'YSHZ': '营业收入季度环比',
'PARENT_NETPROFIT': '净利润',
'SJLTZ': '净利润同比增长',
'SJLHZ': '净利润季度环比',
'BASIC_EPS': '每股收益',
'BPS': '每股净资产',
'WEIGHTAVG_ROE': '净资产收益率',
'XSMLL': '销售毛利率',
'MGJYXJJE': '每股经营现金流量'
# 'ISNEW':'是否最新'
}
dates = get_all_report_dates()['报告日期'].to_list()
if date is None:
date = dates[0]
if date not in dates:
rich.print('日期输入有误,可选日期如下:')
rich.print(dates)
return pd.DataFrame(columns=fields.values())
date = f"(REPORTDATE=\'{date}\')"
page = 1
dfs: List[pd.DataFrame] = []
while 1:
params = (
('st', 'NOTICE_DATE,SECURITY_CODE'),
('sr', '-1,-1'),
('ps', '500'),
('p', f'{page}'),
('type', 'RPT_LICO_FN_CPD'),
('sty', 'ALL'),
('token', '<KEY>'),
# ! A-shares listed in Shanghai and Shenzhen only
('filter',
f'(SECURITY_TYPE_CODE in ("058001001","058001008")){date}'),
)
url = 'http://datacenter-web.eastmoney.com/api/data/get'
response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
df = pd.DataFrame(items)
dfs.append(df)
page += 1
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
df = pd.concat(dfs, axis=0, ignore_index=True)
df = df.rename(columns=fields)[fields.values()]
return df
@to_numeric
def get_latest_holder_number(date: str = None) -> pd.DataFrame:
"""
Get the latest publicly disclosed changes in shareholder counts for Shanghai/Shenzhen A-shares, or the changes for a given report period
Parameters
----------
date : str, optional
Report period date; some valid examples are listed below
- ``None`` latest report period
- ``'2021-06-30'`` 2021 interim (half-year) report
- ``'2021-03-31'`` 2021 Q1 report
Returns
-------
DataFrame
Latest or period-specific shareholder count changes for Shanghai/Shenzhen A-shares
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_latest_holder_number()
股票代码 股票名称 股东人数 股东人数增减 较上期变化百分比 股东户数统计截止日 户均持股市值 户均持股数量 总市值 总股本 公告日期
0 301029 怡合达 12021 -1.636527 -200.0 2021-09-30 00:00:00 2.790187e+06 33275.933783 3.354084e+10 400010000 2021-10-09 00:00:00
1 301006 迈拓股份 10964 -0.463005 -51.0 2021-09-30 00:00:00 3.493433e+05 12703.392922 3.830200e+09 139280000 2021-10-09 00:00:00
2 301003 江苏博云 11642 -2.658863 -318.0 2021-09-30 00:00:00 2.613041e+05 5004.867463 3.042103e+09 58266667 2021-10-09 00:00:00
3 300851 交大思诺 12858 -2.752987 -364.0 2021-09-30 00:00:00 2.177054e+05 6761.035931 2.799255e+09 86933400 2021-10-09 00:00:00
4 300830 金现代 34535 -16.670688 -6909.0 2021-09-30 00:00:00 2.001479e+05 12454.756045 6.912109e+09 430125000 2021-10-09 00:00:00
... ... ... ... ... ... ... ... ... ... ... ...
4435 600618 氯碱化工 45372 -0.756814 -346.0 2014-06-30 00:00:00 1.227918e+05 16526.491581 5.571311e+09 749839976 2014-08-22 00:00:00
4436 601880 辽港股份 89923 -3.589540 -3348.0 2014-03-31 00:00:00 9.051553e+04 37403.111551 8.139428e+09 3363400000 2014-04-30 00:00:00
4437 600685 中船防务 52296 -4.807325 -2641.0 2014-03-11 00:00:00 1.315491e+05 8384.263691 6.879492e+09 438463454 2014-03-18 00:00:00
4438 000017 深中华A 21358 -10.800200 -2586.0 2013-06-30 00:00:00 5.943993e+04 14186.140556 1.269518e+09 302987590 2013-08-24 00:00:00
4439 601992 金隅集团 66736 -12.690355 -9700.0 2013-06-30 00:00:00 2.333339e+05 46666.785918 1.557177e+10 3114354625 2013-08-22 00:00:00
>>> ef.stock.get_latest_holder_number(date='2021-06-30')
股票代码 股票名称 股东人数 股东人数增减 较上期变化百分比 股东户数统计截止日 户均持股市值 户均持股数量 总市值 总股本 公告日期
0 688768 容知日新 24 0.000000 0.0 2021-06-30 00:00:00 NaN 1.714395e+06 NaN 41145491 2021-08-31 00:00:00
1 688669 聚石化学 8355 -11.135929 -1047.0 2021-06-30 00:00:00 3.662956e+05 1.117096e+04 3.060400e+09 93333334 2021-08-31 00:00:00
2 688613 奥精医疗 8768 -71.573999 -22077.0 2021-06-30 00:00:00 1.380627e+06 1.520681e+04 1.210533e+10 133333334 2021-08-31 00:00:00
3 688586 江航装备 20436 -5.642257 -1222.0 2021-06-30 00:00:00 5.508121e+05 1.975653e+04 1.125640e+10 403744467 2021-08-31 00:00:00
4 688559 海目星 7491 -16.460355 -1476.0 2021-06-30 00:00:00 8.071019e+05 2.669871e+04 6.046000e+09 200000000 2021-08-31 00:00:00
... ... ... ... ... ... ... ... ... ... ... ...
4292 002261 拓维信息 144793 0.931290 1336.0 2021-06-30 00:00:00 7.731589e+04 7.602349e+03 1.119480e+10 1100766874 2021-07-15 00:00:00
4293 002471 中超控股 75592 1.026409 768.0 2021-06-30 00:00:00 4.864536e+04 1.677426e+04 3.677200e+09 1268000000 2021-07-12 00:00:00
4294 600093 *ST易见 52497 -2.118099 -1136.0 2021-06-30 00:00:00 1.267904e+05 2.138117e+04 6.656114e+09 1122447500 2021-07-06 00:00:00
4295 688091 上海谊众 25 0.000000 0.0 2021-06-30 00:00:00 NaN 3.174000e+06 NaN 79350000 2021-07-02 00:00:00
4296 301053 远信工业 10 0.000000 0.0 2021-06-30 00:00:00 NaN 6.131250e+06 NaN 61312500 2021-06-30 00:00:00
"""
dfs: List[pd.DataFrame] = []
if date is not None:
date: datetime = datetime.strptime(date, '%Y-%m-%d')
year = date.year
month = date.month
if month % 3 != 0:
month -= month % 3
# TODO: better handle dates whose month is valid but whose day is not the last day of that month
if month < 3:
year -= 1
# NOTE: corresponds to the last month of the previous year
month = 12
_, last_day = calendar.monthrange(year, month)
date: str = datetime.strptime(
f'{year}-{month}-{last_day}', '%Y-%m-%d').strftime('%Y-%m-%d')
page = 1
fields = {
'SECURITY_CODE': '股票代码',
'SECURITY_NAME_ABBR': '股票名称',
'HOLDER_NUM': '股东人数',
'HOLDER_NUM_RATIO': '股东人数增减',
'HOLDER_NUM_CHANGE': '较上期变化百分比',
'END_DATE': '股东户数统计截止日',
'AVG_MARKET_CAP': '户均持股市值',
'AVG_HOLD_NUM': '户均持股数量',
'TOTAL_MARKET_CAP': '总市值',
'TOTAL_A_SHARES': '总股本',
'HOLD_NOTICE_DATE': '公告日期'
}
while 1:
params = [
('sortColumns', 'HOLD_NOTICE_DATE,SECURITY_CODE'),
('sortTypes', '-1,-1'),
('pageSize', '500'),
('pageNumber', page),
('columns', 'SECURITY_CODE,SECURITY_NAME_ABBR,END_DATE,INTERVAL_CHRATE,AVG_MARKET_CAP,AVG_HOLD_NUM,TOTAL_MARKET_CAP,TOTAL_A_SHARES,HOLD_NOTICE_DATE,HOLDER_NUM,PRE_HOLDER_NUM,HOLDER_NUM_CHANGE,HOLDER_NUM_RATIO,END_DATE,PRE_END_DATE'),
('quoteColumns', 'f2,f3'),
('source', 'WEB'),
('client', 'WEB'),
]
if date is not None:
# NOTE: the escaped quotes \' must not be omitted
params.append(('filter', f'(END_DATE=\'{date}\')'))
params.append(('reportName', 'RPT_HOLDERNUM_DET'))
else:
params.append(('reportName', 'RPT_HOLDERNUMLATEST'))
params = tuple(params)
url = 'http://datacenter-web.eastmoney.com/api/data/v1/get'
response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
df = pd.DataFrame(items)
df = df.rename(columns=fields)[fields.values()]
page += 1
dfs.append(df)
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
df = pd.concat(dfs, ignore_index=True)
return df
@to_numeric
@retry(tries=3)
def get_daily_billboard(start_date: str = None,
end_date: str = None) -> pd.DataFrame:
"""
Get detailed 'Dragon and Tiger List' (daily billboard) data for the given date range
Parameters
----------
start_date : str, optional
Start date
Some valid examples are listed below
- ``None`` most recent billboard publication date (default)
- ``"2021-08-27"`` August 27, 2021
end_date : str, optional
End date
Some valid examples are listed below
- ``None`` most recent billboard publication date (default)
- ``"2021-08-31"`` August 31, 2021
Returns
-------
DataFrame
Detailed daily billboard data
Examples
--------
>>> import efinance as ef
>>> # Fetch the most recent public billboard data (an example for a specific date range follows below)
>>> ef.stock.get_daily_billboard()
股票代码 股票名称 上榜日期 解读 收盘价 涨跌幅 换手率 龙虎榜净买额 龙虎榜买入额 龙虎榜卖出额 龙虎榜成交额 市场总成交额 净买额占总成交比 成交额占总成交比 流通市值 上榜原因
0 000608 阳光股份 2021-08-27 卖一主卖,成功率48.36% 3.73 -9.9034 3.8430 -8.709942e+06 1.422786e+07 2.293780e+07 3.716565e+07 110838793 -7.858208 33.531268 2.796761e+09 日跌幅偏离值达到7%的前5只证券
1 000751 锌业股份 2021-08-27 主力做T,成功率18.84% 5.32 -2.9197 19.6505 -1.079219e+08 5.638899e+07 1.643109e+08 2.206999e+08 1462953973 -7.376984 15.085906 7.500502e+09 日振幅值达到15%的前5只证券
2 000762 西藏矿业 2021-08-27 北京资金买入,成功率39.42% 63.99 1.0741 15.6463 2.938758e+07 4.675541e+08 4.381665e+08 9.057206e+08 4959962598 0.592496 18.260633 3.332571e+10 日振幅值达到15%的前5只证券
3 000833 粤桂股份 2021-08-27 实力游资买入,成功率44.55% 8.87 10.0496 8.8263 4.993555e+07 1.292967e+08 7.936120e+07 2.086580e+08 895910429 5.573721 23.290046 3.353614e+09 连续三个交易日内,涨幅偏离值累计达到20%的证券
4 001208 华菱线缆 2021-08-27 1家机构买入,成功率40.43% 19.72 4.3386 46.1985 4.055258e+07 1.537821e+08 1.132295e+08 2.670117e+08 1203913048 3.368398 22.178651 2.634710e+09 日换手率达到20%的前5只证券
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
70 688558 国盛智科 2021-08-27 买一主买,成功率38.71% 60.72 1.6064 34.0104 1.835494e+07 1.057779e+08 8.742293e+07 1.932008e+08 802569300 2.287023 24.072789 2.321743e+09 有价格涨跌幅限制的日换手率达到30%的前五只证券
71 688596 正帆科技 2021-08-27 1家机构买入,成功率57.67% 26.72 3.1660 3.9065 -1.371039e+07 8.409046e+07 9.780085e+07 1.818913e+08 745137400 -1.839982 24.410438 4.630550e+09 有价格涨跌幅限制的连续3个交易日内收盘价格涨幅偏离值累计达到30%的证券
72 688663 新风光 2021-08-27 卖一主卖,成功率37.18% 28.17 -17.6316 32.2409 1.036460e+07 5.416901e+07 4.380440e+07 9.797341e+07 274732700 3.772613 35.661358 8.492507e+08 有价格涨跌幅限制的日收盘价格跌幅达到15%的前五只证券
73 688663 新风光 2021-08-27 卖一主卖,成功率37.18% 28.17 -17.6316 32.2409 1.036460e+07 5.416901e+07 4.380440e+07 9.797341e+07 274732700 3.772613 35.661358 8.492507e+08 有价格涨跌幅限制的日换手率达到30%的前五只证券
74 688667 菱电电控 2021-08-27 1家机构卖出,成功率49.69% 123.37 -18.8996 17.7701 -2.079877e+06 4.611216e+07 4.819204e+07 9.430420e+07 268503400 -0.774618 35.122163 1.461225e+09 有价格涨跌幅限制的日收盘价格跌幅达到15%的前五只证券
>>> # Fetch billboard data for a specific date range
>>> start_date = '2021-08-20' # start date
>>> end_date = '2021-08-27' # end date
>>> ef.stock.get_daily_billboard(start_date = start_date,end_date = end_date)
股票代码 股票名称 上榜日期 解读 收盘价 涨跌幅 换手率 龙虎榜净买额 龙虎榜买入额 龙虎榜卖出额 龙虎榜成交额 市场总成交额 净买额占总成交比 成交额占总成交比 流通市值 上榜原因
0 000608 阳光股份 2021-08-27 卖一主卖,成功率48.36% 3.73 -9.9034 3.8430 -8.709942e+06 1.422786e+07 2.293780e+07 3.716565e+07 110838793 -7.858208 33.531268 2.796761e+09 日跌幅偏离值达到7%的前5只证券
1 000751 锌业股份 2021-08-27 主力做T,成功率18.84% 5.32 -2.9197 19.6505 -1.079219e+08 5.638899e+07 1.643109e+08 2.206999e+08 1462953973 -7.376984 15.085906 7.500502e+09 日振幅值达到15%的前5只证券
2 000762 西藏矿业 2021-08-27 北京资金买入,成功率39.42% 63.99 1.0741 15.6463 2.938758e+07 4.675541e+08 4.381665e+08 9.057206e+08 4959962598 0.592496 18.260633 3.332571e+10 日振幅值达到15%的前5只证券
3 000833 粤桂股份 2021-08-27 实力游资买入,成功率44.55% 8.87 10.0496 8.8263 4.993555e+07 1.292967e+08 7.936120e+07 2.086580e+08 895910429 5.573721 23.290046 3.353614e+09 连续三个交易日内,涨幅偏离值累计达到20%的证券
4 001208 华菱线缆 2021-08-27 1家机构买入,成功率40.43% 19.72 4.3386 46.1985 4.055258e+07 1.537821e+08 1.132295e+08 2.670117e+08 1203913048 3.368398 22.178651 2.634710e+09 日换手率达到20%的前5只证券
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
414 605580 恒盛能源 2021-08-20 买一主买,成功率33.33% 13.28 10.0249 0.4086 2.413149e+06 2.713051e+06 2.999022e+05 3.012953e+06 2713051 88.945937 111.054054 6.640000e+08 有价格涨跌幅限制的日收盘价格涨幅偏离值达到7%的前三只证券
415 688029 南微医学 2021-08-20 4家机构卖出,成功率55.82% 204.61 -18.5340 8.1809 -1.412053e+08 1.883342e+08 3.295394e+08 5.178736e+08 762045800 -18.529760 67.958326 9.001510e+09 有价格涨跌幅限制的日收盘价格跌幅达到15%的前五只证券
416 688408 中信博 2021-08-20 4家机构卖出,成功率47.86% 179.98 -0.0666 15.3723 -4.336304e+07 3.750919e+08 4.184550e+08 7.935469e+08 846547400 -5.122340 93.739221 5.695886e+09 有价格涨跌幅限制的日价格振幅达到30%的前五只证券
417 688556 高测股份 2021-08-20 上海资金买入,成功率60.21% 51.97 17.0495 10.6452 -3.940045e+07 1.642095e+08 2.036099e+08 3.678194e+08 575411600 -6.847351 63.922831 5.739089e+09 有价格涨跌幅限制的日收盘价格涨幅达到15%的前五只证券
418 688636 智明达 2021-08-20 2家机构买入,成功率47.37% 161.90 15.8332 11.9578 2.922406e+07 6.598126e+07 3.675721e+07 1.027385e+08 188330100 15.517464 54.552336 1.647410e+09 有价格涨跌幅限制的日收盘价格涨幅达到15%的前五只证券
"""
today = datetime.today().date()
mode = 'auto'
if start_date is None:
start_date = today
if end_date is None:
end_date = today
if isinstance(start_date, str):
mode = 'user'
start_date = datetime.strptime(start_date, '%Y-%m-%d')
if isinstance(end_date, str):
mode = 'user'
end_date = datetime.strptime(end_date, '%Y-%m-%d')
fields = EASTMONEY_STOCK_DAILY_BILL_BOARD_FIELDS
bar: tqdm = None
while 1:
dfs: List[pd.DataFrame] = []
page = 1
while 1:
params = (
('sortColumns', 'TRADE_DATE,SECURITY_CODE'),
('sortTypes', '-1,1'),
('pageSize', '500'),
('pageNumber', page),
('reportName', 'RPT_DAILYBILLBOARD_DETAILS'),
('columns', 'ALL'),
('source', 'WEB'),
('client', 'WEB'),
('filter',
f"(TRADE_DATE<='{end_date}')(TRADE_DATE>='{start_date}')"),
)
url = 'http://datacenter-web.eastmoney.com/api/data/v1/get'
response = session.get(url, params=params)
if bar is None:
pages = jsonpath(response.json(), '$..pages')
if pages and pages[0] != 1:
total = pages[0]
bar = tqdm(total=int(total))
if bar is not None:
bar.update()
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
page += 1
df = pd.DataFrame(items).rename(columns=fields)[fields.values()]
dfs.append(df)
if mode == 'user':
break
if len(dfs) == 0:
start_date = start_date-timedelta(1)
end_date = end_date-timedelta(1)
if len(dfs) > 0:
break
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
df = pd.concat(dfs, ignore_index=True)
df['上榜日期'] = df['上榜日期'].astype('str').apply(lambda x: x.split(' ')[0])
return df
def get_members(index_code: str) -> pd.DataFrame:
"""
Get the constituent stocks of an index
Parameters
----------
index_code : str
Index name or index code
Returns
-------
DataFrame
Constituent stock information of the index
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_members('000300')
指数代码 指数名称 股票代码 股票名称 股票权重
0 000300 沪深300 600519 贵州茅台 4.77
1 000300 沪深300 601398 工商银行 3.46
2 000300 沪深300 601939 建设银行 3.12
3 000300 沪深300 600036 招商银行 2.65
4 000300 沪深300 601857 中国石油 2.37
.. ... ... ... ... ...
295 000300 沪深300 688126 沪硅产业 NaN
296 000300 沪深300 688169 石头科技 NaN
297 000300 沪深300 688036 传音控股 NaN
298 000300 沪深300 688009 中国通号 NaN
299 000300 沪深300 688008 澜起科技 NaN
>>> ef.stock.get_members('中证白酒')
指数代码 指数名称 股票代码 股票名称 股票权重
0 399997 中证白酒 600519 贵州茅台 49.25
1 399997 中证白酒 000858 五粮液 18.88
2 399997 中证白酒 600809 山西汾酒 8.45
3 399997 中证白酒 000568 泸州老窖 7.03
4 399997 中证白酒 002304 洋河股份 5.72
5 399997 中证白酒 000596 古井贡酒 2.76
6 399997 中证白酒 000799 酒鬼酒 1.77
7 399997 中证白酒 600779 水井坊 1.36
8 399997 中证白酒 603369 今世缘 1.26
9 399997 中证白酒 603198 迎驾贡酒 0.89
10 399997 中证白酒 603589 口子窖 0.67
11 399997 中证白酒 000860 顺鑫农业 0.59
12 399997 中证白酒 600559 老白干酒 0.44
13 399997 中证白酒 603919 金徽酒 0.39
14 399997 中证白酒 600197 伊力特 0.28
15 399997 中证白酒 600199 金种子酒 0.26
"""
fields = {
'IndexCode': '指数代码',
'IndexName': '指数名称',
'StockCode': '股票代码',
'StockName': '股票名称',
'MARKETCAPPCT': '股票权重'
}
qs = search_quote(index_code, count=10)
df = pd.DataFrame(columns=fields.values())
if not qs:
return df
for q in qs:
if q.security_typeName == '指数':
params = (
('IndexCode', f'{q.code}'),
('pageIndex', '1'),
('pageSize', '10000'),
('deviceid', '1234567890'),
('version', '6.9.9'),
('product', 'EFund'),
('plat', 'Iphone'),
('ServerVersion', '6.9.9'),
)
url = 'https://fundztapi.eastmoney.com/FundSpecialApiNew/FundSpecialZSB30ZSCFG'
json_response = requests.get(
url,
params=params,
headers=EASTMONEY_REQUEST_HEADERS).json()
items = json_response['Datas']
# NOTE: this skips indices that rank high in the search results but whose constituents cannot be fetched, e.g. 980031, which ranks first when searching for 白酒
if not items:
continue
df: pd.DataFrame = pd.DataFrame(items).rename(
columns=fields)[fields.values()]
df['股票权重'] = pd.to_nu | meric(df['股票权重'], errors='coerce') | pandas.to_numeric |
# Generate content tables
# Run from the root of the repo:
# python3 vanda_jobs/scripts/content-table-generations.py -i objects -j ./GITIGNORE_DATA/elastic_export/objects/custom -g -o ./GITIGNORE_DATA/hc_import/content
# python3 vanda_jobs/scripts/content-table-generations.py -i persons -j ./GITIGNORE_DATA/elastic_export/persons/all -b -o ./GITIGNORE_DATA/hc_import/content
# python3 vanda_jobs/scripts/content-table-generations.py -i organisations -j ./GITIGNORE_DATA/elastic_export/organisations/all -b -o ./GITIGNORE_DATA/hc_import/content
# python3 vanda_jobs/scripts/content-table-generations.py -i events -j ./GITIGNORE_DATA/elastic_export/events/all -g -o ./GITIGNORE_DATA/hc_import/content
# Generate join tables
# Run from the root of repo:
# python3 vanda_jobs/scripts/join-table-generations.py -j ./GITIGNORE_DATA/elastic_export/objects/custom -g -o ./GITIGNORE_DATA/hc_import/join
import sys
sys.path.append("..")
import pandas as pd
import random
import re
from heritageconnector.config import config, field_mapping
from heritageconnector import datastore, datastore_helpers
from heritageconnector.namespace import (
XSD,
FOAF,
OWL,
RDF,
PROV,
SDO,
SKOS,
WD,
WDT,
)
from heritageconnector.utils.data_transformation import get_year_from_date_value
from heritageconnector.utils.generic import get_timestamp
from heritageconnector import logging
logger = logging.get_logger(__name__)
# disable pandas SettingWithCopyWarning
pd.options.mode.chained_assignment = None
# optional limit of number of records to import to test loader. no limit -> None
# passed as an argument into `pd.read_csv`. You might want to use your own implementation
# depending on your source data format
max_records = None
MAX_NO_WORDS_PER_DESCRIPTION = 500
# create instance of RecordLoader from datastore
record_loader = datastore.RecordLoader(
collection_name="vanda", field_mapping=field_mapping
)
# ======================================================
## Content Table Loading
def trim_description(desc: str, n_words: int) -> str:
"""Return the first `n_words` words of description `desc`/"""
return " ".join(str(desc).split(" ")[0:n_words])
def reverse_person_preferred_name_and_strip_brackets(name: str) -> str:
name_stripped = re.sub(r"\([^()]*\)", "", name)
if not pd.isnull(name_stripped) and len(name_stripped.split(",")) == 2:
return f"{name_stripped.split(',')[1].strip()} {name_stripped.split(',')[0].strip()}"
else:
return name_stripped
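# Illustrative example, assuming the usual "Surname, Forename (role)" input format:
# >>> reverse_person_preferred_name_and_strip_brackets("Smith, John (photographer)")
# 'John Smith'
# Names without exactly one comma are returned with only the bracketed text removed.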
def create_object_disambiguating_description(
row: pd.Series, description_field: str = "COMBINED_DESCRIPTION"
) -> str:
"""
Original description col = `description_field` (function argument).
Components:
- OBJECT_TYPE -> 'Pocket watch.'
- PRIMARY_PLACE + PRIMARY_DATE -> 'Made in London, 1940.'
- the original description (the column named by `description_field`)
NOTE: must be used before dates are converted to numbers using `get_year_from_date_value`, so that
uncertainty such as 'about 1971' is added to the description.
"""
# OBJECT_TYPE
# Here we also check that the name without 's' or 'es' is not already in the description,
# which should cover the majority of plurals.
if (
(str(row.OBJECT_TYPE) != "nan")
and (str(row.OBJECT_TYPE).lower() not in row[description_field].lower())
and (
str(row.OBJECT_TYPE).rstrip("s").lower()
not in row[description_field].lower()
)
and (
str(row.OBJECT_TYPE).rstrip("es").lower()
not in row[description_field].lower()
)
):
object_type = f"{row.OBJECT_TYPE.capitalize().strip()}."
else:
object_type = ""
# PRIMARY_PLACE + PRIMARY_DATE
add_place_made = (
(row["PRIMARY_PLACE"])
and (str(row["PRIMARY_PLACE"]) != "nan")
and (str(row["PRIMARY_PLACE"]).lower() not in row[description_field].lower())
)
add_date_made = (
(row["PRIMARY_DATE"])
and (str(row["PRIMARY_DATE"]) != "nan")
and (str(row["PRIMARY_DATE"]))
and (str(row["PRIMARY_DATE"]) not in row[description_field].lower())
)
# Also check for dates minus suffixes, e.g. 200-250 should match with 200-250 AD and vice-versa
if re.findall(r"\d+-?\d*", str(row["PRIMARY_DATE"])):
add_date_made = add_date_made and (
re.findall(r"\d+-?\d*", row["PRIMARY_DATE"])[0].lower()
not in row[description_field].lower()
)
if add_place_made and add_date_made:
made_str = f"Made in {row.PRIMARY_PLACE.strip()}, {row.PRIMARY_DATE}."
elif add_place_made:
made_str = f"Made in {row.PRIMARY_PLACE.strip()}."
elif add_date_made:
made_str = f"Made {row.PRIMARY_DATE}."
else:
made_str = ""
# add space and full stop (if needed) to end of description
if row[description_field].strip():
description = (
row[description_field].strip()
if row[description_field].strip()[-1] == "."
else f"{row[description_field].strip()}."
)
else:
description = ""
# we shuffle the components of the description so any model using them does not learn the order that we put them in
aug_description_components = [object_type, description, made_str]
random.shuffle(aug_description_components)
return (" ".join(aug_description_components)).strip()
def load_object_data(data_path):
"""Load data from ndjson files """
table_name = "OBJECT"
object_df = pd.read_json(data_path, lines=True, nrows=max_records)
# PRE_PROCESS
object_df[["DESCRIPTION", "PHYS_DESCRIPTION", "PRODUCTION_TYPE"]] = object_df[
["DESCRIPTION", "PHYS_DESCRIPTION", "PRODUCTION_TYPE"]
].fillna("")
# remove newlines and tab chars
object_df.loc[
:, ["DESCRIPTION", "PHYS_DESCRIPTION", "PRODUCTION_TYPE"]
] = object_df.loc[
:, ["DESCRIPTION", "PHYS_DESCRIPTION", "PRODUCTION_TYPE"]
].applymap(
datastore_helpers.process_text
)
# create combined text fields
newline = " \n " # can't insert into fstring below
object_df.loc[:, "COMBINED_DESCRIPTION"] = object_df[
["DESCRIPTION", "PHYS_DESCRIPTION", "PRODUCTION_TYPE"]
].apply(lambda x: f"{newline.join(x)}" if any(x) else "", axis=1)
object_df["COMBINED_DESCRIPTION"] = object_df["COMBINED_DESCRIPTION"].apply(
lambda x: trim_description(x, MAX_NO_WORDS_PER_DESCRIPTION)
)
object_df["DISAMBIGUATING_DESCRIPTION"] = object_df.apply(
create_object_disambiguating_description, axis=1
)
# convert date created to year
object_df["PRIMARY_DATE"] = object_df["PRIMARY_DATE"].apply(
get_year_from_date_value
)
logger.info("loading object data")
record_loader.add_records(table_name, object_df, add_type=WD.Q488383)
return
def create_people_disambiguating_description(row: pd.Series) -> str:
"""
Original description col = BIOGRAPHY.
Components:
- NATIONALITY -> 'American.'
- BIRTHPLACE + BIRTHDATE_EARLIEST -> 'Born in United Kingdom, 1962.'
- DEATHPLACE -> 'Died in France.' (only added when there is no overlap between the
BIRTHPLACE and DEATHPLACE strings; joined to the born string above)
- BIOGRAPHY (original description)
NOTE: must be used before dates are converted to numbers using `get_year_from_date_value`, so that
uncertainty such as 'about 1971' is added to the description.
"""
# NATIONALITY + OCCUPATION (only uses first of each)
nationality = str(row["NATIONALITY"])
add_nationality = (nationality != "nan") and (
nationality.lower() not in row.BIOGRAPHY.lower()
)
if add_nationality:
nationality_occupation_str = f"{nationality.strip().title()}."
else:
nationality_occupation_str = ""
# BIRTH_PLACE + BIRTH_DATE
add_birth_place = (str(row["BIRTHPLACE"]) != "nan") and (
str(row["BIRTHPLACE"]).lower() not in row.BIOGRAPHY.lower()
)
add_birth_date = (str(row["BIRTHDATE_EARLIEST"]) != "nan") and (
str(row["BIRTHDATE_EARLIEST"]).lower() not in row.BIOGRAPHY.lower()
)
# Also check for dates minus suffixes, e.g. 200-250 should match with 200-250 AD and vice-versa
if re.findall(r"\d+-?\d*", str(row["BIRTHDATE_EARLIEST"])):
add_birth_date = add_birth_date and (
re.findall(r"\d+-?\d*", row["BIRTHDATE_EARLIEST"])[0].lower()
not in row.BIOGRAPHY.lower()
)
if add_birth_place and add_birth_date:
founded_str = (
f"Born in {row.BIRTHPLACE.strip()}, {row.BIRTHDATE_EARLIEST.strip()}."
)
elif add_birth_place:
founded_str = f"Born in {row.BIRTHPLACE.strip()}."
elif add_birth_date:
founded_str = f"Born {row.BIRTHDATE_EARLIEST.strip()}."
else:
founded_str = ""
# DEATH_PLACE + DEATH_DATE
add_death_place = (
row["DEATHPLACE"]
and (str(row["DEATHPLACE"]) != "nan")
and (str(row["DEATHPLACE"]).lower() not in row.BIOGRAPHY.lower())
and (str(row["DEATHPLACE"]) not in str(row["BIRTHPLACE"]))
and (str(row["BIRTHPLACE"]) not in str(row["DEATHPLACE"]))
)
if add_death_place:
dissolved_str = f"Died in {row.DEATHPLACE.strip()}."
else:
dissolved_str = ""
# Assemble
dates_str = " ".join([founded_str, dissolved_str]).strip()
# add space and full stop (if needed) to end of description
if row.BIOGRAPHY and str(row.BIOGRAPHY) != "nan":
description = (
row.BIOGRAPHY.strip()
if row.BIOGRAPHY.strip()[-1] == "."
else f"{row.BIOGRAPHY.strip()}."
)
else:
description = ""
# we shuffle the components of the description so any model using them does not learn the order that we put them in
aug_description_components = [nationality_occupation_str, description, dates_str]
random.shuffle(aug_description_components)
return (" ".join(aug_description_components)).strip()
def load_person_data(data_path):
"""Load data from ndjson files """
table_name = "PERSON"
person_df = pd.read_json(data_path, lines=True, nrows=max_records)
person_df["BIOGRAPHY"] = person_df["BIOGRAPHY"].apply(
lambda x: trim_description(x, MAX_NO_WORDS_PER_DESCRIPTION)
)
# convert birthdate to year
person_df["BIRTHDATE_EARLIEST"] = (
person_df["BIRTHDATE_EARLIEST"]
.apply(lambda x: x[0:4] if x is not None else x)
.fillna("")
)
person_df["DISAMBIGUATING_DESCRIPTION"] = person_df.apply(
create_people_disambiguating_description, axis=1
)
logger.info("loading person data")
record_loader.add_records(table_name, person_df, add_type=WD.Q5)
return
def create_org_disambiguating_description(row: pd.Series) -> str:
"""
Original description col = HISTORY.
Components:
- FOUNDATION_PLACE_NAME + FOUNDATION_DATE_EARLIEST -> 'Founded in United Kingdom, 1962.'
- HISTORY (original description)
NOTE: must be used before dates are converted to numbers using `get_year_from_date_value`, so that
uncertainty such as 'about 1971' is added to the description.
"""
founded_place_col = "FOUNDATION_PLACE_NAME"
founded_date_col = "FOUNDATION_DATE_EARLIEST"
description_col = "HISTORY"
# BIRTH_PLACE + BIRTH_DATE
add_birth_place = (str(row[founded_place_col]) != "nan") and (
str(row[founded_place_col]).lower() not in row[description_col].lower()
)
add_birth_date = (str(row[founded_date_col]) != "nan") and (
str(row[founded_date_col]).lower() not in row[description_col].lower()
)
# Also check for dates minus suffixes, e.g. 200-250 should match with 200-250 AD and vice-versa
if re.findall(r"\d+-?\d*", str(row[founded_date_col])):
add_birth_date = add_birth_date and (
re.findall(r"\d+-?\d*", row[founded_date_col])[0].lower()
not in row[description_col].lower()
)
if add_birth_place and add_birth_date:
founded_str = f"Founded in {row[founded_place_col].strip()}, {row[founded_date_col].strip()}."
elif add_birth_place:
founded_str = f"Founded in {row[founded_place_col].strip()}."
elif add_birth_date:
founded_str = f"Founded {row[founded_date_col].strip()}."
else:
founded_str = ""
# Assemble
dates_str = founded_str.strip()
# add space and full stop (if needed) to end of description
if row[description_col] and str(row[description_col]) != "nan":
description = (
row[description_col].strip()
if row[description_col].strip()[-1] == "."
else f"{row[description_col].strip()}."
)
else:
description = ""
# we shuffle the components of the description so any model using them does not learn the order that we put them in
aug_description_components = [description, dates_str]
random.shuffle(aug_description_components)
return (" ".join(aug_description_components)).strip()
def load_org_data(data_path):
"""Load data from ndjson files """
table_name = "ORGANISATION"
org_df = pd.read_json(data_path, lines=True, nrows=max_records)
org_df["HISTORY"] = org_df["HISTORY"].apply(
lambda x: trim_description(x, MAX_NO_WORDS_PER_DESCRIPTION)
)
# convert founding date to year
org_df["FOUNDATION_DATE_EARLIEST"] = (
org_df["FOUNDATION_DATE_EARLIEST"]
.apply(lambda x: x[0:4] if x is not None else x)
.fillna("")
)
org_df["DISAMBIGUATING_DESCRIPTION"] = org_df.apply(
create_org_disambiguating_description, axis=1
)
logger.info("loading org data")
record_loader.add_records(table_name, org_df, add_type=WD.Q43229)
return
def load_event_data(data_path):
"""Load data from ndjson files """
table_name = "EVENT"
event_df = | pd.read_json(data_path, lines=True, nrows=max_records) | pandas.read_json |
import os
import pandas as pd
import json
import cv2
def CSV_300W_LP(data_dir):
folders = [folder for folder in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, folder))]
images = []
for idx, folder in enumerate(folders):
folder_path = os.path.join(data_dir, folder)
folder_images = [image[:-4] for image in os.listdir(folder_path) if '.jpg' in image]
for image in folder_images:
image_path = os.path.join(folder, image)
images.append(image_path)
df = pd.DataFrame(images)
df.to_csv(os.path.join(data_dir, '300W_LP.txt'), header=False, index=False)
def CSV_custom(data_dir, data_type, output_dir, padding_perc = 0.0):
images_dir = os.path.join(data_dir,'images', data_type)
annotations = os.path.join(data_dir, 'annotations', '%s_400_rigi_essemble.json' %data_type)
with open(annotations, 'r') as f:
annon_dict = json.loads(f.read())
# Initializes variables
avail_imgs = annon_dict.keys()
x1_list = []
x2_list = []
y1_list = []
y2_list = []
roll_list = []
pitch_list = []
yaw_list = []
image_paths = []
# Gets path for all images
images = [os.path.join(images_dir, image) for image in os.listdir(images_dir) if 'jpg' or 'png' in image]
for image in images:
# read image (to determine size later)
img = cv2.imread(image)
# gets images Id
img_id = os.path.basename(image)[:-4].lstrip('0')
# ensures the image is in the dictionary key
if not img_id in avail_imgs:
continue
for idx, annon in enumerate(annon_dict[img_id].keys()):
# ensures we have a face detected
if not annon_dict[img_id][annon]['head_pose_pred']:
continue
bbox = annon_dict[img_id][annon]['head_pose_pred']['box']
x1 = bbox['x1']
x2 = bbox['x2']
y1 = bbox['y1']
y2 = bbox['y2']
# add padding to face
upper_y = int(max(0, y1 - (y2 - y1) * padding_perc))
lower_y = int(min(img.shape[0], y2 + (y2 - y1) * padding_perc))
left_x = int(max(0, x1 - (x2 - x1) * padding_perc))
right_x = int(min(img.shape[1], x2 + (x2 - x1) * padding_perc))
# get head pose labels
roll = annon_dict[img_id][annon]['head_pose_pred']['roll']
pitch = annon_dict[img_id][annon]['head_pose_pred']['pitch']
yaw = annon_dict[img_id][annon]['head_pose_pred']['yaw']
image_paths.append(os.path.basename(image)[:-4])
x1_list.append(left_x)
x2_list.append(right_x)
y1_list.append(upper_y)
y2_list.append(lower_y)
roll_list.append(roll)
pitch_list.append(pitch)
yaw_list.append(yaw)
# saves data in RetinaNet format
data = {'image_path': image_paths,
'x1': x1_list, 'x2': x2_list,
'y1': y1_list, 'y2': y2_list,
'roll': roll_list, 'pitch': pitch_list,
'yaw': yaw_list}
# Create DataFrame
df = pd.DataFrame(data)
df.to_csv(os.path.join(output_dir, '%s_labels_headpose_essemble.csv' %data_type), index=False, header=True)
def CSV_AFLW2000(data_dir):
images = [image[:-4] for image in os.listdir(data_dir) if '.jpg' in image]
df = pd.DataFrame(images)
df.to_csv(os.path.join(data_dir, 'AFLW2000.txt'), header=False, index=False)
def CSV_BIKI(data_dir):
folders = [folder for folder in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, folder))]
images = []
for idx, folder in enumerate(folders):
if folder == 'faces':
continue
folder_path = os.path.join(data_dir, folder)
# Initiating detector and coco annotations
folder_images = [os.path.join(folder, frame[:-8]) for frame in os.listdir(folder_path) if 'png' in frame]
for image in folder_images:
images.append(image)
df = pd.DataFrame(images)
df.to_csv(os.path.join(data_dir, 'BIWI.txt'), header=False, index=False)
def CSV_BIKI_faces(data_dir):
folders = [folder for folder in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, folder))]
images = []
for idx, folder in enumerate(folders):
with open(os.path.join(data_dir, folder, 'face_keypoints.json'), 'r') as f:
folder_dict = json.loads(f.read())
folder_images = folder_dict[folder].keys()
for image in folder_images:
image_path = os.path.join(folder, image[:-4])
images.append(image_path)
df = | pd.DataFrame(images) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = | Series(["a", "b", "c", "f"], index=idx) | pandas.Series |
import glob
import itertools
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from class_tree import EmbeddedClassTree
from dataset import EmbeddedDataset
from dataset_descriptor import DatasetDescriptor
from embedding import Embedding
from utils import get_timestamp, max_of_rows, mean_of_rows, path_to_name
def evaluate(scores, labels):
scores = scores if isinstance(scores, np.ndarray) else np.array(scores)
labels = labels if isinstance(labels, np.ndarray) else np.array(labels)
assert labels.dtype == 'int'
assert max(labels) == 1
assert min(labels) == -1
assert len(labels) == len(scores)
results = {}
pos_inds = np.where(labels == 1)[0]
neg_inds = np.where(labels == -1)[0]
# average scores for negative and positive examples
results['avg_positive_score'] = np.dot(scores[pos_inds], labels[pos_inds]) / len(pos_inds)
results['avg_negative_score'] = np.dot(scores[neg_inds], labels[neg_inds]) / len(neg_inds)
results['n_positive_samples'] = len(pos_inds)
results['n_negative_samples'] = len(neg_inds)
return results
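# Quick illustrative check of the metric with made-up scores and labels:
# evaluate(np.array([0.9, 0.2, 0.8, 0.1]), np.array([1, -1, 1, -1]))
# -> {'avg_positive_score': 0.85, 'avg_negative_score': -0.15,
#     'n_positive_samples': 2, 'n_negative_samples': 2}
# (negative-class scores are dotted with -1 labels, so avg_negative_score carries a minus sign)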
def get_labels(dataset_path, classes):
dataset_path = dataset_path.split('.')[0] # remove file extension
labels_filename = '{0}_positive_examples.json'.format(dataset_path)
with open(labels_filename) as f:
positive_examples = json.load(f)
return np.array([1 if cl in positive_examples else -1 for cl in classes])
def func_name_str(func):
return func.__name__ if hasattr(func, '__name__') else str(func)
def run_trial(trial_kwargs, labels):
duke = DatasetDescriptor(**trial_kwargs)
scores = duke.get_dataset_class_scores()
return evaluate(scores, labels)
def run_experiment(
tree_path='ontologies/class-tree_dbpedia_2016-10.json',
embedding_path='embeddings/wiki2vec/en.model',
dataset_paths=['data/185_baseball.csv'],
model_configs=[{'row_agg_func': mean_of_rows, 'tree_agg_func': np.mean, 'source_agg_func': mean_of_rows}],
max_num_samples=1e6,
verbose=True,
):
print('\nrunning evaluation trials using {0} datasets:\n{1}'.format(len(dataset_paths), '\n'.join(dataset_paths)))
print('\nand {0} configs:\n{1}\n\n'.format(
len(model_configs),
'\n'.join(
[
', '.join(['{0}: {1}'.format(key, func_name_str(val)) for (key, val) in config.items()])
for config in model_configs
]
)
))
embedding = Embedding(embedding_path=embedding_path, verbose=verbose)
tree = EmbeddedClassTree(embedding, tree_path=tree_path, verbose=verbose)
trial_kwargs = {
'tree': tree,
'embedding': embedding,
'max_num_samples': max_num_samples,
'verbose': verbose,
}
rows = []
for dat_path in dataset_paths:
print('\nloading dataset:', dat_path)
trial_kwargs['dataset'] = EmbeddedDataset(embedding, dat_path, verbose=verbose)
labels = get_labels(dat_path, tree.classes)
for config in model_configs:
print('\nrunning trial with config:', {key: func_name_str(val) for (key, val) in config.items()})
# run trial using config
trial_kwargs.update(config)
trial_results = run_trial(trial_kwargs, labels)
# add config and dataset name to results and append results to rows list
trial_results.update({key: func_name_str(val) for (key, val) in config.items()})
trial_results.update({'dataset': path_to_name(dat_path)})
rows.append(trial_results)
# print('\n\nresults from evaluation trials:\n', rows, '\n\n')
df = pd.DataFrame(rows)
df.to_csv('trials/trial_{0}.csv'.format(get_timestamp()), index=False)
return df
def all_labeled_test():
agg_func_combs = itertools.product(
[mean_of_rows, max_of_rows], # row agg funcs
[np.mean, max], # tree agg funcs
[mean_of_rows, max_of_rows], # source agg funcs
)
# create dict list of all func combinations
model_configs = [{'row_agg_func': row, 'tree_agg_func': tree, 'source_agg_func': source} for (row, tree, source) in agg_func_combs]
## manually set config list
# model_configs = [
# {'row_agg_func': mean_of_rows, 'tree_agg_func': np.mean, 'source_agg_func': mean_of_rows},
# ]
dataset_paths = glob.glob('data/*_positive_examples.json')
dataset_paths = [path.replace('_positive_examples.json', '.csv') for path in dataset_paths]
df = run_experiment(
dataset_paths=dataset_paths,
model_configs=model_configs,
)
plot_results(df)
def config_to_legend_string(config):
return ' '.join(['{0}={1}'.format(key.split('_')[0], func_name_str(val).replace('_', ' ')) for (key, val) in config.items()])
def get_config_string_col(df):
# return ['row={0} tree={1} source={2}'.format(
return ['{0}, {1}, {2}'.format(
func_name_str(row['row_agg_func']).split('_')[0],
func_name_str(row['tree_agg_func']).split('_')[0],
func_name_str(row['source_agg_func']).split('_')[0],
) for index, row in df.iterrows()]
def plot_results(trial_results=None, n_top=5):
if trial_results is None:
files = glob.glob('trials/*.csv')
most_recent = sorted(files)[-1] # assumes timestamp file suffix
df = | pd.read_csv(most_recent) | pandas.read_csv |
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert float('nan')
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists(self):
result = isnull([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isnull(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isnull([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_nat(self):
result = isnull([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime(self):
assert not isnull(datetime.now())
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notnull(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isnull(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-02'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(['1 days', 'NaT', '2 days'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]',
'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]',
'timedelta64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal( | isnull(values) | pandas.core.dtypes.missing.isnull |
from skbio import read
import os
import numpy as np
from typing import Dict
from collections import defaultdict
import pandas as pd
import matplotlib.pyplot as plt
from pysam import AlignmentFile, VariantFile
from tqdm import tqdm
from covid_bronx.quality import sam_files, fasta_files, variant_files
import skbio
def count_it(l):
l = map(lambda x: x.upper(), l)
counts = defaultdict(lambda : 0)
for x in l:
counts[x] += 1
return dict(counts)
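# Illustrative example: counting is case-insensitive because every base is upper-cased first.
# >>> count_it(['a', 'A', 't', 'g', 'g'])
# {'A': 2, 'T': 1, 'G': 2}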
df_dict = {}
samples = ['AECOM_123', 'AECOM_124', 'AECOM_125', 'AECOM_126']
for sample_id in tqdm(samples):
filename = sam_files[sample_id]
bamfile = AlignmentFile(filename)
pileup = bamfile.pileup()
df = pd.DataFrame([{"pos": p.pos, "n": p.n, "counts": count_it(p.get_query_sequences())} for p in pileup])
df.index = df['pos']
df.drop(columns=['pos'])
df_dict[sample_id] = df
vardf_all = []
for sample_id in tqdm(samples):
# Get reads to assess depth
sam_filename = sam_files[sample_id]
fasta_filename = fasta_files[sample_id]
variant_filename = variant_files[sample_id]
alignments = AlignmentFile(sam_filename).fetch()
consensus = list(skbio.read(fasta_filename, format="fasta"))[0]
coverage = np.zeros(29903)
for alignment in alignments:
coverage[alignment.positions] += 1
# Get variants
variants = VariantFile(variant_filename)
vardf_all.extend([
{
**{
key: value
for key, value
in var.info.items()
},
**{
"sample_id": sample_id,
"position": var.pos,
"quality": var.qual,
"reference": var.ref,
"alternates": var.alts,
"depth": coverage[var.pos],
},
}
for var in variants.fetch()
])
vardf_all = | pd.DataFrame(vardf_all) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 <NAME>
"""Module implementing WorkerThread."""
import logging
import os
from typing import Optional
import pandas as pd
from PyQt5.QtCore import QCoreApplication, QThread
from easyp2p.excel_writer import write_results
from easyp2p.p2p_credentials import get_credentials_from_user
from easyp2p.p2p_settings import Settings
from easyp2p.p2p_signals import Signals, PlatformFailedError
import easyp2p.platforms as p2p_platforms
_translate = QCoreApplication.translate
class WorkerThread(QThread):
"""
Worker thread to offload calls to p2p_webdriver and p2p_parser.
This class is responsible for accessing the P2P platform methods in
p2p_webdriver and to prepare the results. The main reason for separating
the calls from the main thread is to keep the GUI responsive while the
platform is being evaluated.
"""
# Signals for communicating with ProgressWindow
signals = Signals()
def __init__(self, settings: Settings) -> None:
"""
Constructor of WorkerThread.
Args:
settings: Settings for easyp2p.
"""
super().__init__()
self.logger = logging.getLogger('easyp2p.p2p_worker.WorkerThread')
self.settings = settings
self.signals.get_credentials.connect(self.get_credentials)
self.done = False
self.df_result = | pd.DataFrame() | pandas.DataFrame |
import asyncio
import json
from PoEQuery.official_api_result import OfficialApiResult
from PoEQuery.official_api import search_and_fetch_async
from PoEQuery.official_api_query import StatFilters, OfficialApiQuery
from PoEQuery.affix_finder import find_affixes
from tqdm import tqdm
def estimate_price_in_chaos(price):
if price.currency == "alch":
return price.amount * 0.2
elif price.currency == "chaos":
return price.amount * 1
elif price.currency == "exalted":
return price.amount * 100
elif price.currency == "mirror":
return price.amount * 100 * 420
else:
# print(price)
return None
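# Illustrative sketch (the chaos-equivalent rates above are hard-coded assumptions, not live rates):
# from collections import namedtuple
# Price = namedtuple("Price", ["amount", "currency"])   # stand-in for the real price object
# estimate_price_in_chaos(Price(2, "exalted"))   # -> 200
# estimate_price_in_chaos(Price(5, "fusing"))    # -> None (unrecognised currency, listing skipped)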
from pandas import DataFrame
item_classes = dict(
# BOW = "weapon.bow",
# CLAW = "weapon.claw",
# BASE_DAGGER = "weapon.basedagger",
# RUNE_DAGGER = "weapon.runedagger",
# ONE_HANDED_AXE = "weapon.oneaxe",
# ONE_HANDED_MACE = "weapon.onemace",
# ONE_HANDED_SWORD = "weapon.onesword",
# SCEPTRE = "weapon.sceptre",
# BASE_STAFF = "weapon.basestaff",
# WARSTAFF = "weapon.warstaff",
# TWO_HANDED_AXE = "weapon.twoaxe",
# TWO_HANDED_MACE = "weapon.twomace",
# TWO_HANDED_SWORD = "weapon.twosword",
# WAND = "weapon.wand",
# BODY_ARMOUR = "armour.chest",
# BOOTS = "armour.boots",
# GLOVES = "armour.gloves",
# HELMET = "armour.helmet",
# SHIELD = "armour.shield",
# QUIVER = "armour.quiver",
AMULET="accessory.amulet",
# BELT = "accessory.belt",
RING="accessory.ring",
BASE_JEWEL="jewel.base",
ABYSS_JEWEL="jewel.abyss",
CLUSTER_JEWEL="jewel.cluster",
)
for item_class_key, item_class_value in item_classes.items():
print(item_class_key)
queries = []
results_df = | DataFrame() | pandas.DataFrame |
import os
import glob
import psycopg2
import pandas as pd
from sql_queries import *
# Load data from the song_data files into the songs and artists tables
def process_song_file(cur, conn, filepath):
all_files=[]
for root, dirs, files in os.walk(filepath):
files = glob.glob(os.path.join(root, '*.json'))
for f in files:
all_files.append(os.path.abspath(f))
print('{} files are contained in "{}"'.format(len(all_files), filepath))
for i in range(len(all_files)):
df=pd.read_json(all_files[i], lines=True)
song_data = list(df[['song_id', 'title', 'artist_id', 'year', 'duration']].values[0])
artist_data = list(df[['artist_id', 'artist_name', 'artist_location','artist_latitude', 'artist_longitude']].values[0])
cur.execute(song_table_insert, song_data)
cur.execute(artist_table_insert, artist_data)
print('{}/{} files processed..'.format(i+1, len(all_files)))
conn.commit()
# Load data from the log_data files into the time, users and songplays tables
def process_log_file(cur, conn, filepath):
all_files=[]
for root, dirs, files in os.walk(filepath):
files = glob.glob(os.path.join(root, '*.json'))
for f in files:
all_files.append(os.path.abspath(f))
print('{} files are contained in "{}"'.format(len(all_files), filepath))
for j in range(len(all_files)):
df=pd.read_json(all_files[j], lines=True)
# filter rows where page == 'NextSong'
df=df[df['page']=='NextSong']
#convert timestamp column to datetime
t= | pd.to_datetime(df['ts']) | pandas.to_datetime |
from datetime import datetime, timedelta
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.compat import lrange, range, zip
import pandas as pd
from pandas import DataFrame, Series, Timestamp
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, PeriodIndex, period_range
from pandas.core.resample import _get_period_range_edges
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
import pandas.tseries.offsets as offsets
@pytest.fixture()
def _index_factory():
return period_range
@pytest.fixture
def _series_name():
return 'pi'
class TestPeriodIndex(object):
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_asfreq(self, series_and_frame, freq, kind):
# GH 12884, 15944
# make sure .asfreq() returns PeriodIndex (except kind='timestamp')
obj = series_and_frame
if kind == 'timestamp':
expected = obj.to_timestamp().resample(freq).asfreq()
else:
start = obj.index[0].to_timestamp(how='start')
end = (obj.index[-1] + obj.index.freq).to_timestamp(how='start')
new_index = date_range(start=start, end=end, freq=freq,
closed='left')
expected = obj.to_timestamp().reindex(new_index).to_period(freq)
result = obj.resample(freq, kind=kind).asfreq()
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self, series):
# test for fill value during resampling, issue 3715
s = series
new_index = date_range(s.index[0].to_timestamp(how='start'),
(s.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
assert_series_equal(result, expected)
frame = s.to_frame('value')
new_index = date_range(frame.index[0].to_timestamp(how='start'),
(frame.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W'])
@pytest.mark.parametrize('kind', [None, 'period', 'timestamp'])
@pytest.mark.parametrize('kwargs', [dict(on='date'), dict(level='d')])
def test_selection(self, index, freq, kind, kwargs):
# This is a bug, these should be implemented
# GH 14008
rng = np.arange(len(index), dtype=np.int64)
df = DataFrame({'date': index, 'a': rng},
index=pd.MultiIndex.from_arrays([rng, index],
names=['v', 'd']))
msg = ("Resampling from level= or on= selection with a PeriodIndex is"
r" not currently supported, use \.set_index\(\.\.\.\) to"
" explicitly set index")
with pytest.raises(NotImplementedError, match=msg):
df.resample(freq, kind=kind, **kwargs)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('meth', ['ffill', 'bfill'])
@pytest.mark.parametrize('conv', ['start', 'end'])
@pytest.mark.parametrize('targ', ['D', 'B', 'M'])
def test_annual_upsample_cases(self, targ, conv, meth, month,
simple_period_range_series):
ts = simple_period_range_series(
'1/1/1990', '12/31/1991', freq='A-%s' % month)
result = getattr(ts.resample(targ, convention=conv), meth)()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec').mean(), result)
assert_series_equal(ts.resample('a').mean(), result)
@pytest.mark.parametrize('rule,expected_error_msg', [
('a-dec', '<YearEnd: month=12>'),
('q-mar', '<QuarterEnd: startingMonth=3>'),
('M', '<MonthEnd>'),
('w-thu', '<Week: weekday=3>')
])
def test_not_subperiod(
self, simple_period_range_series, rule, expected_error_msg):
# These are incompatible period rules for resampling
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='w-wed')
msg = ("Frequency <Week: weekday=2> cannot be resampled to {}, as they"
" are not sub or super periods").format(expected_error_msg)
with pytest.raises(IncompatibleFrequency, match=msg):
ts.resample(rule).mean()
@pytest.mark.parametrize('freq', ['D', '2D'])
def test_basic_upsample(self, freq, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
resampled = result.resample(freq, convention='end').ffill()
expected = result.to_timestamp(freq, how='end')
expected = expected.asfreq(freq, 'ffill').to_period(freq)
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', convention='end').ffill(limit=2)
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D').ffill()
exp = df['a'].resample('D').ffill()
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M').ffill()
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('target', ['D', 'B', 'M'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_quarterly_upsample(self, month, target, convention,
simple_period_range_series):
freq = 'Q-{month}'.format(month=month)
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq=freq)
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
@pytest.mark.parametrize('target', ['D', 'B'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_monthly_upsample(self, target, convention,
simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='M')
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_basic(self):
# GH3609
s = Series(range(100), index=date_range(
'20130101', freq='s', periods=100, name='idx'), dtype='float')
s[10:30] = np.nan
index = PeriodIndex([
Period('2013-01-01 00:00', 'T'),
Period('2013-01-01 00:01', 'T')], name='idx')
expected = Series([34.5, 79.5], index=index)
result = s.to_period().resample('T', kind='period').mean()
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period').mean()
assert_series_equal(result2, expected)
@pytest.mark.parametrize('freq,expected_vals', [('M', [31, 29, 31, 9]),
('2M', [31 + 29, 31 + 9])])
def test_resample_count(self, freq, expected_vals):
# GH12774
series = Series(1, index=pd.period_range(start='2000', periods=100))
result = series.resample(freq).count()
expected_index = pd.period_range(start='2000', freq=freq,
periods=len(expected_vals))
expected = Series(expected_vals, index=expected_index)
assert_series_equal(result, expected)
def test_resample_same_freq(self, resample_method):
# GH12770
series = Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M'))
expected = series
result = getattr(series.resample('M'), resample_method)()
assert_series_equal(result, expected)
def test_resample_incompat_freq(self):
msg = ("Frequency <MonthEnd> cannot be resampled to <Week: weekday=6>,"
" as they are not sub or super periods")
with pytest.raises(IncompatibleFrequency, match=msg):
Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M')).resample('W').mean()
def test_with_local_timezone_pytz(self):
# see gh-5430
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D') -
offsets.Day())
expected = Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_resample_with_pytz(self):
# GH 13238
s = Series(2, index=pd.date_range('2017-01-01', periods=48, freq="H",
tz="US/Eastern"))
result = s.resample("D").mean()
expected = Series(2, index=pd.DatetimeIndex(['2017-01-01',
'2017-01-02'],
tz="US/Eastern"))
assert_series_equal(result, expected)
# Especially assert that the timezone is LMT for pytz
assert result.index.tz == pytz.timezone('US/Eastern')
def test_with_local_timezone_dateutil(self):
# see gh-5430
local_timezone = 'dateutil/America/Los_Angeles'
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
index = pd.date_range(start, end, freq='H', name='idx')
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D',
name='idx') - offsets.Day())
expected = Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_resample_nonexistent_time_bin_edge(self):
# GH 19375
index = date_range('2017-03-12', '2017-03-12 1:45:00', freq='15T')
s = Series(np.zeros(len(index)), index=index)
expected = s.tz_localize('US/Pacific')
result = expected.resample('900S').mean()
tm.assert_series_equal(result, expected)
# GH 23742
index = date_range(start='2017-10-10', end='2017-10-20', freq='1H')
index = index.tz_localize('UTC').tz_convert('America/Sao_Paulo')
df = DataFrame(data=list(range(len(index))), index=index)
result = df.groupby(pd.Grouper(freq='1D')).count()
expected = date_range(start='2017-10-09', end='2017-10-20', freq='D',
tz="America/Sao_Paulo",
nonexistent='shift_forward', closed='left')
tm.assert_index_equal(result.index, expected)
def test_resample_ambiguous_time_bin_edge(self):
# GH 10117
idx = pd.date_range("2014-10-25 22:00:00", "2014-10-26 00:30:00",
freq="30T", tz="Europe/London")
expected = Series(np.zeros(len(idx)), index=idx)
result = expected.resample('30T').mean()
tm.assert_series_equal(result, expected)
def test_fill_method_and_how_upsample(self):
# GH2073
s = Series(np.arange(9, dtype='int64'),
index=date_range('2010-01-01', periods=9, freq='Q'))
last = s.resample('M').ffill()
both = s.resample('M').ffill().resample('M').last().astype('int64')
assert_series_equal(last, both)
@pytest.mark.parametrize('day', DAYS)
@pytest.mark.parametrize('target', ['D', 'B'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_weekly_upsample(self, day, target, convention,
simple_period_range_series):
freq = 'W-{day}'.format(day=day)
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq=freq)
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_to_timestamps(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='M')
result = ts.resample('A-DEC', kind='timestamp').mean()
expected = ts.to_timestamp(how='start').resample('A-DEC').mean()
assert_series_equal(result, expected)
def test_resample_to_quarterly(self, simple_period_range_series):
for month in MONTHS:
ts = simple_period_range_series(
'1990', '1992', freq='A-%s' % month)
quar_ts = ts.resample('Q-%s' % month).ffill()
stamps = ts.to_timestamp('D', how='start')
qdates = period_range(ts.index[0].asfreq('D', 'start'),
ts.index[-1].asfreq('D', 'end'),
freq='Q-%s' % month)
expected = stamps.reindex(qdates.to_timestamp('D', 's'),
method='ffill')
expected.index = qdates
assert_series_equal(quar_ts, expected)
# conforms, but different month
ts = simple_period_range_series('1990', '1992', freq='A-JUN')
for how in ['start', 'end']:
result = ts.resample('Q-MAR', convention=how).ffill()
expected = ts.asfreq('Q-MAR', how=how)
expected = expected.reindex(result.index, method='ffill')
# .to_timestamp('D')
# expected = expected.resample('Q-MAR').ffill()
assert_series_equal(result, expected)
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
s = Series(np.random.randn(4), index=rng)
stamps = s.to_timestamp()
filled = s.resample('A').ffill()
expected = stamps.resample('A').ffill().to_period('A')
assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
s = Series(np.random.randn(5), index=rng)
msg = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=msg):
s.resample('A').ffill()
@pytest.mark.parametrize('freq', ['5min'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_resample_5minute(self, freq, kind):
rng = period_range('1/1/2000', '1/5/2000', freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
expected = ts.to_timestamp().resample(freq).mean()
if kind != 'timestamp':
expected = expected.to_period(freq)
result = ts.resample(freq, kind=kind).mean()
assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self, simple_period_range_series):
ts = simple_period_range_series('1/1/2000', '2/1/2000', freq='B')
result = ts.resample('D').asfreq()
expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
assert_series_equal(result, expected)
ts = simple_period_range_series('1/1/2000', '2/1/2000')
result = ts.resample('H', convention='s').asfreq()
exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
expected = ts.asfreq('H', how='s').reindex(exp_rng)
assert_series_equal(result, expected)
def test_resample_irregular_sparse(self):
dr = date_range(start='1/1/2012', freq='5min', periods=1000)
s = Series(np.array(100), index=dr)
# subset the data.
subset = s[:'2012-01-04 06:55']
result = subset.resample('10min').apply(len)
expected = s.resample('10min').apply(len).loc[result.index]
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
from pyqmc.accumulators import SqAccumulator
from pyqmc.coord import PeriodicConfigs
import numpy as np
import pandas as pd
def test_config():
a = 1
Lvecs = np.eye(3) * a
configs = np.array(
[
[-0.1592848, -0.15798219, 0.04790482],
[0.03967904, 0.50691437, 0.40398405],
[0.5295316, -0.11789016, 0.58326953],
[0.49470142, 0.39850735, 0.02882759],
]
).reshape(1, 4, 3)
df = run(Lvecs, configs, 3)
sqref = np.array(
[
4.0,
0.08956614244510086,
1.8925934706558083,
0.1953404868933881,
0.05121727442047123,
1.5398266853045084,
1.4329204824617385,
0.7457498873351416,
1.0713898023987862,
0.2976758438030117,
0.08202120690018336,
0.3755969602702992,
0.933685594722744,
2.650270169642618,
0.26674875141672655,
0.7371957610619541,
0.777701221323419,
0.9084042551734659,
2.170944896653447,
0.38328335391002477,
3.5406891971547862,
1.1884884008703132,
0.6203428839246292,
0.7075185940748288,
0.25780137400339037,
1.317648046579579,
0.8699973207672075,
]
)
diff = np.linalg.norm(df["Sq"] - sqref)
assert diff < 1e-14, diff
def test_big_cell():
import time
a = 1
ncell = (2, 2, 2)
Lvecs = np.diag(ncell) * a
unit_cell = np.zeros((4, 3))
unit_cell[1:] = (np.ones((3, 3)) - np.eye(3)) * a / 2
grid = np.meshgrid(*map(np.arange, ncell), indexing="ij")
shifts = np.stack(list(map(np.ravel, grid)), axis=1)
supercell = (shifts[:, np.newaxis] + unit_cell[np.newaxis]).reshape(1, -1, 3)
configs = supercell.repeat(1000, axis=0)
configs += np.random.randn(*configs.shape) * 0.1
df = run(Lvecs, configs, 8)
df = df.groupby("qmag").mean().reset_index()
large_q = df[-35:-10]["Sq"]
mean = np.mean(large_q - 1)
rms = np.sqrt(np.mean((large_q - 1) ** 2))
assert np.abs(mean) < 0.01, mean
assert rms < 0.1, rms
def run(Lvecs, configs, nq):
sqacc = SqAccumulator(Lvecs=Lvecs, nq=nq)
configs = PeriodicConfigs(configs, Lvecs)
sqavg = sqacc.avg(configs, None)
df = {"qmag": np.linalg.norm(sqacc.qlist, axis=1)}
df.update(sqavg)
return | pd.DataFrame(df) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from io import StringIO
import pandas as pd
import numpy as np
import operator
import math
import os
from .config import max_filesize
from .FlajoletMartin import FMEstimator
"""
dfsummarizer.funcs: Core functions of the dfsummarizer package.
 analyse_df(pandas_dataframe): return a summary dataframe of the input dataframe
 analyse_file_in_chunks(path_to_file): read the dataset in chunks and provide a summary
"""
########################################################################################
def analyse_file(path_to_file):
df = load_complete_dataframe(path_to_file)
summary = analyse_df(df)
return summary
########################################################################################
def analyse_df(df):
"""
Given a pandas dataframe that is already in memory we generate a table of summary
statistics and descriptors.
"""
colnames = df.columns
records = len(df)
df = coerce_dates(df)
    rez = pd.DataFrame(columns=('Name', 'Type', 'Mode', 'Unique Vals', 'Unique', 'Nulls', 'Min', 'Mean', 'Max'))
for name in colnames:
nacount = len(df[df[name].isna()])
napercent = round(100*nacount/records,1)
uniques = df[name].unique().tolist()
if np.nan in uniques :
uniques.remove(np.nan)
unicount = len(uniques)
unipercent = round(100*unicount/records,1)
mode = df[name].mode(dropna=False)[0]
#if (type(mode) == float) & np.isnan(mode):
# mode = "NaN"
if mode != mode:
mode = "NaN"
#valtype = infer_type(str(type(df.loc[1,name])), unicount, uniques)
valtype = infer_type_2( df.loc[:,name], 0, unicount, uniques)
if (valtype == "Char") :
lenvec = df[name].apply(lambda x: len_or_null(x))
themin = round(lenvec.min(),3) # "-"
themean = round(lenvec.mean(),3) #"-"
themax = round(lenvec.max(),3) #"-"
elif (valtype == "Bool") :
newvec = df[name].apply(lambda x: booleanize(x))
themin = round(newvec.min(),3)
themean = round(newvec.mean(),3)
themax = round(newvec.max(),3)
else:
if (valtype != "Date") :
themin = round(df[name].min(),3)
themean = round(df[name].mean(),3)
themax = round(df[name].max(),3)
else :
themin = str(df[name].min())[0:10]
themean = str(df[name].mean())[0:10] #"-"
themax = str(df[name].max())[0:10]
values_to_add = {
'Name':name,
'Type':valtype,
'Mode':mode,
'Unique Vals':unicount,
'Unique':unipercent,
'Nulls':napercent,
'Min':themin,
'Mean': themean,
'Max':themax
}
rez = rez.append(values_to_add, ignore_index=True)
return rez
########################################################################################
def analyse_file_in_chunks(path_to_file):
"""
Given a path to a large dataset we will iteratively load it in chunks and build
out the statistics necessary to summarise the whole dataset.
"""
fsize = os.stat(path_to_file).st_size
sample_prop = max_filesize / fsize
line_count = count_lines(path_to_file)
chunks = round(line_count * sample_prop)
temp = {}
data_iterator = pd.read_csv(path_to_file, chunksize=chunks, low_memory=False)
total_chunks = 0
for index, chunk in enumerate(data_iterator, start=0):
startpoint = 0 + (index*chunks)
total_chunks = index + 1
temp = update_temp_summary(temp, chunk, startpoint)
summary = generate_final_summary(temp, total_chunks)
return summary
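# Worked example of the chunking arithmetic above (figures are illustrative,
# not taken from the package config): if max_filesize is 100 MB and the file
# on disk is 1 GB, sample_prop = 0.1; for a 5,000,000-line file the iterator
# then reads chunks of roughly round(5_000_000 * 0.1) = 500,000 rows, i.e.
# about ten passes through update_temp_summary.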
########################################################################################
def generate_final_summary(temp, total_chunks):
rez = pd.DataFrame(columns=('Name', 'Mode', 'Type', 'Unique Vals', 'Unique', 'Nulls', 'Min', 'Mean', 'Max'))
for name in temp.keys():
col = temp[name]
total = col['nulls'] + col['nonnulls']
mode = max(col['val_counts'].items(), key=operator.itemgetter(1))[0]
unicount = col['uniques'].estimate()
if unicount > total:
uniprop = 1.0
unicount = total
else:
uniprop = unicount / total
unipercent = round(100 * uniprop, 1)
napercent = round((100 * col['nulls']) / total, 1)
if (col['type'] != "Date") :
themean = col['sum'] / total
else:
themean = col['mean']
values_to_add = {
'Name':name,
'Mode':mode,
'Type': col['type'],
'Unique Vals':unicount,
'Unique':unipercent,
'Nulls':napercent,
'Min': col['min'],
'Mean': themean,
'Max': col['max']
}
rez = rez.append(values_to_add, ignore_index=True)
return rez
########################################################################################
def clean_dict(df, col):
temp = df[col].value_counts(dropna=False)
indeces = temp.index
newie = []
for i in indeces:
        if i != i:  # NaN check that also works for non-numeric indices
newie.append("NaN")
else:
newie.append(i)
temp.index = newie
return temp.to_dict()
def combine_dicts(a, b, op=operator.add):
return {**a, **b, **{k: op(a[k], b[k]) for k in a.keys() & b}}
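# Minimal illustration (made-up counts) of how per-chunk value counts merge:
# keys present in both dicts are combined with `op` (addition by default),
# keys unique to either side pass through unchanged, e.g.
#   combine_dicts({"a": 2, "b": 1}, {"b": 3, "c": 4}) -> {"a": 2, "b": 4, "c": 4}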
########################################################################################
def update_temp_summary(temp, df, startpoint):
colnames = df.columns
records = len(df)
df = coerce_dates(df)
for name in colnames:
if name in temp:
rez = temp[name]
else:
rez = { "type":[], "val_counts":{}, "sum":0, "mean":np.nan,
"min":np.nan, "max":np.nan,
"uniques":FMEstimator(), "nulls":0,
"nonnulls":0
}
nacount = len(df[df[name].isna()])
nonnulls = len(df) - nacount
val_counts = clean_dict(df, name)
uniques = df[name].unique().tolist()
if np.nan in uniques :
uniques.remove(np.nan)
unicount = len(uniques)
uniprop = unicount / len(df)
valtype = infer_type_2( df.loc[:,name], startpoint, unicount, uniques)
if (valtype == "Char") :
lenvec = df[name].apply(lambda x: len_or_null(x))
themin = round(lenvec.min(),3) # "-"
thesum = round(lenvec.sum(),3) #"-"
themax = round(lenvec.max(),3) #"-"
elif (valtype == "Bool") :
newvec = df[name].apply(lambda x: booleanize(x))
themin = round(newvec.min(),3)
thesum = round(newvec.sum(),3)
themax = round(newvec.max(),3)
else:
if (valtype != "Date") :
themin = round(df[name].min(),3)
thesum = round(df[name].sum(),3)
themax = round(df[name].max(),3)
else :
themin = str(df[name].min())[0:10]
themean = df[name].mean()
themax = str(df[name].max())[0:10]
rez['type'] = valtype
if (valtype != "Date") :
rez['sum'] = rez['sum'] + thesum
else:
if isNaN(rez['mean']):
rez['mean'] = themean
# else:
# rez['mean'] = rez['mean'] + (rez['mean'] - themean)/2
# ABOVE IS OFF FOR THE MOMENT - i.e Keep the first mean
rez['nulls'] = rez['nulls'] + nacount
if isNaN( rez['min'] ) or themin < rez['min']:
rez['min'] = themin
if isNaN( rez['max'] ) or themax > rez['max']:
rez['max'] = themax
rez['uniques'].update_all(uniques)
#rez['uniques'] += uniprop
rez['val_counts'] = combine_dicts(rez['val_counts'], val_counts)
rez['nonnulls'] = rez['nonnulls'] + nonnulls
temp[name] = rez
return temp
########################################################################################
def extract_file_extension(path_to_file):
return os.path.splitext(path_to_file)[1]
########################################################################################
def load_complete_dataframe(path_to_file):
"""
We load the entire dataset into memory, using the file extension to determine
    the expected format. We are using encoding='latin1' because it appears to
    permit loading of the largest variety of files.
    The representation of strings may not be perfect, but that is not important
    for generating a summarization of the entire dataset.
"""
extension = extract_file_extension(path_to_file).lower()
if extension == ".csv":
df = pd.read_csv(path_to_file, encoding='latin1', low_memory=False)
return df
if extension == ".tsv":
df = | pd.read_csv(path_to_file, encoding='latin1', sep='\t', low_memory=False) | pandas.read_csv |
import pandas as pd
from plotly import graph_objects as go
from plotly.subplots import make_subplots
import os
import plotly
import re
benchmarks = ['Celecoxib rediscovery', 'Troglitazone rediscovery', 'Thiothixene rediscovery',
'Aripiprazole similarity', 'Albuterol similarity', 'Mestranol similarity', 'C11H24',
'C9H10N2O2PF2Cl', 'Median molecules 1', 'Median molecules 2', 'Osimertinib MPO',
'Fexofenadine MPO', 'Ranolazine MPO', 'Perindopril MPO', 'Amlodipine MPO', 'Sitagliptin MPO',
'Zaleplon MPO', 'Valsartan SMARTS', 'Scaffold Hop', 'Deco Hop']
benchmarks_cap = ['Celecoxib Rediscovery', 'Troglitazone Rediscovery', 'Thiothixene Rediscovery',
'Aripiprazole Similarity', 'Albuterol Similarity', 'Mestranol Similarity', 'C11H24',
'C9H10N2O2PF2Cl', 'Median Molecules 1', 'Median Molecules 2', 'Osimertinib MPO',
'Fexofenadine MPO', 'Ranolazine MPO', 'Perindopril MPO', 'Amlodipine MPO', 'Sitagliptin MPO',
'Zaleplon MPO', 'Valsartan SMARTS', 'Scaffold Hop', 'Deco Hop']
def load_and_parse_benchmark_scores(location, generations=False):
df = pd.read_csv(location)
if not generations:
df = df[['index', 'score']]
else:
df = df[['index', 'generations']]
df = df.set_index('index')
return df
def gather_all_benchmark_results(location='all_results', generations=False):
df = None
for folder in os.listdir(location):
if folder=='plots' or 'counterscreen' in folder or 'reported' in folder:
continue
current_df = load_and_parse_benchmark_scores(os.path.join(location, folder, 'results.csv'),
generations=generations)
current_df.columns = [folder]
if isinstance(df, pd.DataFrame):
df = df.join(current_df)
else:
df = current_df
return df
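# Hedged usage sketch (the folder layout is an assumption, not defined here):
# every sub-directory of `location` except the excluded ones is expected to
# contain a results.csv with 'index' and 'score'/'generations' columns, and
# contributes one column, named after the folder, to the combined frame.
#
#   scores_df = gather_all_benchmark_results('all_results')
#   gens_df = gather_all_benchmark_results('all_results', generations=True)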
def gather_all_benchmark_generations(location='all_results'):
df = None
for folder in os.listdir(location):
if folder=='plots':
continue
current_df = load_and_parse_benchmark_scores(os.path.join(location, folder, 'results.csv'), generations=True)
current_df.columns = [folder]
if isinstance(df, pd.DataFrame):
df = df.join(current_df)
else:
df = current_df
return df
def graph_compare_benchmarks(df, title, out_loc, space=0.15, height=900, width=1600):
layout = go.Layout(
plot_bgcolor='rgba(0,0,0,0)'
)
markers = ['diamond', 'x', 'circle', 'triangle-up', 'cross', 'square']
fig = go.Figure(layout=layout)
colour_set = ['#8aff00', '#005bed', '#ff0000', '#ff00ff', '#ff7f00']
x_labels = benchmarks_cap
deltas = [(-(len(df.columns)-1)*space + 2*space*k) for k in range(len(df.columns))]
i = 0
df.to_csv(out_loc + '.csv')
for idx, column in enumerate(df.columns):
fig.add_trace(
go.Scatter(
name=column,
mode='markers',
legendgroup=column,
#x0=deltas[idx]+1,
x0=1,
dx=1,
y=df[column],
marker=dict(
symbol='line-ew',
#color=plotly.colors.qualitative.Set1[idx],
size=40,
opacity=0.5,
line=dict(
color=colour_set[idx],
width=5
),
),
showlegend=True,
)
)
fig.add_trace(
go.Scatter(
name=column,
mode='markers',
legendgroup=column,
x0=deltas[idx]+1,
#x0=1,
dx=1,
y=df[column],
marker=dict(
symbol=markers[idx],
color='#000000',
size=7,
opacity=1,
),
),
)
i += 1
# grey rectangular sections for each benchmark
for j in range(1, len(x_labels)+1):
fig.add_shape(
# filled Rectangle
type="rect",
x0=j-0.45,
y0=-0.05,
x1=j+0.45,
y1=1.05,
line=dict(
width=0,
),
fillcolor='rgba(0.1,.1,.10,.10)',
)
# black outline for each category
for k, l in [(1, 3), (4, 6), (7, 8), (9, 10), (11, 17), (18, 20)]:
fig.add_shape(
type="rect",
x0=k-0.5,
y0=-0.05,
x1=l+0.5,
y1=1.05,
line=dict(
width=5,
color='Black'
),
)
fig.update_layout(
yaxis_title='Score',
xaxis_title='Benchmark',
title=title,
height=height,
width=width
)
fig.update_xaxes(showgrid=False, ticks='outside', ticktext=x_labels, tickmode='array', tickvals=list(range(1, 21)),
tickangle=45, range=[0.4, 20.6])
fig.update_yaxes(ticks='outside', range=[-0.05, 1.05])
fig.write_image(out_loc+'.png')
fig.write_image(out_loc+'.svg')
def graph_compare_benchmark_generations(df, title, out_loc, space=0.15, height=900, width=1600):
layout = go.Layout(
plot_bgcolor='rgba(0,0,0,0)'
)
y_max = max(df.to_numpy().flatten())
colour_set = ['#8aff00', '#005bed', '#ff0000', '#ff00ff', '#ff7f00']
#markers = ['diamond', 'x', 'circle', 'triangle-up', 'cross', 'square']
fig = go.Figure(layout=layout)
x_labels = benchmarks_cap
#deltas = [(-(len(df.columns)-1)*space + 2*space*k) for k in range(len(df.columns))]
i = 0
for idx, column in enumerate(df.columns):
fig.add_trace(
go.Scatter(
name=column,
mode='markers',
legendgroup=column,
#x0=deltas[idx]+1,
x0=1,
dx=1,
y=df[column],
marker=dict(
symbol='line-ew',
size=40,
opacity=0.5,
line=dict(
color=colour_set[idx],
width=5
),
),
showlegend=True,
)
)
i += 1
# grey rectangular sections for each benchmark
for j in range(1, len(x_labels)+1):
fig.add_shape(
# filled Rectangle
type="rect",
x0=j-0.45,
y0=0,
x1=j+0.45,
y1=y_max+(y_max*0.05),
line=dict(
width=0,
),
fillcolor='rgba(0.1,.1,.10,.10)',
)
# black outline for each category
for k, l in [(1, 3), (4, 6), (7, 8), (9, 10), (11, 17), (18, 20)]:
fig.add_shape(
type="rect",
x0=k-0.5,
y0=0,
x1=l+0.5,
y1=y_max+(y_max*0.05),
line=dict(
width=5,
color='Black'
),
)
fig.update_layout(
yaxis_title='Score',
xaxis_title='Benchmark',
title=title,
height=height,
width=width
)
fig.update_xaxes(showgrid=False, ticks='outside', ticktext=x_labels, tickmode='array', tickvals=list(range(1, 21)),
tickangle=45, range=[0.4, 20.6], showticklabels=True)
fig.update_yaxes(ticks='outside', range=[0, y_max+(y_max*0.05)], showticklabels=True)
fig.write_image(out_loc+'.png')
fig.write_image(out_loc+'.svg')
def load_and_parse_generational_scores(location):
index = pd.MultiIndex.from_product(
[pd.Series(benchmarks, name='benchmark'),
pd.Series(['best', 'worst', 'mean', 'std', 'population_mean'], name='kind'),
pd.Series([os.path.split(location)[-1]], name='name')])
df = pd.DataFrame(index=index, columns=range(1, 1001))
df = df.fillna(0)
for kind in ['best', 'worst', 'mean', 'std', 'population_mean']:
try:
with open(os.path.join(location, f'{kind}_scores_by_generation.txt'), 'r') as f:
lines = f.readlines()
if not len(lines) == 20:
print(os.path.join(location, f'{kind}_scores_by_generation.txt'), 'does not have 20 lines')
continue
for (bm, line) in zip(benchmarks, lines):
vals = line.strip().split(', ')
df.loc[(bm, kind, os.path.split(location)[-1]), 1:len(vals)] = vals if len(vals) > 1 else vals[0]
except FileNotFoundError:
print(os.path.join(location, f'{kind}_scores_by_generation.txt'), 'does not exist')
df = df.reindex(pd.MultiIndex.from_tuples(df.index)).reset_index().set_index('level_0')
return df.rename({'level_1': 'kind', 'level_2': 'experiment'}, axis=1)
def gather_all_generational_results(location='all_results/graph_GA_filtering'):
df = pd.DataFrame(columns=range(1, 1001))
for folder in os.listdir(location):
if 'best_scores_by_generation.txt' in os.listdir(os.path.join(location, folder)):
print(folder)
current_df = load_and_parse_generational_scores(os.path.join(location, folder))
df = | pd.concat([df, current_df], axis=0) | pandas.concat |
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
( | pd.date_range('2014-01-01', '2014-01-05', tz='US/Central') | pandas.date_range |
import math
import numpy as np
import pandas as pd
from scipy import sparse
from tqdm import tqdm
def convert_List_to_Dict(adjList):
"""
Convert adjacency list in the form:
[(source, target, time), (source, target time), ...]
to an adjacency dictionary, with timestamps as keys:
{ t: (source, target), ... }
This will improve speed when looking for interactions at a
particular timestamp.
"""
adjDict = {pd.to_datetime(i[2]): [] for i in adjList}
for i in tqdm(adjList):
t = pd.to_datetime(i[2])
adjDict[t].append((i[0], i[1]))
return adjDict
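# Small illustration (made-up interactions) of the conversion above: events
# sharing a timestamp are grouped under one key, so
#   convert_List_to_Dict([(0, 1, '2020-01-01 00:00'),
#                         (1, 2, '2020-01-01 00:00'),
#                         (2, 0, '2020-01-01 01:00')])
# returns {Timestamp('2020-01-01 00:00'): [(0, 1), (1, 2)],
#          Timestamp('2020-01-01 01:00'): [(2, 0)]}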
def getdirAdjNow(adjDict, t, n):
"""
Input an unweighted adjacency dictionary in the form:
    { t: [(source, target), ...], ... }
and obtain the directed adjacency matrix for time t.
Specify the number of nodes as input.
"""
t = pd.to_datetime(t)
A_t = sparse.csr_matrix((n, n), dtype=np.int8)
if t in adjDict.keys():
for i in adjDict[t]:
row = np.array([i[0]])
col = np.array([i[1]])
data = np.array([1])
A = sparse.csr_matrix((data, (row, col)), shape=(n, n), dtype=np.int8)
A_t += A
else:
pass
return A_t
def getDecayAdjBatch(adjDict, t1, t2, B_1, nodes, alpha):
"""
Input an unweighted adjacency dictionary in the form:
{ t: (source, target), ... }
along with the first time point, t1, and the second time point, t2.
The initial decay matrix B_1 (at t1) is needed as input.
Specify the list of nodes.
Obtain the decay adjacency matrix for this time window.
"""
relKeys = [k for k in adjDict.keys() if (k >= t1 and k < t2)]
decayDict = {
k: math.exp(-alpha * (pd.to_datetime(t2) - pd.to_datetime(k)).total_seconds())
for k in relKeys
}
# A_t = sparse.csr_matrix((n,n),dtype=np.int8)
# sources = [adjDict[k][0] for k in relKeys]
# targets = [adjDict[k][1] for k in relKeys]
B = B_1.multiply(
math.exp(-alpha * ( | pd.to_datetime(t2) | pandas.to_datetime |
# Modifications and additions to code written by brooksandrew
import osmnx as ox
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
import gpxpy
from collections import Counter
def circuit_path_string_to_int(circuit_rpp):
"""
Converts nodes in path lists from strings to integers
Args:
circuit_rpp (list): rpp circuit generated by postman_problems.solver.rpp
Returns:
circuit_rpp (list): modified circuit
"""
for e in circuit_rpp:
if type(e[3]['path']) == str:
exec('e[3]["path"]=' + e[3]["path"])
return(circuit_rpp)
def create_req_and_opt_graph(req_comp_g_contracted, complete_g, circuit_rpp, GranularConnector_EdgeList):
"""
Creates a graph with required and optional edges delineated for visualization
Args:
req_comp_g_contracted (NetworkX MultiDiGraph): required component graph containing contracted edges
generated by initialize_rpp.InnerAndOuterToEdgeListFile
complete_g (NetworkX MultiDiGraph): complete street network graph
generated by initialize_rpp.InnerAndOuterToEdgeListFile
circuit_rpp (list): rpp circuit generated by postman_problems.solver.rpp and
edited by circuit_path_string_to_int
Returns:
final_graph (NetworkX DiGraph): graph of route with optional and required edges delineated
print statements with required and optional edge breakdown
"""
final_graph = req_comp_g_contracted.copy()
unexpanded_edges = 0
unexpanded_edges_list = []
granular_connector_edges = 0
granular_connector_edges_list = []
granular_req_edges = 0
granular_req_edges_list = []
optional_edges = 0
optional_edges_list = []
for e in circuit_rpp:
if [e[0], e[1]] not in unexpanded_edges_list:
unexpanded_edges+=1
unexpanded_edges_list+=[[e[0], e[1]]]
# add granular optional edges to final_graph
path = e[3]['path']
for pair in list(zip(path[:-1], path[1:])):
if (req_comp_g_contracted.has_edge(pair[0], pair[1])):
edge = req_comp_g_contracted[pair[0]][pair[1]][0]
if [pair[0], pair[1]] in GranularConnector_EdgeList:
final_graph[pair[0]][pair[1]][0]['granular_type'] = 'req street and connector'
if [pair[0], pair[1]] not in granular_connector_edges_list:
granular_connector_edges+=1
granular_connector_edges_list+=[[pair[0], pair[1]]]
elif 1 in req_comp_g_contracted[pair[0]][pair[1]]:
granular_connector_edges+=1
else:
if [pair[0], pair[1]] not in granular_req_edges_list:
final_graph[pair[0]][pair[1]][0]['granular_type'] = 'req street'
granular_req_edges+=1
granular_req_edges_list+=[[pair[0], pair[1]]]
else:
if [pair[0], pair[1]] in GranularConnector_EdgeList:
if [pair[0], pair[1]] not in granular_connector_edges_list:
final_graph.add_edge(pair[0], pair[1], granular=True, granular_type='connector')
granular_connector_edges+=1
granular_connector_edges_list+=[[pair[0], pair[1]]]
elif [pair[0], pair[1]] not in optional_edges_list:
final_graph.add_edge(pair[0], pair[1], granular=True, granular_type='optional')
optional_edges+=1
optional_edges_list+=[[pair[0], pair[1]]]
else:
print(pair)
for n in path:
final_graph.add_node(n, y=complete_g.nodes[n]['y'], x=complete_g.nodes[n]['x'])
print('Edges in Circuit')
print('\tTotal Unexpanded Edges: {}'.format(unexpanded_edges))
print('\tTotal Edges (All Contracted Edges Granularized): {}'.format(granular_connector_edges+granular_req_edges+optional_edges))
print('\t\tGranular Connector Edges: {}'.format(granular_connector_edges))
print('\t\tGranular Required Edges: {}'.format(granular_req_edges))
print('\t\tGranular Optional Edges: {}'.format(optional_edges))
return final_graph
def create_number_of_passes_graph(circuit_rpp, complete_g):
"""
labels each edge with number of passes for visualization
Args:
circuit_rpp (list): rpp circuit generated by postman_problems.solver.rpp and
edited by circuit_path_string_to_int
complete_g (NetworkX MultiDiGraph): complete street network graph
generated by initialize_rpp.InnerAndOuterToEdgeListFile
Returns:
grppviz(NetworkX Graph): route graph with number of pass attribute added to each edge
"""
color_seq = [None, 'black', 'magenta', 'orange', 'yellow']
grppviz = nx.Graph()
for e in circuit_rpp:
for n1, n2 in zip(e[3]['path'][:-1], e[3]['path'][1:]):
if grppviz.has_edge(n1, n2):
grppviz[n1][n2]['cnt'] += 1
if grppviz[n1][n2]['cnt'] < 5:
grppviz[n1][n2]['linewidth'] += 2
else:
grppviz.add_edge(n1, n2, linewidth=2.5)
grppviz[n1][n2]['cnt'] = 1
grppviz.add_node(n1, y=complete_g.nodes[n1]['y'], x=complete_g.nodes[n1]['x'])
grppviz.add_node(n2, y=complete_g.nodes[n2]['y'], x=complete_g.nodes[n2]['x'])
for e in grppviz.edges(data=True):
if e[2]['cnt'] < 5:
e[2]['color_cnt'] = color_seq[e[2]['cnt']]
else:
e[2]['color_cnt'] = color_seq[4]
return grppviz
def circuit_parser(circuit_rpp, complete_g, print_file, print_directory):
"""
Creates dataframe of lat/lon coordinates corresponding to each node in the route
Prints the dataframe to a csv file
Args:
circuit_rpp (list): rpp circuit generated by postman_problems.solver.rpp and
edited by circuit_path_string_to_int
complete_g (NetworkX MultiDiGraph): complete street network graph
generated by initialize_rpp.InnerAndOuterToEdgeListFile
            lat/lon data contained here
print_file (str): filename to print to
print_directory (str): directory name for print_file
Returns:
rppdf (Pandas DataFrame): table of lat/lon coordinates of nodes in order they appear in route
prints table to csv file
"""
rpplist = []
for ee in circuit_rpp:
path = ee[3]['path'].copy()
if ee == circuit_rpp[0]:
if path[-1] in circuit_rpp[1][3]['path']:
path = path
else:
path.reverse()
path = path
for n in path:
rpplist.append({
'node' : n,
'lat' : complete_g.nodes[n]['y'],
'lon' : complete_g.nodes[n]['x']
})
else:
if path[0] == rpplist[-1]['node']:
path = path[1:]
elif path[-1] == rpplist[-1]['node']:
path.reverse()
path = path[1:]
for n in path:
rpplist.append({
'node' : n,
'lat' : complete_g.nodes[n]['y'],
'lon' : complete_g.nodes[n]['x']
})
rppdf = | pd.DataFrame(rpplist) | pandas.DataFrame |
from pathlib import Path
import pandas as pd
import numpy as np
from matplotlib.font_manager import FontProperties
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
grandpadir = os.path.dirname(os.path.dirname(currentdir))
sys.path.insert(0, grandpadir)
from collections import OrderedDict
from utils.helper_functions import read_nav_files, sort_files_by_dim
from analysis.comparison.comparison_utils import get_dataset_name, read_proteus_files, read_baseline_files, reform_pseudo_samples_dict
from utils.pseudo_samples import PseudoSamplesMger
from utils.shared_names import FileKeys, FileNames
import matplotlib.pyplot as plt
import statsmodels.api as sm
pipeline_grouping = 'results_predictive_grouping'
pipeline_no_grouping = 'results_predictive'
expl_size = 10
noise_level = None
keep_only_prot_fs = False
datasets = {
'wbc',
'ionosphere',
'arrhythmia'
}
# test_confs = [
# {'path': Path('..', pipeline, 'loda'), 'detector': 'loda', 'type': 'test'},
# # {'path': Path('..', pipeline, 'iforest'), 'detector': 'iforest', 'type': 'test'}
# ]
synth_confs =[
{'path': Path('..', pipeline_grouping, 'iforest'), 'detector': 'iforest', 'type': 'synthetic'},
{'path': Path('..', pipeline_grouping, 'lof'), 'detector': 'lof', 'type': 'synthetic'},
{'path': Path('..', pipeline_grouping, 'loda'), 'detector': 'loda', 'type': 'synthetic'}
]
real_confs = [
{'path': Path('..', pipeline_grouping, 'iforest'), 'detector': 'iforest', 'type': 'real'},
{'path': Path('..', pipeline_grouping, 'lof'), 'detector': 'lof', 'type': 'real'},
{'path': Path('..', pipeline_grouping, 'loda'), 'detector': 'loda', 'type': 'real'}
]
synth_confs_no_grouping = [
{'path': Path('..', pipeline_no_grouping, 'iforest'), 'detector': 'iforest', 'type': 'synthetic'},
{'path': Path('..', pipeline_no_grouping, 'lof'), 'detector': 'lof', 'type': 'synthetic'},
{'path': Path('..', pipeline_no_grouping, 'loda'), 'detector': 'loda', 'type': 'synthetic'}
]
confs_to_analyze = synth_confs
def plot_panels():
synth_no_grouping = unstructured_perfs(synth_confs_no_grouping)
synth_grouping = structured_perfs(synth_confs)
real_grouping = unstructured_perfs(real_confs)
bias_plot(synth_grouping, real_grouping, synth_no_grouping)
# test_auc_plot(pred_perfs_dict, 0)
# test_auc_plot(pred_perfs_dict, 1)
def best_models(conf):
best_models_perf_in_sample = pd.DataFrame()
cv_estimates = pd.DataFrame()
ci_in_sample = pd.DataFrame()
error_in_sample = pd.DataFrame()
best_models_perf_out_of_sample = | pd.DataFrame() | pandas.DataFrame |
#%%
# Import everything we need
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
from datetime import datetime
from datetime import datetime as dt
from sklearn.model_selection import cross_val_score, TimeSeriesSplit, RandomizedSearchCV, GridSearchCV
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LassoCV, RidgeCV
import xgboost
from xgboost import XGBRegressor
import lightgbm as lgb
from plotly import __version__
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly import graph_objs as go
import chart_studio.plotly
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
init_notebook_mode(connected = True)
import warnings
warnings.filterwarnings('ignore')
#%%
# Split data in a different way
def timeseries_train_test_split(X, y, test_size):
"""
Perform train-test split with respect to time series structure
"""
# get the index after which test set starts
test_index = int(len(X)*(1-test_size))
X_train = X.iloc[:test_index]
y_train = y.iloc[:test_index]
X_test = X.iloc[test_index:]
y_test = y.iloc[test_index:]
return X_train, X_test, y_train, y_test
# for time-series cross-validation set 5 folds
tscv = TimeSeriesSplit(n_splits=5)
# Metric
def mean_absolute_percentage_error(y_true, y_pred):
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
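#%%
# Quick self-check of the helpers above on synthetic data (illustrative only,
# not part of the original analysis): the split keeps the last 30% of rows as
# the test set, and a forecast that is uniformly 10% low has a MAPE of 10%.
_demo = pd.DataFrame({'y': np.arange(1.0, 11.0)})
_X_tr, _X_te, _y_tr, _y_te = timeseries_train_test_split(_demo, _demo['y'], test_size=0.3)
assert len(_y_te) == 3 and _y_te.iloc[0] == 8.0
assert np.isclose(mean_absolute_percentage_error(_y_te, 0.9 * _y_te), 10.0)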
#%%
#--------------------------------------------------------------------------------------------------
#----------------------------------------- Data paths ---------------------------------------------
data_cases_path = os.path.join('data','cases_localidades.csv')
data_movement_change_path = os.path.join('data','Movement','movement_range_colombian_cities.csv')
data_GT_path = os.path.join('data','Google_Trends','trends_BOG.csv')
data_GT_id_terms_path = os.path.join('data','Google_Trends','terms_id_ES.csv')
data_GT_search_terms_path = os.path.join('data','Google_Trends','search_terms_ES.csv')
#--------------------------------------------------------------------------------------------------
#----------------------------------------- Load data ----------------------------------------------
### Load confirmed cases for Bogota
data_cases = pd.read_csv(data_cases_path, usecols=['date_time','location','num_cases','num_diseased'])
data_cases['date_time'] = pd.to_datetime(data_cases['date_time'], format='%Y-%m-%d') # converted to datetime
# data_cases = data_cases[data_cases['date_time'] <= date_th]
last_cases_conf_date = data_cases['date_time'].iloc[-1]
data_cases = data_cases.groupby('date_time').sum()
# Smooth data
data_cases['num_cases_7dRA'] = data_cases['num_cases'].rolling(window=7).mean()
data_cases['num_diseased_7dRA'] = data_cases['num_diseased'].rolling(window=7).mean()
### Load mobility data for Bogota
data_movement_change = pd.read_csv(data_movement_change_path, parse_dates=['date_time']).set_index('poly_id')
data_movement_change = data_movement_change.loc[11001].sort_values(by='date_time')
# Smooth data
data_movement_change['movement_change_7dRA'] = data_movement_change['movement_change'].rolling(window=7).mean()
#data_movement_change['movement_change_7dRA'].iloc[:6] = data_movement_change['movement_change'].iloc[:6]
data_movement_change = data_movement_change[data_movement_change['date_time'] <= last_cases_conf_date]
data_movement_change = data_movement_change.reset_index() ; data_movement_change = data_movement_change.set_index('date_time')
data_movement_change = data_movement_change.drop('poly_id', axis=1)
### Load Google Trends data for Bogota
data_GT = pd.read_csv(data_GT_path, usecols=['date_time','anosmia','fiebre','covid'])
data_GT['date_time'] = | pd.to_datetime(data_GT['date_time'], format='%Y-%m-%d') | pandas.to_datetime |
import time
import queue
import pandas as pd
import numpy as np
from ..utils import skills_util
from ..inferencing.multi_thread_inference import InferenceThread
def inference(
conversation,
workspace_id,
test_data,
max_retries=10,
max_thread=5,
verbose=False,
user_id="256",
):
"""
query the message api to generate results on the test data
:parameter: conversation: the conversation object produced by AssistantV1 api
:parameter: workspace_id: the workspace id of the
:parameter: test_data: the data that will be sent to the classifier
:parameter: max_retries: the maximum number of retries
:parameter: max_thread: the max number of threads to use for multi-threaded inference
:parameter: verbose: flag indicates verbosity of outputs during mutli-threaded inference
:return result_df: results dataframe
"""
if max_thread == 1:
reach_max_retry = False
responses = []
for test_example, ground_truth in zip(
test_data["utterance"], test_data["intent"]
):
attempt = 1
            while attempt <= max_retries:
                success_flag = False
                try:
                    prediction_json = skills_util.retrieve_classifier_response(
                        conversation, workspace_id, test_example, True, user_id
                    )
                    time.sleep(0.3)
                    success_flag = True
                except Exception:
                    # count failed attempts so the retry cap can actually be hit
                    pass
                if success_flag:
                    break
                attempt += 1
if attempt > max_retries:
reach_max_retry = True
if reach_max_retry:
raise Exception("Maximum attempt of {} has reached".format(max_retries))
if not prediction_json["intents"]:
responses.append(
{
"top_intent": skills_util.OFFTOPIC_LABEL,
"top_confidence": 0.0,
"correct_intent": ground_truth,
"utterance": test_example,
"top_predicts": [],
"entities": [],
}
)
else:
responses.append(
{
"top_intent": prediction_json["intents"][0]["intent"],
"top_confidence": prediction_json["intents"][0]["confidence"],
"correct_intent": ground_truth,
"utterance": test_example,
"top_predicts": prediction_json["intents"],
"entities": prediction_json["entities"],
}
)
result_df = pd.DataFrame(data=responses)
else:
result_df = thread_inference(
conversation,
workspace_id,
test_data,
max_retries,
max_thread,
verbose,
user_id,
)
return result_df
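# Hedged usage sketch (workspace id and data are placeholders, not real
# values): given an AssistantV1 `conversation` client and a test frame with
# 'utterance' and 'intent' columns, a single-threaded run looks like
#
#   results = inference(conversation, '<workspace-id>', test_df, max_thread=1)
#   mistakes = calculate_mistakes(results)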
def thread_inference(
conversation,
workspace_id,
test_data,
max_retries=10,
max_thread=5,
verbose=False,
user_id="256",
):
"""
Perform multi thread inference for faster inference time
:param conversation:
:param workspace_id: Assistant workspace id
:param test_data: data to test on
:param max_retries: max retries for each call
:param max_thread: max threads to use
:param verbose: verbosity of output
:param user_id: user_id for billing purpose
:return result_df: results dataframe
"""
if max_thread > 5:
print("only maximum of 5 threads are allowed")
thread_list = ["Thread-1", "Thread-2", "Thread-3", "Thread-4", "Thread-5"]
thread_list = thread_list[:max_thread]
query_queue = queue.Queue(0)
threads = []
thread_id = 1
result = list()
start_time = time.time()
for i in range(len(test_data)):
data_point = [test_data["utterance"].iloc[i], test_data["intent"].iloc[i]]
query_queue.put(data_point)
# Create new threads
for thread_name in thread_list:
thread = InferenceThread(
thread_id,
thread_name,
query_queue,
conversation,
workspace_id,
result,
max_retries,
verbose,
user_id,
)
thread.start()
threads.append(thread)
thread_id += 1
while len(result) != len(test_data):
pass
for thread in threads:
thread.join()
print("--- Total time: {} seconds ---".format(time.time() - start_time))
result_df = pd.DataFrame(data=result)
return result_df
def get_intents_confidences(conversation, workspace_id, text_input):
"""
Retrieve a list of confidence for analysis purpose
:param conversation: conversation instance
:param workspace_id: workspace id
:param text_input: input utterance
:return intent_conf: intent confidences
"""
response_info = skills_util.retrieve_classifier_response(
conversation, workspace_id, text_input, True
)["intents"]
intent_conf = [(r["intent"], r["confidence"]) for r in response_info]
return intent_conf
def calculate_mistakes(results):
"""
retrieve the data frame of miss-classified examples
:param results: results after tersting
:return wrongs_df: data frame of mistakes
"""
wrongs = list()
for idx, row in results.iterrows():
if row["correct_intent"] != row["top_intent"]:
wrongs.append(row)
wrongs_df = | pd.DataFrame(data=wrongs) | pandas.DataFrame |
import cairo
import pycha.pie
import pandas
from datos import data
d=data('mtcars')
ps = | pandas.Series([i for i in d.cyl]) | pandas.Series |
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for USEquityPricingLoader and related classes.
"""
from parameterized import parameterized
import sys
import numpy as np
from numpy.testing import (
assert_allclose,
assert_array_equal,
)
import pandas as pd
from pandas.testing import assert_frame_equal
from toolz.curried.operator import getitem
from zipline.lib.adjustment import Float64Multiply
from zipline.pipeline.domain import US_EQUITIES
from zipline.pipeline.loaders.synthetic import (
NullAdjustmentReader,
make_bar_data,
expected_bar_values_2d,
)
from zipline.pipeline.loaders.equity_pricing_loader import (
USEquityPricingLoader,
)
from zipline.errors import WindowLengthTooLong
from zipline.pipeline.data import USEquityPricing
from zipline.testing import (
seconds_to_timestamp,
str_to_seconds,
MockDailyBarReader,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
ZiplineTestCase,
)
import pytest
# Test calendar ranges over the month of June 2015
# June 2015
# Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
TEST_CALENDAR_START = pd.Timestamp("2015-06-01", tz="UTC")
TEST_CALENDAR_STOP = pd.Timestamp("2015-06-30", tz="UTC")
TEST_QUERY_START = pd.Timestamp("2015-06-10", tz="UTC")
TEST_QUERY_STOP = pd.Timestamp("2015-06-19", tz="UTC")
# One asset for each of the cases enumerated in load_raw_arrays_from_bcolz.
EQUITY_INFO = pd.DataFrame(
[
# 1) The equity's trades start and end before query.
{"start_date": "2015-06-01", "end_date": "2015-06-05"},
# 2) The equity's trades start and end after query.
{"start_date": "2015-06-22", "end_date": "2015-06-30"},
# 3) The equity's data covers all dates in range.
{"start_date": "2015-06-02", "end_date": "2015-06-30"},
# 4) The equity's trades start before the query start, but stop
# before the query end.
{"start_date": "2015-06-01", "end_date": "2015-06-15"},
# 5) The equity's trades start and end during the query.
{"start_date": "2015-06-12", "end_date": "2015-06-18"},
# 6) The equity's trades start during the query, but extend through
# the whole query.
{"start_date": "2015-06-15", "end_date": "2015-06-25"},
],
index=np.arange(1, 7),
columns=["start_date", "end_date"],
).astype(np.datetime64)
EQUITY_INFO["symbol"] = [chr(ord("A") + n) for n in range(len(EQUITY_INFO))]
EQUITY_INFO["exchange"] = "TEST"
TEST_QUERY_SIDS = EQUITY_INFO.index
# ADJUSTMENTS use the following scheme to indicate information about the value
# upon inspection.
#
# 1s place is the equity
#
# 0.1s place is the action type, with:
#
# splits, 1
# mergers, 2
# dividends, 3
#
# 0.001s is the date
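#
# For example, a ratio of 3.112 decodes as sid 3, a split (1), effective on
# the 12th; 4.212 decodes as sid 4, a merger (2), also on the 12th.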
SPLITS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-03"),
"ratio": 1.103,
"sid": 1,
},
# First day of query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-10"),
"ratio": 3.110,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 3.112,
"sid": 3,
},
# After query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-21"),
"ratio": 6.121,
"sid": 6,
},
# Another action in query range, should have last_row of 1
{
"effective_date": str_to_seconds("2015-06-11"),
"ratio": 3.111,
"sid": 3,
},
# Last day of range. Should have last_row of 7
{
"effective_date": str_to_seconds("2015-06-19"),
"ratio": 3.119,
"sid": 3,
},
],
columns=["effective_date", "ratio", "sid"],
)
MERGERS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-03"),
"ratio": 1.203,
"sid": 1,
},
# First day of query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-10"),
"ratio": 3.210,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 3.212,
"sid": 3,
},
# After query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-25"),
"ratio": 6.225,
"sid": 6,
},
# Another action in query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 4.212,
"sid": 4,
},
# Last day of range. Should have last_row of 7
{
"effective_date": str_to_seconds("2015-06-19"),
"ratio": 3.219,
"sid": 3,
},
],
columns=["effective_date", "ratio", "sid"],
)
DIVIDENDS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"declared_date": pd.Timestamp("2015-05-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-03", tz="UTC").to_datetime64(),
"pay_date": | pd.Timestamp("2015-06-05", tz="UTC") | pandas.Timestamp |
# ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
"""
sources.py
This file will contain more general versions of data containers than
the sources defined in uncertainty_sources.py. The main class that this module
exports is the Source class which is intended to store data of any sort in
a dataframe. This class should not be modified (unless there is a bug) to be
made more specific; it should be subclassed. In addition, unless the
method will obviously change the state of the object, all methods should
produce new objects instead of modifying objects.
"""
import sys
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import prescient.util.distributions.copula as copula
from prescient.util.distributions.distributions import UnivariateEmpiricalDistribution
from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution
import prescient.gosm.derivative_patterns.derivative_patterns as dp
from prescient.gosm.markov_chains.states import State
from prescient.gosm.sources.segmenter import Criterion
power_sources = ['solar', 'wind', 'hydro']
recognized_sources = ['solar', 'wind', 'load', 'hydro']
# Default parameters for the non-required parameters for sources
defaults = {
'is_deterministic': False,
'frac_nondispatch': 1,
'scaling_factor': 1,
'forecasts_as_actuals': False,
'aggregate': False,
}
class Source:
"""
This class should act as a container for all the data related to a single
source. The data is stored internally in a Pandas Dataframe.
This class should have methods for segmentation (more generally pick
all datetimes that satisfy a certain a criterion) and selection.
Attributes:
name (str): The name of the source
data (pd.DataFrame): The internal dataframe storing all the data
source_type (str): The type of the source (wind, solar, load, etc.)
Args:
name (str): the name of the source
dataframe (pd.DataFrame): The frame containing all the data
source_type (str): the type of the source (e.g. 'solar')
"""
def __init__(self, name, dataframe, source_type):
self.name = name
self.data = dataframe
# A little validation is done here
# We check for duplicates
if dataframe.index.duplicated().any():
duplicates = dataframe.index[dataframe.index.duplicated()]
raise ValueError("Error: Source {} has duplicate datetimes at {}"
.format(name, ", ".join(map(str, duplicates))))
self.source_type = source_type.lower()
if source_type.lower() not in recognized_sources:
raise ValueError("The source type '{}' is unrecognized, the only "
"recognized sources are {}"
.format(source_type,
", ".join(recognized_sources)))
def check_for_column(self, column_name):
"""
This method will check if the source has a column with the name
specified. If it doesn't it will raise a RuntimeError.
Args:
column_name (str): The name of the column to check
"""
if column_name not in self.data.columns:
raise RuntimeError("Source {} has no '{}' column".format(
self.name, column_name))
def window(self, column_name, lower_bound=-np.inf, upper_bound=np.inf):
"""
Finds the window of data such that the column value is between
the two bounds specified. Returns a Source object with data
contained in the window. The bounds are inclusive.
Args:
column_name (str): The name of the column
            lower_bound (float): The lower bound; defaults to -inf (no lower bound)
            upper_bound (float): The upper bound; defaults to inf (no upper bound)
Returns:
Source: The window of data
"""
self.check_for_column(column_name)
new_frame = self.data[(self.data[column_name] >= lower_bound) &
(self.data[column_name] <= upper_bound)]
return Source(self.name, new_frame, self.source_type)
def enumerate(self, column_name, value):
"""
Finds the window of data such that the column field is equal to the
value. Returns a Source object with the data contained in the window.
Args:
column_name (str): The name of the column
value: The value you want all datetimes to have in the new window
Returns:
Source: The data will have all rows which match value
"""
self.check_for_column(column_name)
new_frame = self.data[self.data[column_name] == value]
return Source(self.name, new_frame, self.source_type)
def rolling_window(self, day, historic_data_start=None,
historic_data_end=None):
"""
Creates a Rolling Window of data which contains a historic dataframe
and a dayahead dataframe. The historic data is all data up to the day
and the dayahead data is the data for that day.
Using non-datetime objects (pd.TimeStamp, strings, np.datetime64)
probably works but not guaranteed. This is contingent on pandas
datetime indexing.
Args:
day (datetime.datetime): The datetime referring to hour zero of the
desired day to create a window up to that day
historic_data_start (datetime.datetime): The datetime of the start
of the historic data, if None just use start of data
historic_data_end (datetime.datetime): The datetime of the end of
the historic data, if None draws up to the day passed
Returns:
RollingWindow: The rolling window of data
"""
# If start not specified, we take the first date in dataframe
if historic_data_start is None:
historic_data_start = min(self.data.index)
# If end not specified, we take last date before the passed in day
if historic_data_end is None:
historic_data_end = day - datetime.timedelta(hours=1)
historic_frame = self.data[historic_data_start:historic_data_end]
dayahead_frame = self.data[day:day+datetime.timedelta(hours=23)]
        # This suppresses warnings. This should return a copy anyway, so we
        # don't need a warning.
historic_frame.is_copy = False
dayahead_frame.is_copy = False
return RollingWindow(self.name, historic_frame,
self.source_type, dayahead_frame)
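        # Illustrative usage (the source name and frame below are hypothetical, not from this module):
        #   source = Source('wind_plant', hourly_frame, 'wind')
        #   window = source.rolling_window(datetime.datetime(2020, 6, 1))
        # yields a RollingWindow whose historic frame ends at 2020-05-31 23:00 and whose
        # dayahead frame covers 2020-06-01 00:00 through 23:00.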
def solar_window(self, day, historic_data_start=None,
historic_data_end=None):
"""
Creates a SolarWindow of data which contains a historic dataframe
and a dayahead dataframe. The historic data is all data up to the day
and the dayahead data is the data for that day.
Using non-datetime objects (pd.TimeStamp, strings, np.datetime64)
probably works but not guaranteed. This is contingent on pandas
datetime indexing.
Args:
day (datetime.datetime): The datetime referring to hour zero of the
desired day to create a window up to that day
historic_data_start (datetime.datetime): The datetime of the start
of the historic data, if None just use start of data
historic_data_end (datetime.datetime): The datetime of the end of
the historic data, if None draws up to the day passed
Returns:
SolarWindow: The rolling window of data
"""
window = self.rolling_window(day, historic_data_start,
historic_data_end)
return window.solar_window()
def add_column(self, column_name, series):
"""
Adds a column of data to the dataframe. This data should be indexed
by datetime.
Args:
column_name (str): The name of the column to add
series (pd.Series or dict[datetime.datetime,value]): The data
indexed by datetime to add to the dataset
"""
self.data[column_name] = pd.Series(series)
def get_day_of_data(self, column_name, day):
"""
This function returns a pandas Series of all the data in the column
with the specific day as an index.
Args:
column_name (str): The desired column
day (datetime-like): the day, which can be coerced into
a pd.Timestamp
Returns:
pd.Series: A series of relevant data
"""
self.check_for_column(column_name)
dt = pd.Timestamp(day)
column = self.data[column_name]
return column[column.index.date == dt.date()]
def get(self, column_name, row_index):
"""
Get the value stored in column specified by column_name and the row
specified by the row_index
Args:
column_name (str): The name of the column
row_index (datetime.datetime): The datetime for which you want data
"""
self.check_for_column(column_name)
return self.data[column_name][row_index]
def get_column(self, column_name):
"""
Returns the column of data with that column name. This will also return
a column without any nan values.
Args:
column_name (str): The name of the column
Returns:
pd.Series: The requested column
"""
self.check_for_column(column_name)
return self.data[column_name].dropna()
def get_state_walk(self, state_description, class_=State):
"""
This method should walk through the datetimes and construct a sequence
of the different states of the historic data. The specification for
what constitutes a state is passed in the state_description argument.
Args:
state_description (StateDescription): Specification for determining
what the state for each datetime is
class_ (Class): The type of state you wish to instantiate
Returns:
A dictionary of mapping datetimes to states constituting the walk
"""
states = {}
names = state_description.keys()
for dt in self.data.index:
name_value_mapping = {name: self.get(name, dt) for name in names}
states[dt] = state_description.to_state(class_,
**name_value_mapping)
return states
def get_state(self, state_description, dt, class_=State):
"""
This method should create the state for a specific datetime.
The specification for what constitutes a state is passed in
the state_description argument.
Args:
state_description (StateDescription): Specification for determining
what the state for each datetime is
dt (datetime.datetime): The relevant datetime
class_ (Class): The type of state you wish to instantiate
Returns:
State: The state of the datetime
"""
dt = pd.Timestamp(dt)
names = state_description.keys()
name_value_mapping = {name: self.get(name, dt) for name in names}
return state_description.to_state(class_, **name_value_mapping)
def get_quantiles(self, column_name, quantiles):
"""
This method returns the quantiles of the column specified
Args:
column_name (str): The desired column to compute quantiles
quantiles (List[float]): A list of floating points in [0,1]
Returns:
List[float]: The corresponding quantiles
"""
self.check_for_column(column_name)
return list(self.data[column_name].quantile(quantiles))
def sample(self, column_name, lower_bound=-np.inf, upper_bound=np.inf):
"""
This draws a sample from the data in the column that is between
lower bound and upper bound. If no lower or upper bound is specified,
then there is no bound on the data sampled.
Args:
column_name (str): The name of the column
lower_bound (float): The lower bound, if not specified,
no lower bound
upper_bound (float): The upper bound, if not specified,
no upper bound
Returns:
float: A single sampled value
"""
self.check_for_column(column_name)
window = self.window(column_name, lower_bound, upper_bound)
column = window.get_column(column_name)
return float(column.sample())
def apply_bounds(self, column_name, lower_bound=-np.inf,
upper_bound=np.inf):
"""
This function should take the column with the name specified and
fix any value in the column below the corresponding value in
lower_bound to the lower_bound and likewise for upper_bound.
lower_bound and upper_bound may be Pandas Series or they may
be a single value acting as a bound. If no lower bound is passed,
the lower bound is minus infinity, similarly for upper bound, if none
is passed, the upper bound is infinity.
This function changes the state of the source.
Args:
column_name (str): Name of the column
lower_bound (float or pd.Series): A lower bound for data
upper_bound (float or pd.Series): An upper bound for data
"""
self.check_for_column(column_name)
if lower_bound is None:
lower_bound = -np.inf
if upper_bound is None:
upper_bound = np.inf
column = self.data[column_name]
self.data[column_name] = column.clip(lower_bound, upper_bound)
def interpolate(self, column_name):
"""
This function will interpolate the column specified so that every
hour between the start of the data and the end of the data has a value.
This function changes the state of the source.
Args:
column_name (str): name of the column to interpolate
"""
self.check_for_column(column_name)
start_date = min(self.data.index)
end_date = max(self.data.index)
        date_range = pd.date_range(start_date, end_date, freq='H')
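        # The source is truncated here; a hedged sketch of how the method might finish,
        # given the docstring (ensure a row for every hour, then interpolate the column):
        #   self.data = self.data.reindex(self.data.index.union(date_range))
        #   self.data[column_name] = self.data[column_name].interpolate(method='linear')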
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import csv, json, shutil
import numpy as np
import pandas as pd
from flask import Flask, request, jsonify
from flask_cors import CORS
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.arima_model import ARIMAResults
from calendar import monthrange
from datetime import datetime, timedelta
app=Flask(__name__)
CORS(app)
data_to_send = []
def mlwilldoit(time):
arima_fitted_model = ARIMAResults.load("models/arima_model.pkl")
diff = monthdelta(datetime.strptime("2017-07-01","%Y-%m-%d"),datetime.strptime(time,"%Y-%m-%d"))
return str(arima_fitted_model.forecast(diff)[0][-1])
def monthdelta(d1, d2):
delta = 0
while True:
mdays = monthrange(d1.year, d1.month)[1]
d1 += timedelta(days = mdays)
if d1 <= d2:
delta += 1
else:
break
return delta
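# Worked example: monthdelta(datetime(2017, 7, 1), datetime(2017, 10, 15)) == 3
# (Jul->Aug, Aug->Sep, Sep->Oct), so mlwilldoit("2017-10-01") forecasts 3 monthly
# steps beyond the training cut-off (2017-07-01) and returns the last forecast value.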
def csv_to_json(csv_file = "C:/Python35/projs/ml/cpi_prediction/data/CPIndex_Jan13-To-Jul19.csv"):
df = pd.read_csv(csv_file)
columns = list(df)
with open(csv_file, "r") as f:
reader = csv.DictReader(f)
out = json.dumps([row for row in reader])
return out
#def linear_regression():
def pd_csv_to_json(csv_file = "C:/Python35/projs/ml/cpi_prediction/data/CPIndex_Jan13-To-Jul19.csv"):
df = pd.read_csv(csv_file)
df.drop(columns=["Rural", "Urban", "Status"], axis = 1, inplace = True)
df.rename(columns = {"Combined": "CPI"}, inplace = True)
df.to_json("data/no_rural_urban_cpi.json", orient = "records")
def train():
print("Training model now")
global data_to_send
cpi_index = pd.read_json("data/no_rural_urban_cpi.json", orient = "records")
fish_cpi_index = cpi_index[cpi_index.Group==1.0]
fish_cpi_index = fish_cpi_index[fish_cpi_index["Sub Group"]=="1.1.02"]
combined_fish_cpi = fish_cpi_index[['Year', 'Month','CPI']]
months = {"January" : 1, "February" : 2, "March" : 3, "April" : 4, "May": 5, "June" : 6, "July" : 7, "August" : 8, "September" : 9, "October" : 10, "November" : 11, "December" : 12}
combined_fish_cpi['Month'] = combined_fish_cpi['Month'].map(lambda x: months[x])
combined_fish_cpi['Timestamp'] = pd.to_datetime({'year' : combined_fish_cpi['Year'], 'month' : combined_fish_cpi['Month'], 'day':[1] * combined_fish_cpi.shape[0]})
combined_fish_cpi = combined_fish_cpi.drop(["Year", "Month"], axis = 1)
combined_fish_cpi = combined_fish_cpi.set_index("Timestamp")
combined_fish_cpi["CPI"] = | pd.to_numeric(combined_fish_cpi["CPI"]) | pandas.to_numeric |
from mock import patch, MagicMock
import six
import cufflinks.datagen as cfdg
import pandas as pd
from datetime import datetime, timedelta
from sklearn.datasets import make_classification
from crowdsource.types.utils import _metric, checkAnswer, fetchDataset, answerPrototype
from crowdsource.persistence.models import Competition, Submission
from crowdsource.types.competition import CompetitionSpec
from crowdsource.types.submission import SubmissionSpec
from crowdsource.enums import CompetitionType, CompetitionMetric, DatasetFormat
def foo3(competitionSpec, *args, **kwargs):
import pandas
# import time
from sklearn import linear_model
data = competitionSpec.dataset
answers = []
if isinstance(competitionSpec.targets, dict):
return
targets = [competitionSpec.targets] if isinstance(competitionSpec.targets, six.string_types) else data.columns if competitionSpec.targets is None else competitionSpec.targets
val = competitionSpec.when
when = val.timestamp()
# print(targets)
for col in targets:
reg = linear_model.LinearRegression()
x = data[col].index.astype(int).values.reshape(len(data[col].index), 1)
y = data[col].values.reshape(len(data[col]), 1)
reg.fit(x, y)
print('******************')
answers.append(reg.predict([[when]])[0][0])
answers.append(when)
return pandas.DataFrame([answers], columns=targets+['when']).set_index(['when'])
def foo5(competitionSpec, *args, **kwargs):
if isinstance(competitionSpec.dataset, six.string_types):
dataset = fetchDataset(competitionSpec)
else:
return
answer = answerPrototype(competitionSpec, dataset)
return answer.fillna(0)
class TestUtils:
def test_validateSpec(self):
SubmissionSpec.validate(None, None, None)
def test_metric1(self):
x = _metric(CompetitionMetric.LOGLOSS, pd.Series([1, 2]), pd.Series([2, 3]))
print(x)
assert x > 17
assert x < 18
def test_metric2(self):
        x = _metric(CompetitionMetric.ABSDIFF, pd.DataFrame([1])
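        # The source row is truncated here; a completed call would need a second frame, e.g.
        #   x = _metric(CompetitionMetric.ABSDIFF, pd.DataFrame([1]), pd.DataFrame([2]))
        # (the second argument and any follow-up assertions are assumptions, not from the source).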
import datetime as dt
import os
import unittest
import numpy as np
import pandas as pd
import devicely
class SpacelabsTestCase(unittest.TestCase):
READ_PATH = "tests/SpaceLabs_test_data/spacelabs.abp"
WRITE_PATH = "tests/SpaceLabs_test_data/spacelabs_written.abp"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.expected_subject = "000002"
timestamps = pd.to_datetime([
"1.1.99 17:03",
"1.1.99 17:05",
"1.1.99 17:07",
"1.1.99 17:09",
"1.1.99 17:11",
"1.1.99 17:13",
"1.1.99 17:25",
"1.1.99 17:28",
"1.1.99 17:31",
"1.1.99 17:34",
"1.1.99 17:36",
"1.1.99 17:39",
"1.1.99 23:42",
"1.1.99 23:59",
"1.2.99 00:01",
])
self.expected_data = pd.DataFrame({
"timestamp": timestamps,
"date": timestamps.map(lambda timestamp: timestamp.date()),
"time": timestamps.map(lambda timestamp: timestamp.time()),
"SYS(mmHg)": [11, 142, 152, 151, 145, 3, 4, 164, 154, 149, 153, 148, 148, 148, 148],
"DIA(mmHg)": [0, 118, 112, 115, 110, 0, 0, 119, 116, 119, 118, 114, 114, 114, 114],
"ACC_x": [0, 99, 95, 96, 91, 0, 0, 95, 95, 98, 96, 93, 93, 93, 93],
"ACC_y": [0, 61, 61, 61, 59, 0, 0, 63, 63, 63, 60, 62, 62, 62, 62],
"ACC_z": 15 * [np.nan],
"error": ["EB", np.nan, np.nan, np.nan, np.nan, "EB", "EB", np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
})
self.expected_metadata = {
"PATIENTINFO": {
"DOB": "16.09.1966",
"RACE": "native american"
},
"REPORTINFO": {
"PHYSICIAN": "Dr. <NAME>",
"NURSETECH": "admin",
"STATUS": "NOTCONFIRMED",
"CALIPERSUMMARY": {
"COUNT": "0"
},
},
}
def setUp(self):
self.spacelabs_reader = devicely.SpacelabsReader(self.READ_PATH)
def test_read(self):
# Tests if a basic reading operation.
pd.testing.assert_frame_equal(self.spacelabs_reader.data,
self.expected_data)
self.assertEqual(self.spacelabs_reader.subject, self.expected_subject)
self.assertEqual(self.spacelabs_reader.metadata,
self.expected_metadata)
def test_deidentify(self):
# Tests if the SpacelabsReader.deidentify method removes all patient metadata.
self.spacelabs_reader.deidentify()
self.assertEqual(self.spacelabs_reader.subject, "")
self.assertEqual(
self.spacelabs_reader.metadata,
{
"PATIENTINFO": {
"DOB": "",
"RACE": ""
},
"REPORTINFO": {
"PHYSICIAN": "",
"NURSETECH": "",
"STATUS": "",
"CALIPERSUMMARY": {
"COUNT": ""
},
},
},
)
def test_write(self):
# Tests the SpacelabsReader.write operation by writing, reading again and comparing the old and new signals.
self.spacelabs_reader.write(self.WRITE_PATH)
new_reader = devicely.SpacelabsReader(self.WRITE_PATH)
pd.testing.assert_frame_equal(new_reader.data,
self.spacelabs_reader.data)
self.assertEqual(new_reader.metadata, self.spacelabs_reader.metadata)
self.assertEqual(new_reader.subject, self.spacelabs_reader.subject)
os.remove(self.WRITE_PATH)
def test_random_timeshift(self):
earliest_possible_shifted_time_col = pd.to_datetime([
'1997-01-01 17:03:00',
'1997-01-01 17:05:00',
'1997-01-01 17:07:00',
'1997-01-01 17:09:00',
'1997-01-01 17:11:00',
'1997-01-01 17:13:00',
'1997-01-01 17:25:00',
'1997-01-01 17:28:00',
'1997-01-01 17:31:00',
'1997-01-01 17:34:00',
'1997-01-01 17:36:00',
'1997-01-01 17:39:00',
'1997-01-01 23:42:00',
'1997-01-01 23:59:00',
'1997-01-02 00:01:00'
])
latest_possible_shifted_time_col = pd.to_datetime([
'1998-12-02 17:03:00',
'1998-12-02 17:05:00',
'1998-12-02 17:07:00',
'1998-12-02 17:09:00',
'1998-12-02 17:11:00',
'1998-12-02 17:13:00',
'1998-12-02 17:25:00',
'1998-12-02 17:28:00',
'1998-12-02 17:31:00',
'1998-12-02 17:34:00',
'1998-12-02 17:36:00',
'1998-12-02 17:39:00',
'1998-12-02 23:42:00',
'1998-12-02 23:59:00',
'1998-12-03 00:01:00'
])
old_timestamp_column = self.spacelabs_reader.data["timestamp"].copy()
self.spacelabs_reader.timeshift()
new_timestamp_column = self.spacelabs_reader.data["timestamp"]
self.assertTrue((earliest_possible_shifted_time_col <= new_timestamp_column).all())
self.assertTrue((new_timestamp_column <= latest_possible_shifted_time_col).all())
new_date_column = self.spacelabs_reader.data["date"]
new_time_column = self.spacelabs_reader.data["time"]
testing_timestamp_column = pd.Series([
dt.datetime.combine(new_date_column[i], new_time_column[i])
for i in range(len(self.spacelabs_reader.data))
])
pd.testing.assert_series_equal(new_timestamp_column, testing_timestamp_column, check_names=False)
def test_drop_EB(self):
# The drop_EB method should make timestamp the index column and remove all rows with 'EB' entries in the error column.
self.expected_data.drop(index=[0, 5, 6], inplace=True)
self.expected_data.set_index("timestamp", inplace=True)
self.spacelabs_reader.drop_EB()
pd.testing.assert_frame_equal(self.spacelabs_reader.data, self.expected_data)
        # When run again, drop_EB should not do anything.
self.spacelabs_reader.drop_EB()
pd.testing.assert_frame_equal(self.spacelabs_reader.data, self.expected_data)
def test_set_window_column(self):
self.spacelabs_reader.set_window(dt.timedelta(seconds=30), "bfill")
window_start = pd.to_datetime("1.1.99 17:02:30")
window_end = pd.to_datetime("1.1.99 17:03:00")
self.assertEqual(window_start, self.spacelabs_reader.data.loc[0, "window_start"])
self.assertEqual(window_end, self.spacelabs_reader.data.loc[0, "window_end"])
self.spacelabs_reader.set_window(dt.timedelta(seconds=30), "bffill")
window_start = pd.to_datetime("1.1.99 17:02:45")
window_end = pd.to_datetime("1.1.99 17:03:15")
self.assertEqual(window_start, self.spacelabs_reader.data.loc[0, "window_start"])
self.assertEqual(window_end, self.spacelabs_reader.data.loc[0, "window_end"])
self.spacelabs_reader.set_window(dt.timedelta(seconds=30), "ffill")
window_start = pd.to_datetime("1.1.99 17:03:00")
window_end = pd.to_datetime("1.1.99 17:03:30")
self.assertEqual(window_start, self.spacelabs_reader.data.loc[0, "window_start"])
self.assertEqual(window_end, self.spacelabs_reader.data.loc[0, "window_end"])
def test_set_window_index(self):
self.spacelabs_reader.drop_EB()
self.spacelabs_reader.set_window(dt.timedelta(seconds=30), "bfill")
window_start = pd.to_datetime("1.1.99 17:04:30")
window_end = pd.to_datetime("1.1.99 17:05:00")
self.assertEqual(window_start, self.spacelabs_reader.data[["window_start"]].iloc[0].values)
self.assertEqual(window_end, self.spacelabs_reader.data[["window_end"]].iloc[0].values)
self.spacelabs_reader.set_window(dt.timedelta(seconds=30), "bffill")
window_start = pd.to_datetime("1.1.99 17:04:45")
window_end = pd.to_datetime("1.1.99 17:05:15")
self.assertEqual(window_start, self.spacelabs_reader.data[["window_start"]].iloc[0].values)
self.assertEqual(window_end, self.spacelabs_reader.data[["window_end"]].iloc[0].values)
self.spacelabs_reader.set_window(dt.timedelta(seconds=30), "ffill")
        window_start = pd.to_datetime("1.1.99 17:05:00")
import pandas as pd
import argparse
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt
titles = ["Evaluation on all documents", "Evaluation on tweets only"]
def visualize_days(path, count):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 3))
for enum, algo in enumerate(["louvain_macro_tfidf", "louvain_macro_tfidf tweets only"]):
res = pd.DataFrame()
for i in range(count):
results = pd.read_csv(path.replace(".csv", "_{}.csv".format(i)))
results = results[
(results.algo == algo) & (results.weights_text == 1) & (results.similarity == 0.3) & (results.t == 0.7)]
            res = pd.concat([res, results])
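        # The source is truncated here; a hedged sketch of how each panel might be drawn
        # from the accumulated results (the aggregation and metric columns are assumptions):
        #   ax = (ax1, ax2)[enum]
        #   sns.boxplot(data=res, ax=ax)
        #   ax.set_title(titles[enum])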
import argparse
import shelve
import dbm
import os.path
import pandas as pd
import tikzplotlib
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.ticker import FuncFormatter
import pprint
from datetime import timedelta
from pathlib import Path
import numpy as np
LOG_PATH = "/Users/madsadrian/Springfield/ChangeDetection/logs/"
LOG_PATH = "/Users/madsadrian/OneDrive - UiT Office 365/tensorboard-logs/"
MODEL_SORT_ORDER = {key: i for i, key in enumerate(["--X", "-CX", "A-X", "ACX"])}
model_sorter = lambda tuple_: MODEL_SORT_ORDER[tuple_[0]]
PRIOR_BASELINE = {"Texas": 0.6516, "California": 0.2038}
def td_print(td):
""" Print formatter for pd.timedelta """
comps = td.round("1s").components
retval = ""
for i, s in enumerate(["days", "hours", "min", "sec"]):
if comps[i] > 0:
retval += f"{comps[i]}{s} "
if retval == "":
retval = "$\\cdot$"
return retval
def experiment_one_boxplot(epoch=99, dataset="Texas"):
""" Make boxplot for experiment one """
path_ = os.path.join(LOG_PATH, "Final/Experiment 01", dataset)
df = pd.read_csv(os.path.join(path_, "metrics.csv"))
df = df[df["epoch"] == epoch]
df["training time"] = pd.to_timedelta(df["training time"], unit="s")
order = {
"A--": [0],
"-C-": [1],
"--X": [2],
"AC-": [3],
"A-X": [4],
"-CX": [5],
"ACX": [6],
}
if ARGS.to_tikz:
df["model"] = df["model"].apply(lambda s: f"\\m{{{s}}}")
order = {f"\\m{{{key}}}": value for key, value in order.items()}
print("Model", "Average", "$\\sigma$", "Max", sep="\t& ", end="\t\\\\\n")
fig, ax = plt.subplots()
for name, gdf in df.groupby("model"):
ax.boxplot(gdf["cohens kappa"], positions=order[name], widths=0.8)
d = gdf["training time"].describe()
print(
name,
td_print(d["mean"]),
td_print(d["std"]),
td_print(d["max"]),
sep="\t& ",
end="\t\\\\\n",
)
ax.set_xticks(range(len(order)))
ax.set_xticklabels(order.keys())
ax.set_xlabel("Model")
ax.set_ylabel("Cohens $\\kappa$")
ax.grid(axis="y")
ax.axhline(PRIOR_BASELINE[dataset], c="red")
if ARGS.to_tikz:
tikzplotlib.save(
figure=fig,
filepath=os.path.join(path_, f"E1-{dataset}-cohens-kappa-boxplot.tex"),
figureheight=".4\\textheight",
figurewidth=".95\\textwidth",
)
else:
plt.show()
def color_box_plot(bp, edge_color=None, fill_color=None, hatch=None, label=None):
if edge_color is not None:
for element in ["boxes", "whiskers", "fliers", "means", "medians", "caps"]:
plt.setp(bp[element], color=edge_color)
if fill_color is not None:
for patch in bp["boxes"]:
patch.set(facecolor=fill_color)
if hatch is not None:
patch.set_hatch(hatch)
if label is not None:
patch.set_label(label)
return patch
def recurse(obj, prefix=""):
print(prefix, type(obj), "\t", obj)
for child in obj.get_children():
recurse(child, prefix + "\t")
COLORS = {"A": "yellow", "B": "cyan", "C": "magenta", "D": "orange"}
PLT_HATCHES = {"A": r"////", "B": r"\\\\", "C": r"O", "D": r"."}
TIKZ_HATCHES = {
"A": "north east lines",
"B": "north west lines",
"C": "crosshatch",
"D": "dots",
}
Y_LIMS = {"Texas": (0.64, 0.89), "California": (0.19, 0.56)}
def experiment_two_paired_boxplot(
epoch=99,
dataset="Texas",
pairs=(("A", "C"), ("B", "D")),
metric="cohens kappa",
labels=("Fixed Learning Rate", "Decay Learning Rate"),
y_label="Cohens $\\kappa$",
training_times=False,
):
""" Make boxplot for experiment one """
assert dataset in ["Texas", "California"]
e_num = 2 if dataset == "Texas" else 3
order = {"A-X": np.array([3]), "-CX": np.array([1]), "ACX": np.array([5])}
if ARGS.to_tikz:
order = {f"\\m{{{key}}}": value for key, value in order.items()}
path_ = LOG_PATH + "/Resultater/"
figs, axes = [], []
for pair in pairs:
fig, ax = plt.subplots()
figs.append(fig)
axes.append(ax)
legend_hs = []
for name, x_mod, label in zip(pair, [-0.25, 0.25], labels):
fill_color, hatch = COLORS[name], PLT_HATCHES[name]
df = pd.read_csv(path_ + f"{name}-{dataset}-metrics.csv")
df = df[df["epoch"] == epoch]
df["training time"] = pd.to_timedelta(df["training time"], unit="s")
df["model"] = df["model"].apply(lambda s: s.strip("p"))
if ARGS.to_tikz:
df["model"] = df["model"].apply(lambda s: f"\\m{{{s}}}")
if training_times:
print(f"\n\n{dataset} {name} Training times")
print(
"Model",
"Average",
"$\\sigma$",
"Max",
sep="\t& ",
end="\t\\\\\\hline\n",
)
for name, gdf in df.groupby("model"):
bp = ax.boxplot(
gdf[metric],
whis=[5, 95],
positions=order[name] + x_mod,
widths=0.4,
patch_artist=True,
)
lh = color_box_plot(bp, fill_color=fill_color, hatch=hatch, label=label)
if training_times:
d = gdf["training time"].describe()
print(
name,
td_print(d["mean"]),
td_print(d["std"]),
td_print(d["max"]),
sep="\t& ",
end="\t\\\\\n",
)
if label is not None:
legend_hs.append(lh)
ax.axhline(PRIOR_BASELINE[dataset], c="red")
ax.set_xticks(np.concatenate(list(order.values())))
ax.set_xticklabels(order.keys())
ax.set_xlabel("Model")
ax.set_ylabel(y_label)
ax.set_ylim(Y_LIMS[dataset])
ax.grid(axis="y")
ax.legend(handles=legend_hs, loc="lower right")
# for pair, fig, ax in zip(pairs, figs, axes):
# recurse(fig)
if ARGS.to_tikz:
for pair, fig in zip(pairs, figs):
code = tikzplotlib.get_tikz_code(
figure=fig, figureheight=".4\\textheight", figurewidth=".95\\textwidth"
)
legend_str = "\n"
colors = ("color1", "color2")
directions = ("east", "west")
for label, name, col in zip(labels, pair, colors):
legend_str += "\\addlegendentry{" + label + "};\n"
old = f"fill={col}"
new = f"pattern={TIKZ_HATCHES[name]}, pattern color={col}"
code = code.replace(old, new)
legend_str += (
"\\addlegendimage{draw=black, area legend, " + new + "};\n"
)
if metric == "cohens kappa":
code = code.replace("\\end{axis}", legend_str + "\n\\end{axis}")
tag = metric if metric != "cohens kappa" else "K"
filepath = os.path.join(LOG_PATH, f"{dataset}-{pair[0]}{pair[1]}-{tag}.tex")
with open(filepath, "w") as f:
f.write(code)
else:
plt.show()
def min_sec_formatter(value, pos):
td = timedelta(seconds=value * 1e-9)
return td
def boxplot_training_time(df):
ax = df.boxplot(column="training time", by="model")
ax.set_title(f"Training time after epoch {epoch}")
ax.set_xlabel("Method")
ax.set_ylabel("Training time [hh:mm:ss]")
ax.ticklabel_format(axis="y", style="plain")
ymajor_formatter = FuncFormatter(min_sec_formatter)
ax.yaxis.set_major_formatter(ymajor_formatter)
if ARGS.to_tikz:
tikzplotlib.save(os.path.join(LOG_PATH, TAG, "training_time.tex"))
else:
plt.show()
def boxplot_metrics(df):
# for epoch, edf in df.groupby(by="epoch"):
if True:
edf = df
epoch = 50
for model, mdf in edf.groupby("model"):
print(f"{model} average training time", mdf["training time"].mean())
ax = edf.boxplot(column="cohens kappa", by="model", grid=False)
ax.set_title(f"Cohens $\\kappa$ after epoch {epoch}")
ax.set_xlabel("Method")
ax.set_ylabel("Cohens $\\kappa$")
if ARGS.to_tikz:
tikzplotlib.save(os.path.join(LOG_PATH, TAG, "cohens_kappa.tex"))
else:
plt.show()
def labeled_subplots(nrows, ncols, row_labs, col_labs, pad=5):
"""
fig
col_labs
row_labs
pad=5 in points
"""
assert len(col_labs) == ncols
assert len(row_labs) == nrows
# cols = ["Column {}".format(col) for col in range(1, 4)]
# rows = ["Row {}".format(row) for row in ["A", "B", "C", "D"]]
fig, axes = plt.subplots(nrows=nrows, ncols=ncols)
plt.setp(axes.flat, xlabel="X-label", ylabel="Y-label")
for ax, col in zip(axes[0], col_labs):
ax.annotate(
col,
xy=(0.5, 1),
xytext=(0, pad),
xycoords="axes fraction",
textcoords="offset points",
size="large",
ha="center",
va="baseline",
)
for ax, row in zip(axes[:, 0], row_labs):
ax.annotate(
row,
xy=(0, 0.5),
xytext=(-ax.yaxis.labelpad - pad, 0),
xycoords=ax.yaxis.label,
textcoords="offset points",
size="large",
ha="right",
va="center",
)
fig.tight_layout()
# tight_layout doesn't take these labels into account. We'll need
    # to make some room. These numbers are manually tweaked.
# You could automatically calculate them, but it's a pain.
fig.subplots_adjust(left=0.15, top=0.95)
return fig, axes
def tensor_string_to_float(s):
return float(s[10:].split(",")[0])
def plot_experiment_losses(model_dir, metrics=None, ax=None):
convs = {
"reg": tensor_string_to_float,
"cross": tensor_string_to_float,
"aff": tensor_string_to_float,
"cycle": tensor_string_to_float,
}
metrics = metrics or {"x_loss": "fx_losses.csv", "y_loss": "fy_losses.csv"}
dfs = {key: [] for key in metrics.keys()}
print(model_dir.path, model_dir.name)
with os.scandir(model_dir.path) as it:
for entry in it:
# iterate timestamp level
if entry.name == ".DS_Store":
continue
print(entry.name, model_dir.name)
for key, file_name in metrics.items():
df = pd.read_csv(os.path.join(entry.path, file_name), converters=convs)
# df["model"] = model_dir.name
# df["timestamp"] = entry.name
dfs[key].append(df)
colors = {"reg": "y", "cross": "r", "aff": "b", "cycle": "g"}
fig, axes = plt.subplots(1, 2)
    fig.suptitle(model_dir.name)  # Figure has no set_title; suptitle is the intended call here
for ax, (key, df_list) in zip(axes.flatten(), dfs.items()):
df = pd.concat(df_list)
df2 = df.groupby(["epoch"], as_index=False).agg(["mean", "std"])
ax.set_yscale("log")
ax.grid(axis="y", which="both")
ax.set_title(key)
ax.set_xlabel("epoch")
ax.set_ylabel("loss value")
for loss in df2.columns.get_level_values(0).unique():
ax.plot(df2[loss, "mean"], color=colors[loss], label=loss)
ax.fill_between(
df2.index,
df2[loss, "mean"] - 2 * df2[loss, "std"],
df2[loss, "mean"] + 2 * df2[loss, "std"],
color=colors[loss],
alpha=0.2,
)
ax.legend()
plt.show()
def experiment_loss_plot(experiment="Experiment 02b"):
path_ = os.path.join(
"/Users/madsadrian/OneDrive - UiT Office 365/tensorboard-logs/keep", experiment
)
runs = {
# "a": "20191201-035206-Texas-A",
# "b": "20191201-035213-Texas-B/",
# "c": "20191201-035221-Texas-C/",
"d": "20191201-035234-Texas-D/"
}
for key, folder in runs.items(): # iterate experiment level
with os.scandir(os.path.join(path_, folder)) as it:
# iterate model level
for entry in it:
if not entry.is_dir() or entry.name == "shelf":
continue
plot_experiment_losses(entry)
return
def experiment_three_boxplot(epoch=99):
runs = {
"a": "Experiment 03/3A/",
"b": "Experiment 03/3B/",
"c": "Experiment 03/3C/",
"d": "Experiment 03/3D/",
}
note = {
"a": "lrd=False, aff_patch=32, ps=128, bs=1, #b=12",
"b": "lrd=False, aff_patch=False, ps=50/100, bs=2, #b=40/10",
"c": "lrd=True, aff_patch=32, ps=128, bs=1, #b=12",
"d": "lrd=True, aff_patch=False, ps=50/100, bs=2, #b=40/10",
}
dfs = {}
for key, folder in runs.items():
df = pd.read_csv(os.path.join(LOG_PATH, folder, "metrics.csv"))
df = df[df["epoch"] == epoch]
df["training time"] = pd.to_timedelta(df["training time"], unit="s")
dfs[key] = df
_, axes = plt.subplots(2, 2, sharey=True)
model_order = {"-CX": [0], "A-X": [1], "A-Xp": [1], "ACX": [2], "ACXp": [2]}
if ARGS.to_tikz:
for key, df in dfs.items():
dfs[key]["model"] = df["model"].apply(lambda s: f"\\m{{{s}}}")
model_order = {f"\\m{{{key}}}": value for key, value in model_order.items()}
for ax, (key, df) in zip(axes.flatten(), dfs.items()):
for name, gdf in df.groupby("model"):
ax.boxplot(gdf["cohens kappa"], positions=model_order[name])
print(key, name, gdf["training time"].mean())
ax.set_xticks(range(3))
ax.set_xticklabels(["-CX", "A-X", "ACX"])
ax.set_xlabel(note[key])
ax.set_ylabel("Cohens $\\kappa$")
ax.grid(axis="y")
ax.axhline(0.2038, c="red")
if ARGS.to_tikz:
print("tikz not configured")
exit()
tikzplotlib.save(
figure=fig50,
filepath=os.path.join(
LOG_PATH, "Experiment 02", "cohens-kappa-boxplot-a.tex"
),
figureheight="4in",
figurewidth="4in",
# strict=True,
)
tikzplotlib.save(
figure=fig32,
filepath=os.path.join(
LOG_PATH, "Experiment 02", "cohens-kappa-boxplot-a.tex"
),
figureheight="4in",
figurewidth="4in",
# strict=True,
)
else:
plt.show()
def experiment_two_b_boxplot(epoch=99, wrong_one=False):
runs = {
"a": "Experiment 02/2A/",
"b": "Experiment 02/2B/",
"c": "Experiment 02/2C/",
"d": "Experiment 02/2D/",
}
note = {
"a": "lrd=False, aff_patch=32, ps=128, bs=1, #b=12",
"b": "lrd=False, aff_patch=False, ps=50/100, bs=2, #b=40/10",
"c": "lrd=True, aff_patch=32, ps=128, bs=1, #b=12",
"d": "lrd=True, aff_patch=False, ps=50/100, bs=2, #b=40/10",
}
if wrong_one:
runs = {
"a": "errors/bs nb wrong/20191201-035213-Texas-B",
"b": "Experiment 02/2B",
"c": "errors/bs nb wrong/20191201-035234-Texas-D",
"d": "Experiment 02/2D",
}
note = {
"a": "lrd=False, ps=50/100, bs=2/10, #b=40/2",
"b": "lrd=False, ps=50/100, bs=2, #b=40/10",
"c": "lrd=True, ps=50/100, bs=2/10, #b=40/2",
"d": "lrd=True, ps=50/100, bs=2, #b=40/10",
}
dfs = {}
for key, folder in runs.items():
df = pd.read_csv(os.path.join(LOG_PATH, folder, "metrics.csv"))
df = df[df["epoch"] == epoch]
df["training time"] = pd.to_timedelta(df["training time"], unit="s")
dfs[key] = df
fig, axes = plt.subplots(2, 2, sharey=True)
# fig, axes = labeled_subplots(
# 2,
# 2,
# ["lrd=False", "lrd=True"],
# [
# "aff_patch=32, ps=128, bs=1, #b=12",
# "aff_patch=False, ps=50/100, bs=10, #b=2",
# ],
# )
model_order = {
"--X": [0],
"A-X": [1],
"A-Xp": [1],
"-CX": [2],
"ACX": [3],
"ACXp": [3],
}
if ARGS.to_tikz:
for key, df in dfs.items():
dfs[key]["model"] = df["model"].apply(lambda s: f"\\m{{{s}}}")
model_order = {f"\\m{{{key}}}": value for key, value in model_order.items()}
lpos = 0
for ax, (key, df) in zip(axes.flatten(), dfs.items()):
for name, gdf in df.groupby("model"):
ax.boxplot(gdf["cohens kappa"], positions=model_order[name])
ax.set_xticks(range(4))
ax.set_xticklabels(["--X", "A-X", "-CX", "ACX"])
ax.set_xlabel(note[key], x=lpos)
ax.set_ylabel("Cohens $\\kappa$")
ax.grid(axis="y")
ax.axhline(0.6516, c="red")
if ARGS.to_tikz:
print("tikz not configured")
exit()
tikzplotlib.save(
figure=fig50,
filepath=os.path.join(
LOG_PATH, "Experiment 02", "cohens-kappa-boxplot-a.tex"
),
figureheight="4in",
figurewidth="4in",
# strict=True,
)
tikzplotlib.save(
figure=fig32,
filepath=os.path.join(
LOG_PATH, "Experiment 02", "cohens-kappa-boxplot-a.tex"
),
figureheight="4in",
figurewidth="4in",
# strict=True,
)
else:
plt.show()
def experiment_two_boxplot(epoch=49):
path_ = (
"/Users/madsadrian/OneDrive - UiT Office 365/tensorboard-logs/keep/"
+ "Experiment 02/"
)
runs = {"ps50": "20191129-191422-Texas/", "ps32": "20191130-185159-Texas/"}
    df50 = pd.read_csv(path_ + runs["ps50"] + "metrics.csv")
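    # The source is truncated here; the ps32 run would presumably be loaded the same way:
    #   df32 = pd.read_csv(path_ + runs["ps32"] + "metrics.csv")
    # (the subsequent epoch filtering and the fig50/fig32 boxplots are not shown in the source).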
from scipy.sparse import csc_matrix
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
class Dispersion(object):
def __init__(self, corpus=None, term_doc_mat=None):
"""
From https://www.researchgate.net/publication/332120488_Analyzing_dispersion
<NAME>. Analyzing dispersion. April 2019. Practical handbook of corpus linguistics. Springer.
Parts are considered documents
:param X: term document (part) matrix
"""
'''
following Gries' notation, for the following example:
b a m n i b e u p
b a s a t b e w q n
b c a g a b e s t a
b a g h a b e a a t
b a h a a b e a x a t
(1) l = 50 (the length of the corpus in words)
(2) n = 5 (the length of the corpus in parts)
(3) s = (0.18, 0.2, 0.2, 0.2, 0.22) (the percentages of the n corpus part sizes)
(4) f = 15 (the overall frequency of a in the corpus)
(5) v = (1, 2, 3, 4, 5) (the frequencies of a in each corpus part 1-n)
        (6) p = (1/9, 2/10, 3/10, 4/10, 5/11) (the percentages a makes up of each corpus part 1-n)
'''
self.corpus = None
if corpus is not None:
self.corpus = corpus
X = corpus.get_term_doc_mat()
else:
X = term_doc_mat
part_sizes = X.sum(axis=1)
self.l = X.sum().sum()
self.n = X.shape[0]
self.f = X.sum(axis=0)
self.v = X
self.p = X.multiply(csc_matrix(1. / X.sum(axis=1)))
self.s = part_sizes / self.l
def dispersion_range(self):
"""
range: number of parts containing a = 5
"""
return (self.v > 0).sum(axis=0).A1
def sd_population(self):
return np.sqrt(StandardScaler(with_mean=False).fit(self.v).var_)
def vc(self):
"""
Direct quote from Gries (2019)
A maybe more useful variant of this measure is its normalized version, the variation
coefficient (vc, see (9)); the normalization consists of dividing sdpopulation by the mean frequency
of the element in the corpus parts f/n:
"""
ss = StandardScaler(with_mean=False).fit(self.v)
return np.sqrt(ss.var_) / ss.mean_
def jullands_d(self):
"""
The version of Juilland's D that can handle differently large corpus parts is then computed
as shown in (10). In order to accommodate the different sizes of the corpus parts, however, the
variation coefficient is not computed using the observed frequencies v1-n (i.e. 1, 2, 3, 4, 5 in files
1 to 5 respectively, see (5) above) but using the percentages in p1-n (i.e. how much of each corpus
part is made up by the element in question, i.e. 1/9, 2/10, 3/10, 4/10, 5/11, see (6) above), which is what
corrects for differently large corpus parts:
"""
ss = StandardScaler(with_mean=False).fit(self.p)
return 1 - (np.sqrt(ss.var_) / ss.mean_) / np.sqrt(self.n - 1)
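        # In formula form (a hedged reading of the code above, not a quotation from Gries):
        #   D = 1 - (sd(p) / mean(p)) / sqrt(n - 1)
        # where p holds the per-part percentages and n is the number of corpus parts.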
def rosengrens(self):
'''
The version of Rosengren’s S that can handle differently large corpus parts is
        shown in (12). Each corpus part's size in percent (in s) is multiplied by the
frequencies of the element in question in each corpus part (in v1-n); of each product,
one takes the square root, and those are summed up, that sum is squared, and divided
by the overall frequency of the element in question in the corpus (f)'''
return np.power(
np.sqrt(self.v.multiply(self.s)).sum(axis=0).A1,
2
) * 1. / self.get_frequency()
def dp(self):
'''
Finally, Gries (2008, 2010) and the follow-up by Lijffijt and Gries (2012)
proposed a measure called DP (for deviation of proportions), which falls between
1-min s (for an extremely even distribution) and 1 (for an extremely clumpy
distribution) as well as a normalized version of DP, DPnorm, which falls between 0
and 1, which are computed as shown in (13). For DP, one computes the differences
between how much of the element in question is in each corpus file in percent on the
one hand and the sizes of the corpus parts in percent on the other – i.e. the differences
between observed and expected percentages. Then, one adds up the absolute values
        of those and multiplies by 0.5; the normalization then consists of dividing this value
by the theoretically maximum value of DP given the number of corpus parts (in a
way reminiscent of (11)'''
return np.sum(np.abs(self.v.multiply(1. / self.get_frequency()) - self.s),
axis=0).A1 / 2
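        # In formula form (a hedged reading of the code above):
        #   DP = 0.5 * sum_i |v_i / f - s_i|
        # i.e. half the summed absolute differences between observed and expected part proportions.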
def dp_norm(self):
return self.dp() / (1 - self.s.min())
def kl_divergence(self):
'''The final measure to be discussed here is one that, as far as I can tell, has never
been proposed as a measure of dispersion, but seems to me to be ideally suited to be
one, namely the Kullback-Leibler (or KL-) divergence, a non-symmetric measure
that quantifies how different one probability distribution (e.g., the distribution of
all the occurrences of a across all corpus parts, i.e. v/f) is from another (e.g., the
        corpus part sizes s); the KL-divergence is computed as shown in (14) (with log2s of
        0 defined as 0):'''
vf = self.v.multiply(1. / self.f)
vfs = vf.multiply(1. / self.s)
vfs.data = np.log(vfs.data) / np.log(2)
return np.sum(vf.multiply(vfs), axis=0).A1
def da(self):
'''
Metrics from Burch (2017).
<NAME>, <NAME> and <NAME>. Measuring Lexical Dispersion in Corpus Linguistics. JRDS. 2016.
Article: https://journal.equinoxpub.com/JRDS/article/view/9480
D_A = 1 - ((n * (n - 1))/2) * sum_{i in 0, n - 1} sum{j in i + 1, n} |v_i - v_j|/(2*mean(v))
:return:
'''
n = self.n
constant = 1./(n * (n - 1)/2)
da = []
for word_i in range(self.v.shape[1]):
y = self.v.T[word_i].todense().A1
yt = np.tile(y, (n, 1))
s = np.sum(np.abs(yt - yt.T)) / 2
da.append(1 - constant * s * 0.5 * y.mean())
return np.array(da)
def get_df(self, terms = None):
if terms is None and self.corpus is not None:
terms = self.corpus.get_terms()
df_content = {
'Frequency': self.get_frequency(),
'Range': self.dispersion_range(),
'SD': self.sd_population(),
'VC': self.vc(),
"Juilland's D": self.jullands_d(),
"Rosengren's S": self.rosengrens(),
'DP': self.dp(),
'DP norm': self.dp_norm(),
'KL-divergence': self.kl_divergence()
}
if terms is None:
return pd.DataFrame(df_content)
        return pd.DataFrame(df_content, index=terms)
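# Illustrative usage (a hypothetical 3-part corpus with two terms; not taken from the source):
#   X = csc_matrix(np.array([[1, 0], [2, 1], [3, 1]]))   # parts x terms count matrix
#   disp = Dispersion(term_doc_mat=X)
#   disp.get_df(terms=['alpha', 'beta'])                  # one row of dispersion statistics per term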
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 6 11:33:59 2017
Parse TinySoft (天软) data format
@author: ws
"""
import pandas as pd
_max_iter_stocks = 100
def _int2date(int_date):
if int_date < 10000000:
return pd.NaT
    return pd.datetime(int_date//10000, int_date%10000//100, int_date%100)
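# Worked example: _int2date(20171106) -> datetime(2017, 11, 6), since
# 20171106 // 10000 == 2017, 20171106 % 10000 // 100 == 11 and 20171106 % 100 == 6.
# Note: pd.datetime is deprecated in recent pandas; datetime.datetime is the drop-in replacement.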
# ActivitySim
# See full license in LICENSE.txt.
import logging
import pandas as pd
import numpy as np
from activitysim.core import assign
from activitysim.core import tracing
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import pipeline
from activitysim.core import mem
from activitysim.core import los
from activitysim.core.pathbuilder import TransitVirtualPathBuilder
logger = logging.getLogger(__name__)
# class AccessibilitySkims(object):
# """
# Wrapper for skim arrays to facilitate use of skims by accessibility model
#
# Parameters
# ----------
# skims : 2D array
# omx: open omx file object
# this is only used to load skims on demand that were not preloaded
# length: int
# number of zones in skim to return in skim matrix
# in case the skims contain additional external zones that should be trimmed out so skim
# array is correct shape to match (flattened) O-D tiled columns in the od dataframe
# transpose: bool
# whether to transpose the matrix before flattening. (i.e. act as a D-O instead of O-D skim)
# """
#
# def __init__(self, skim_dict, orig_zones, dest_zones, transpose=False):
#
# logger.info(f"init AccessibilitySkims with {len(dest_zones)} dest zones {len(orig_zones)} orig zones")
#
# assert len(orig_zones) <= len(dest_zones)
# assert np.isin(orig_zones, dest_zones).all()
# assert len(np.unique(orig_zones)) == len(orig_zones)
# assert len(np.unique(dest_zones)) == len(dest_zones)
#
# self.skim_dict = skim_dict
# self.transpose = transpose
#
# num_skim_zones = skim_dict.get_skim_info('omx_shape')[0]
# if num_skim_zones == len(orig_zones) and skim_dict.offset_mapper.offset_series is None:
# # no slicing required because whatever the offset_int, the skim data aligns with zone list
# self.map_data = False
# else:
#
# logger.debug("AccessibilitySkims - applying offset_mapper")
#
# skim_index = list(range(num_skim_zones))
# orig_map = skim_dict.offset_mapper.map(orig_zones)
# dest_map = skim_dict.offset_mapper.map(dest_zones)
#
# # (we might be sliced multiprocessing)
# # assert np.isin(skim_index, orig_map).all()
#
# out_of_bounds = ~np.isin(skim_index, dest_map)
# # if out_of_bounds.any():
# # print(f"{(out_of_bounds).sum()} skim zones not in dest_map")
# # print(f"dest_zones {dest_zones}")
# # print(f"dest_map {dest_map}")
# # print(f"skim_index {skim_index}")
# assert not out_of_bounds.any(), \
# f"AccessibilitySkims {(out_of_bounds).sum()} skim zones not in dest_map: {np.ix_(out_of_bounds)[0]}"
#
# self.map_data = True
# self.orig_map = orig_map
# self.dest_map = dest_map
#
# def __getitem__(self, key):
# """
# accessor to return flattened skim array with specified key
# flattened array will have length length*length and will match tiled OD df used by assign
#
# this allows the skim array to be accessed from expressions as
# skim['DISTANCE'] or skim[('SOVTOLL_TIME', 'MD')]
# """
#
# data = self.skim_dict.get(key).data
#
# if self.transpose:
# data = data.transpose()
#
# if self.map_data:
# # slice skim to include only orig rows and dest columns
# # 2-d boolean slicing in numpy is a bit tricky
# # data = data[orig_map, dest_map] # <- WRONG!
# # data = data[orig_map, :][:, dest_map] # <- RIGHT
# # data = data[np.ix_(orig_map, dest_map)] # <- ALSO RIGHT
#
# data = data[self.orig_map, :][:, self.dest_map]
#
# return data.flatten()
@inject.step()
def compute_accessibility(accessibility, network_los, land_use, trace_od):
"""
Compute accessibility for each zone in land use file using expressions from accessibility_spec
The actual results depend on the expressions in accessibility_spec, but this is initially
intended to permit implementation of the mtc accessibility calculation as implemented by
Accessibility.job
Compute measures of accessibility used by the automobile ownership model.
The accessibility measure first multiplies an employment variable by a mode-specific decay
function. The product reflects the difficulty of accessing the activities the farther
(in terms of round-trip travel time) the jobs are from the location in question. The products
to each destination zone are next summed over each origin zone, and the logarithm of the
product mutes large differences. The decay function on the walk accessibility measure is
steeper than automobile or transit. The minimum accessibility is zero.
"""
trace_label = 'compute_accessibility'
model_settings = config.read_model_settings('accessibility.yaml')
assignment_spec = assign.read_assignment_spec(config.config_file_path('accessibility.csv'))
accessibility_df = accessibility.to_frame()
logger.info("Running %s with %d dest zones" % (trace_label, len(accessibility_df)))
constants = config.get_model_constants(model_settings)
land_use_columns = model_settings.get('land_use_columns', [])
land_use_df = land_use.to_frame()
land_use_df = land_use_df[land_use_columns]
# don't assume they are the same: accessibility may be sliced if we are multiprocessing
orig_zones = accessibility_df.index.values
dest_zones = land_use_df.index.values
orig_zone_count = len(orig_zones)
dest_zone_count = len(dest_zones)
logger.info("Running %s with %d dest zones %d orig zones" %
(trace_label, dest_zone_count, orig_zone_count))
# create OD dataframe
od_df = pd.DataFrame(
data={
'orig': np.repeat(orig_zones, dest_zone_count),
'dest': np.tile(dest_zones, orig_zone_count)
}
)
if trace_od:
trace_orig, trace_dest = trace_od
trace_od_rows = (od_df.orig == trace_orig) & (od_df.dest == trace_dest)
else:
trace_od_rows = None
# merge land_use_columns into od_df
od_df = pd.merge(od_df, land_use_df, left_on='dest', right_index=True).sort_index()
locals_d = {
'log': np.log,
'exp': np.exp,
'network_los': network_los,
}
skim_dict = network_los.get_default_skim_dict()
locals_d['skim_od'] = skim_dict.wrap('orig', 'dest').set_df(od_df)
locals_d['skim_do'] = skim_dict.wrap('dest', 'orig').set_df(od_df)
if network_los.zone_system == los.THREE_ZONE:
locals_d['tvpb'] = TransitVirtualPathBuilder(network_los)
if constants is not None:
locals_d.update(constants)
results, trace_results, trace_assigned_locals \
= assign.assign_variables(assignment_spec, od_df, locals_d, trace_rows=trace_od_rows)
for column in results.columns:
data = np.asanyarray(results[column])
data.shape = (orig_zone_count, dest_zone_count) # (o,d)
accessibility_df[column] = np.log(np.sum(data, axis=1) + 1)
logger.info("{trace_label} added {len(results.columns} columns")
# - write table to pipeline
pipeline.replace_table("accessibility", accessibility_df)
if trace_od:
if not trace_od_rows.any():
logger.warning(f"trace_od not found origin = {trace_orig}, dest = {trace_dest}")
else:
# add OD columns to trace results
            df = pd.concat([od_df[trace_od_rows], trace_results], axis=1)
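            # The source is truncated here; ActivitySim models usually finish this branch by
            # writing the trace, e.g. tracing.trace_df(df, trace_label) -- the exact call and
            # arguments are assumptions, not taken from the source.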
#!/usr/bin/env python3
import os
import sys
import numpy as np
import pandas as pd
np.set_printoptions(edgeitems=3)
np.core.arrayprint._line_width = 80
fname = "res/output_360_merged_2.50.vcf.gz_summary.bin"
# fname = "res/output_360_merged_2.50.vcf.gz_chromosomes.bin"
def readIbrowserBinary(infile):
dt0 = np.dtype([
('hasData' , bool ),
('serial' , np.int64),
('counterBits', np.int64),
('dataLen' , np.int64),
('sumData' , np.uint64)
])
fileSize = os.stat(infile).st_size
with open(infile, 'rb') as fhd:
d = np.fromfile(fhd, dtype=dt0, count=1)[0]
counterBits = d["counterBits"]
dataLen = d["dataLen"]
dataFmt = None
dataFmtLen = None
if counterBits == 16:
dataFmt = np.uint16
dataFmtLen = 2
elif counterBits == 32:
dataFmt = np.uint32
dataFmtLen = 4
elif counterBits == 64:
dataFmt = np.uint64
dataFmtLen = 8
else:
print("unknown counter bits", counterBits)
sys.exit(1)
dataSize = dataLen * dataFmtLen
registerSize = 1 + 8 + 8 + 8 + 8 + dataSize
dt = np.dtype([
('hasData' , bool ), #1 1
('serial' , np.int64 ), #8 9
('counterBits', np.int64 ), #8 17
('dataLen' , np.int64 ), #8 25
('sumData' , np.uint64), #8 33
('data', dataFmt, dataLen)
])
assert (fileSize % registerSize) == 0, "wrong file size: fileSize {} % registerSize {} != 0. {}".format(fileSize, registerSize, fileSize % registerSize)
numRegisters = int(fileSize / registerSize) - 1
memmap = np.memmap(infile, dtype=dt, mode='r')
return numRegisters, memmap
def registerToDataframe(regs):
matrix = None
for reg in regs:
if matrix is None:
            matrix = pd.DataFrame({reg['serial']: reg['data']}, copy=False)
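            # The source is truncated here; a hedged sketch of how the loop and function
            # might finish (one column per register serial is an assumption):
            #   else:
            #       matrix[reg['serial']] = reg['data']
            #   return matrix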
'''
This script cleans the OB datasets and combines all the cleaned data into one file.
Dataset name: O-21-<NAME>
1. the dataset was first cleaned in Excel
2. however, the placeholder symbol '-' remained in all the datasets
3. this script replaces all such symbols with -999
'''
import os
import glob
import string
import datetime
import pandas as pd
# specify the path
data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/Backup/OB Database Consolidation 2021-06-11/21-Done-Done/sql/data_1st_round_cleaned/'
save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/Backup/OB Database Consolidation 2021-06-11/21-Done-Done/sql/'
# read templates into pandas
window_df = pd.read_csv(data_path + 'Window_Status.csv')
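# The source is truncated here; a hedged sketch of the remaining steps described in the
# module docstring (template names other than Window_Status are assumptions):
#   for name in ['Window_Status', 'Occupancy_Measurement', 'Indoor_Measurement']:
#       df = pd.read_csv(data_path + name + '.csv')
#       df = df.replace('-', -999)
#       df.to_csv(save_path + name + '.csv', index=False)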
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
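    # "value_is_literal": True appears to force the comparator to be treated as a plain
    # value rather than a column name, so "var1" above is compared as the literal string
    # "var1" -- inferred from the surrounding expectations.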
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
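    # The LBDY case above suggests that rows with missing values (None/NaN) evaluate to
    # False under the ordering operators -- an inference from these expected Series.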
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
    def test_not_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_suffix_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([True, True])))
    def test_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).matches_regex({
"target": "--r1",
"comparator": ".*",
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).matches_regex({
"target": "var2",
"comparator": "[0-9].*",
}).equals(pandas.Series([False, False])))
def test_not_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_matches_regex({
"target": "var1",
"comparator": ".*",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
}).equals(pandas.Series([True, True])))
def test_starts_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).starts_with({
"target": "--r1",
"comparator": "WO",
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).starts_with({
"target": "var2",
"comparator": "ABC",
}).equals(pandas.Series([False, False])))
def test_ends_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).ends_with({
"target": "--r1",
"comparator": "abc",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).ends_with({
"target": "var1",
"comparator": "est",
}).equals(pandas.Series([False, True])))
def test_has_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([True, False])))
def test_has_not_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_not_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([False, True])))
def test_longer_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
def test_longer_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_shorter_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'val']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
def test_shorter_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_all({
"target": "--r1",
"comparator": "--r2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_not_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
        self.assertFalse(DataframeType({"value": df}).not_contains_all({
            "target": "var1",
            "comparator": "var2",
        }))
        self.assertTrue(DataframeType({"value": df}).not_contains_all({
            "target": "var2",
            "comparator": "var1",
        }))
        self.assertFalse(DataframeType({"value": df}).not_contains_all({
            "target": "var2",
            "comparator": ["test", "value"],
        }))
def test_invalid_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2099'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).invalid_date({"target": "--r1"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var3"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var2"})
.equals(pandas.Series([False, False, False, True, True])))
def test_date_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var1", "comparator": '2021'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "1997-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([False, False, True, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).date_equal_to({"target": "--r3", "comparator": "--r4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "minute"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "second"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "microsecond"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
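    # "date_component" appears to restrict the comparison to a single date part (year,
    # month, hour, ...); components missing from partial dates such as "1997-07" seem to
    # compare as equal, while empty/None dates yield False. Inferred from the assertions
    # above rather than from the operator implementation.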
def test_date_not_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2023'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_is_incomplete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": [ '2021', '2021', '2099'],
"var2": [ "1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var1"})
.equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var2"})
.equals(pandas.Series([False, False, False])))
def test_is_complete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ["2021", "2021", "2099"],
"var2": ["1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var1"})
.equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var2"})
.equals(pandas.Series([True, True, True])))
def test_is_unique_set(self):
df = pandas.DataFrame.from_dict( {"ARM": ["PLACEBO", "PLACEBO", "A", "A"], "TAE": [1,1,1,2], "LAE": [1,2,1,2], "ARF": [1,2,3,4]})
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": "LAE"})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": ["LAE"]})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": ["TAE"]})
.equals(pandas.Series([False, False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": "TAE"})
.equals(pandas.Series([False, False, True, True])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_unique_set({"target" : "--M", "comparator": "--F"})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_unique_set({"target" : "--M", "comparator": ["--F"]})
.equals(pandas.Series([True, True, True, True])))
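    # is_unique_set appears to check, per row, whether the combination of target and
    # comparator column values is unique across the dataframe (ARM+TAE repeats for the
    # two PLACEBO rows above) -- inferred from the expected Series values.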
def test_is_not_unique_set(self):
df = pandas.DataFrame.from_dict( {"ARM": ["PLACEBO", "PLACEBO", "A", "A"], "TAE": [1,1,1,2], "LAE": [1,2,1,2], "ARF": [1,2,3,4]})
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": "LAE"})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": ["LAE"]})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": ["TAE"]})
.equals(pandas.Series([True, True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": "TAE"})
.equals(pandas.Series([True, True, False, False])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_not_unique_set({"target" : "--M", "comparator": "--F"})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_not_unique_set({"target" : "--M", "comparator": ["--F"]})
.equals(pandas.Series([False, False, False, False])))
def test_is_ordered_set(self):
df = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [1,1,2,2] })
self.assertTrue(DataframeType({"value": df}).is_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "SE"}}).is_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
df2 = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [3,1,2,2] })
self.assertFalse(DataframeType({"value": df2}).is_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertFalse(DataframeType({"value":df2, "column_prefix_map": {"--": "SE"}}).is_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
def test_is_not_ordered_set(self):
df = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [3,1,2,2] })
self.assertTrue(DataframeType({"value": df}).is_not_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "SE"}}).is_not_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
df2 = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [1,1,2,2] })
self.assertFalse(DataframeType({"value": df2}).is_not_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertFalse(DataframeType({"value":df2, "column_prefix_map": {"--": "SE"}}).is_not_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
def test_is_unique_relationship(self):
"""
Test validates one-to-one relationship against a dataset.
One-to-one means that a pair of columns can be duplicated
but its integrity should not be violated.
"""
one_to_one_related_df = pandas.DataFrame.from_dict(
{
"STUDYID": [1, 2, 3, 1, 2],
"USUBJID": ["TEST", "TEST-1", "TEST-2", "TEST-3", "TEST-4", ],
"STUDYDESC": ["Russia", "USA", "China", "Russia", "USA", ],
}
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df}).is_unique_relationship(
{"target": "STUDYID", "comparator": "STUDYDESC"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df}).is_unique_relationship(
{"target": "STUDYDESC", "comparator": "STUDYID"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df, "column_prefix_map":{"--": "STUDY"}}).is_unique_relationship(
{"target": "--ID", "comparator": "--DESC"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df, "column_prefix_map":{"--": "STUDY"}}).is_unique_relationship(
{"target": "--DESC", "comparator": "--ID"}
).equals(pandas.Series([True, True, True, True, True]))
)
df_violates_one_to_one = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"TESTID": [1, 2, 1, 3],
"TESTNAME": ["Functional", "Stress", "Functional", "Stress", ],
}
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_unique_relationship(
{"target": "TESTID", "comparator": "TESTNAME"}).equals(pandas.Series([True, False, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_unique_relationship(
{"target": "TESTNAME", "comparator": "TESTID"}).equals(pandas.Series([True, False, True, False]))
)
def test_is_not_unique_relationship(self):
"""
Test validates one-to-one relationship against a dataset.
One-to-one means that a pair of columns can be duplicated
but its integrity should not be violated.
"""
valid_df = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISITNUM": [1, 2, 1, 3],
"VISIT": ["Consulting", "Surgery", "Consulting", "Treatment", ],
}
)
self.assertTrue(DataframeType({"value": valid_df}).is_not_unique_relationship(
{"target": "VISITNUM", "comparator": "VISIT"}).equals(pandas.Series([False, False, False, False]))
)
self.assertTrue(DataframeType({"value": valid_df}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITNUM"}).equals( | pandas.Series([False, False, False, False]) | pandas.Series |
""" test parquet compat """
import datetime
from distutils.version import LooseVersion
import os
from warnings import catch_warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.io.parquet import (
FastParquetImpl,
PyArrowImpl,
get_engine,
read_parquet,
to_parquet,
)
try:
import pyarrow # noqa
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
import fastparquet # noqa
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
pytestmark = pytest.mark.filterwarnings(
"ignore:RangeIndex.* is deprecated:DeprecationWarning"
)
# setup engines & skips
@pytest.fixture(
params=[
pytest.param(
"fastparquet",
marks=pytest.mark.skipif(
not _HAVE_FASTPARQUET, reason="fastparquet is not installed"
),
),
pytest.param(
"pyarrow",
marks=pytest.mark.skipif(
not _HAVE_PYARROW, reason="pyarrow is not installed"
),
),
]
)
def engine(request):
return request.param
@pytest.fixture
def pa():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
return "pyarrow"
@pytest.fixture
def fp():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
return "fastparquet"
@pytest.fixture
def df_compat():
return pd.DataFrame({"A": [1, 2, 3], "B": "foo"})
@pytest.fixture
def df_cross_compat():
df = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
# 'c': np.arange(3, 6).astype('u1'),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
# 'g': pd.date_range('20130101', periods=3,
# tz='US/Eastern'),
# 'h': pd.date_range('20130101', periods=3, freq='ns')
}
)
return df
@pytest.fixture
def df_full():
return pd.DataFrame(
{
"string": list("abc"),
"string_with_nan": ["a", np.nan, "c"],
"string_with_none": ["a", None, "c"],
"bytes": [b"foo", b"bar", b"baz"],
"unicode": ["foo", "bar", "baz"],
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_nan": [2.0, np.nan, 3.0],
"bool": [True, False, True],
"datetime": pd.date_range("20130101", periods=3),
"datetime_with_nat": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
}
)
def check_round_trip(
df,
engine=None,
path=None,
write_kwargs=None,
read_kwargs=None,
expected=None,
check_names=True,
check_like=False,
repeat=2,
):
"""Verify parquet serializer and deserializer produce the same results.
Performs a pandas to disk and disk to pandas round trip,
then compares the 2 resulting DataFrames to verify equality.
Parameters
----------
df: Dataframe
engine: str, optional
'pyarrow' or 'fastparquet'
path: str, optional
write_kwargs: dict of str:str, optional
read_kwargs: dict of str:str, optional
expected: DataFrame, optional
Expected deserialization result, otherwise will be equal to `df`
check_names: list of str, optional
Closed set of column names to be compared
check_like: bool, optional
If True, ignore the order of index & columns.
repeat: int, optional
How many times to repeat the test
"""
write_kwargs = write_kwargs or {"compression": None}
read_kwargs = read_kwargs or {}
if expected is None:
expected = df
if engine:
write_kwargs["engine"] = engine
read_kwargs["engine"] = engine
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(
expected, actual, check_names=check_names, check_like=check_like
)
if path is None:
with tm.ensure_clean() as path:
compare(repeat)
else:
compare(repeat)
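# A minimal, illustrative round-trip sketch built on check_round_trip. It assumes the
# pyarrow engine fixture defined above is usable in the current environment; the frame
# and test name are examples, not part of the library's API.
def test_round_trip_minimal_sketch(pa):
    # A small frame with one numeric and one string column; check_round_trip writes it
    # to a temporary parquet file, reads it back, and asserts frame equality (twice).
    df = pd.DataFrame({"x": [1, 2, 3], "y": list("abc")})
    check_round_trip(df, engine=pa)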
def test_invalid_engine(df_compat):
with pytest.raises(ValueError):
check_round_trip(df_compat, "foo", "bar")
def test_options_py(df_compat, pa):
# use the set option
with pd.option_context("io.parquet.engine", "pyarrow"):
check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
with pd.option_context("io.parquet.engine", "fastparquet"):
check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
# use the set option
with pd.option_context("io.parquet.engine", "auto"):
check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "pyarrow"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "fastparquet"):
assert isinstance(get_engine("auto"), FastParquetImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "auto"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
def test_get_engine_auto_error_message():
# Expect different error messages from get_engine(engine="auto")
# if engines aren't installed vs. are installed but bad version
from pandas.compat._optional import VERSIONS
# Do we have engines installed, but a bad version of them?
pa_min_ver = VERSIONS.get("pyarrow")
fp_min_ver = VERSIONS.get("fastparquet")
have_pa_bad_version = (
False
if not _HAVE_PYARROW
else LooseVersion(pyarrow.__version__) < LooseVersion(pa_min_ver)
)
have_fp_bad_version = (
False
if not _HAVE_FASTPARQUET
else LooseVersion(fastparquet.__version__) < LooseVersion(fp_min_ver)
)
# Do we have usable engines installed?
have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version
have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version
if not have_usable_pa and not have_usable_fp:
# No usable engines found.
if have_pa_bad_version:
match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
if have_fp_bad_version:
match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=pa, compression=None)
result = read_parquet(path, engine=fp)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=fp, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
if (
LooseVersion(pyarrow.__version__) < "0.15"
and LooseVersion(pyarrow.__version__) >= "0.13"
):
pytest.xfail(
"Reading fastparquet with pyarrow in 0.14 fails: "
"https://issues.apache.org/jira/browse/ARROW-6492"
)
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
with catch_warnings(record=True):
result = read_parquet(path, engine=pa)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=pa, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
class Base:
def check_error_on_write(self, df, engine, exc):
# check that we are raising the exception on writing
with tm.ensure_clean() as path:
with pytest.raises(exc):
to_parquet(df, path, engine, compression=None)
class TestBasic(Base):
def test_error(self, engine):
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
pd.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
self.check_error_on_write(obj, engine, ValueError)
def test_columns_dtypes(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# unicode
df.columns = ["foo", "bar"]
check_round_trip(df, engine)
def test_columns_dtypes_invalid(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# numeric
df.columns = [0, 1]
self.check_error_on_write(df, engine, ValueError)
# bytes
df.columns = [b"foo", b"bar"]
self.check_error_on_write(df, engine, ValueError)
# python object
df.columns = [
datetime.datetime(2011, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 1, 1),
]
self.check_error_on_write(df, engine, ValueError)
@pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"])
def test_compression(self, engine, compression):
if compression == "snappy":
pytest.importorskip("snappy")
elif compression == "brotli":
pytest.importorskip("brotli")
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine, write_kwargs={"compression": compression})
def test_read_columns(self, engine):
# GH18154
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
expected = pd.DataFrame({"string": list("abc")})
check_round_trip(
df, engine, expected=expected, read_kwargs={"columns": ["string"]}
)
def test_write_index(self, engine):
check_names = engine != "fastparquet"
df = | pd.DataFrame({"A": [1, 2, 3]}) | pandas.DataFrame |
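The parquet tests above lean on helpers defined elsewhere in pandas' test_parquet module (check_round_trip, get_engine, and the pa/fp engine fixtures) that are not part of this excerpt. The function below is only a rough sketch of what such a round-trip helper does; the name roundtrip_sketch and its defaults are assumptions, not pandas' actual implementation.
import pandas as pd
import pandas._testing as tm  # same tm helpers the excerpt uses; import location varies by pandas version
def roundtrip_sketch(df, engine="auto", write_kwargs=None, read_kwargs=None, expected=None):
    # write with the requested engine, read back, and compare against the expected frame
    write_kwargs = write_kwargs or {}
    read_kwargs = read_kwargs or {}
    expected = df if expected is None else expected
    with tm.ensure_clean() as path:
        df.to_parquet(path, engine=engine, **write_kwargs)
        result = pd.read_parquet(path, engine=engine, **read_kwargs)
        tm.assert_frame_equal(result, expected)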
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
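# Editor's sketch: the module docstring above documents --inFile/--outFile/--debug options,
# but the option-parsing code sits outside this excerpt. A minimal version using the already
# imported optparse could look like this; option names follow the docstring, defaults are assumed.
def parse_options_sketch(argv=None):
    parser = optparse.OptionParser()
    parser.add_option("--inFile", dest="inFile", default="time-series.csv")
    parser.add_option("--outFile", dest="outFile", default="time-series.ini")
    parser.add_option("--debug", dest="debug", action="store_true", default=False)
    (options, _args) = parser.parse_args(argv)
    return options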
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
            self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        analysisFrame = analysisFrame.apply(pandas.to_numeric, errors='coerce')  # fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            # Series.dtypes is never a plain str; use the pandas type check to find string-like columns.
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
                columnUniqueList = analysisFrame[columnName].unique().tolist()
            else:
                columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
        Calculates the mean using a multiplication-based method, since direct division can produce infinity or NaN.
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation using a multiplication-based method, since direct division can produce infinity or NaN.
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0)  # start the accumulator at zero, not at the mean
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
            if (columnName is not None):
                meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(sigmaValue):
                    sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(sigmaRangeValue):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": | pandas.StringDtype() | pandas.StringDtype |
import pandas as pd
import io
import requests
from datetime import datetime
#Import data file if it already exists
try:
past_data = pd.read_excel("Utah_Data.xlsx")
past_dates = past_data["Date"].tolist()
except:
past_data = pd.DataFrame({})
past_dates = []
#Get today's date and then generate a list of dates starting 03-12-2020 and ending today
todays_date = datetime.today().strftime('%m-%d-%Y')
datetimerange = pd.date_range(start='03/12/2020', end=datetime.today()).to_pydatetime().tolist()
daterange = []
for date in datetimerange:
daterange.append(date.strftime('%m-%d-%Y'))
#Make empty lists to fill in later
confirmed = []
deaths = []
recovered = []
active = []
good_dates = []
daily_confirmed = [0]
#iterate over the list of dates
#grab each file
#grab only utah county data
#get specific data from each date and append to a list
for date in daterange:
if date in past_dates:
pass
else:
url = ("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/"
+ date + ".csv")
try:
s = requests.get(url).content
full_csv = pd.read_csv(io.StringIO(s.decode('utf-8')), index_col="Combined_Key")
utah = | pd.DataFrame(full_csv.loc["Utah, Utah, US"]) | pandas.DataFrame |
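The script above fills per-date lists (confirmed, deaths, recovered, active, good_dates) before the excerpt cuts off. A plausible, self-contained sketch of the final step, combining new rows with previously saved data and writing the workbook back, is shown below; the column names and toy values are assumptions.
import pandas as pd
confirmed = [10, 12]
deaths = [0, 1]
good_dates = ["03-12-2020", "03-13-2020"]
new_rows = pd.DataFrame({"Date": good_dates, "Confirmed": confirmed, "Deaths": deaths})
past = pd.DataFrame(columns=["Date", "Confirmed", "Deaths"])  # stands in for the loaded Utah_Data.xlsx
combined = pd.concat([past, new_rows], ignore_index=True)
combined.to_excel("Utah_Data.xlsx", index=False)  # requires openpyxl, like the read_excel call above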
import pandas as pd
from pathlib import Path
from utils.aioLogger import aioLogger
from typing import List
from config.aioConfig import CESDataConfig
from utils.aioError import aioPreprocessError
import re
import matplotlib.pyplot as plt
class CESCsvReader:
"""read data from csv file df, save it in #* self.df"""
def __init__(self, csv_file: Path, use_offset: bool = True) -> None:
self.use_offset = use_offset
self.csv_file = csv_file
self.logger = aioLogger(self.__class__.__name__).logger
self.logger.info(f"{self.csv_file} loaded for processing...")
self.id = self._get_id_from_file_name()
self._csv_file_correction()
self.read_csv_to_df()
# self.show_plots()
def show_plots(self):
fig, ax = plt.subplots(nrows=2, figsize=(12, 6))
self.df[self.offset_sensor_columns].plot(ax=ax[0])
self.df[["stability", "pssc"]].plot(ax=ax[1])
plt.show()
def _convert_df_to_numeric(self, df: pd.DataFrame) -> pd.DataFrame:
return df.apply(pd.to_numeric, errors="ignore")
def read_csv_to_df(self) -> None:
df = pd.read_csv(self.csv_file, skiprows=CESDataConfig.SKIP_ROWS, sep=";")
df = self._drop_according_to_column_and_row(df)
offset_column_name: str = "offsets"
df_info_columns = df.columns.difference(CESDataConfig.SENSOR_COLUMNS)
df_info_columns = df_info_columns.difference([offset_column_name])
df_offset = self.unpack_data_series(df[offset_column_name])
df_offset = self.set_num_of_row_to_zero_from_start(df_offset)
df_offset = self._convert_df_to_numeric(df_offset)
self.offset_sensor_columns = df_offset.columns
df_info = df[df_info_columns].copy()
if self.use_offset:
self.df = pd.concat([df_info, df_offset], axis=1)
else:
df_sensor = self._convert_df_to_numeric(df[CESDataConfig.SENSOR_COLUMNS])
if self._validate_sensor_df(df_sensor):
self.df = pd.concat([df_info, df_sensor], axis=1)
else:
raise aioPreprocessError(
f"preprocess for csv {self.csv_file} NOK ! please double check"
)
def _drop_according_to_column_and_row(
self, df: pd.DataFrame, thresh_percentage: float = 0.5
):
df.dropna(
thresh=len(df) * thresh_percentage, axis=1, inplace=True
) # remove empty columns
df.dropna(
thresh=len(df.columns) * thresh_percentage, axis=0, how="all", inplace=True
) # remove n/a rows
return df
def unpack_data_series(
self, ds: pd.Series, new_column_name_prefix: str = "offset_C"
):
ds = ds.apply(self.convert_list_str_to_list)
item_length = len(ds.iloc[0])
new_ds = ds.explode()
df = | pd.DataFrame(data=new_ds) | pandas.DataFrame |
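unpack_data_series is cut off right where the exploded Series is wrapped in a DataFrame. Independent of the class, the same unpacking idea, widening a column of list-valued strings into offset_C0...offset_CN columns, can be sketched as follows (toy values, assumed parsing):
import pandas as pd
raw = pd.Series(["[1, 2, 3]", "[4, 5, 6]"])  # list-like strings as they come out of the CSV
as_lists = raw.apply(lambda s: [float(v) for v in s.strip("[]").split(",")])
wide = pd.DataFrame(as_lists.tolist(),
                    columns=[f"offset_C{i}" for i in range(len(as_lists.iloc[0]))])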
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import KFold,StratifiedKFold
import warnings
import gc
import time
import sys
import datetime
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error,roc_auc_score,roc_curve
import seaborn,numpy as np
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore')
def measure_importance():
train_data=pd.read_csv('data/train.csv')
test_data=pd.read_csv('data/test.csv')
features=[c for c in train_data.columns if c not in ['ID_code','target']]
target=train_data['target']
skf=StratifiedKFold(n_splits=5,shuffle=True,random_state=2019)
oof=np.zeros(len(train_data))
predictions=np.zeros(len(test_data))
features_importance_df=pd.DataFrame()
param = {
'num_leaves': 10,
'max_bin': 119,
'min_data_in_leaf': 11,
'learning_rate': 0.02,
'min_sum_hessian_in_leaf': 0.00245,
'bagging_fraction': 1.0,
'bagging_freq': 5,
'feature_fraction': 0.05,
'lambda_l1': 4.972,
'lambda_l2': 2.276,
'min_gain_to_split': 0.65,
'max_depth': 14,
'save_binary': True,
'seed': 1337,
'feature_fraction_seed': 1337,
'bagging_seed': 1337,
'drop_seed': 1337,
'data_random_seed': 1337,
'objective': 'binary',
'boosting_type': 'gbdt',
'verbose': 1,
'metric': 'auc',
'is_unbalance': True,
'boost_from_average': False,
}
print('start KFold {}'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
for fold_,(trn_index,val_index) in enumerate(skf.split(train_data.values,target.values)):
print('fold {}'.format(fold_))
print('start-time {}'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
trn_d=lgb.Dataset(train_data.iloc[trn_index][features],label=target.iloc[trn_index])
val_d=lgb.Dataset(train_data.iloc[val_index][features],label=target.iloc[val_index])
clf=lgb.train(params=param,train_set=trn_d,num_boost_round=10000,valid_sets=[trn_d,val_d],
verbose_eval=1000,early_stopping_rounds=100)
oof[val_index]=clf.predict(train_data.iloc[val_index][features],num_iteration=clf.best_iteration)
fold_importance_df=pd.DataFrame()
fold_importance_df['feature']=features
fold_importance_df['importance']=clf.feature_importance()
fold_importance_df['fold']=fold_+1
features_importance_df= | pd.concat([features_importance_df,fold_importance_df],axis=0) | pandas.concat |
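The fold loop above accumulates one importance frame per fold. A small standalone sketch of the usual next step, averaging importances across folds and ranking features, with toy numbers:
import pandas as pd
fold_imp = pd.DataFrame({
    "feature": ["var_0", "var_1", "var_0", "var_1"],
    "importance": [10, 3, 12, 5],
    "fold": [1, 1, 2, 2],
})
mean_imp = (fold_imp.groupby("feature")["importance"]
            .mean()
            .sort_values(ascending=False))
print(mean_imp.head(50))  # top features across folds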
import pandas as pd
import datetime
import dateutil.parser
import Utils
#
# given a synthea object, convert it to its equivalent omop objects
#
class SyntheaToOmop6:
#
# Check the model matches
#
def __init__(self, model_schema, utils):
self.model_schema = model_schema
self.utils = utils
#
# synthea patients to omop
#
def patientsToOmop(self, df, personmap, person_id, location_id):
#df = df.sort_values('Id') sort to get better match to original synthea to omop conversion for comparison
df['persontmp'] = df.index + person_id # copy index into a temp column. If accessed directly corrupts dataframe
df['locationtmp'] = df.index + location_id # copy index into a temp column. If accessed directly corrupts dataframe
person = pd.DataFrame(columns=self.model_schema['person'].keys())
person['person_id'] = df['persontmp']
person['gender_concept_id'] = df['GENDER'].apply(self.utils.getGenderConceptCode)
person['year_of_birth'] = df['BIRTHDATE'].apply(self.utils.getYearFromSyntheaDate)
person['month_of_birth'] = df['BIRTHDATE'].apply(self.utils.getMonthFromSyntheaDate)
person['day_of_birth'] = df['BIRTHDATE'].apply(self.utils.getDayFromSyntheaDate)
person['race_concept_id'] = df['RACE'].apply(self.utils.getRaceConceptCode)
person['ethnicity_concept_id'] = df['ETHNICITY'].apply(self.utils.getEthnicityConceptCode)
person['birth_datetime'] = df['BIRTHDATE'].apply(self.utils.getDefaultTimestamp)
person['death_datetime'] = df['DEATHDATE'].apply(self.utils.getDefaultTimestamp)
person['location_id'] = df['locationtmp']
person['gender_source_value'] = df['GENDER']
person['person_source_value'] = df['Id']
person['gender_source_concept_id'] = '0'
person['race_source_value'] = df['RACE']
person['race_source_concept_id'] = '0'
person['ethnicity_source_value'] = df['ETHNICITY']
person['ethnicity_source_concept_id'] = '0'
personappend = pd.DataFrame(columns=["person_id","synthea_patient_id"])
personappend["person_id"] = person['person_id']
personappend["synthea_patient_id"] = df['Id']
personmap = personmap.append(personappend)
person = person[person['gender_concept_id'] != 0] # filter out person's with missing or unknown gender
location = pd.DataFrame(columns=self.model_schema['location'].keys())
location['location_id'] = df['locationtmp']
location['address_1'] = df['ADDRESS']
location['city'] = df['CITY']
location['state'] = df['STATE']
location['zip'] = df['ZIP']
location['county'] = df['COUNTY']
location['location_source_value'] = df['Id']
location['latitude'] = df['LAT']
location['longitude'] = df['LON']
# create empty death dataframe
death = pd.DataFrame()
return (person, location, death, personmap, person_id + len(person), location_id + len(location))
def conditionsToOmop(self, df, srctostdvm, condition_occurrence_id, drug_exposure_id, observation_id, personmap, visitmap):
df['conditiontmp'] = df.index + condition_occurrence_id # copy index into a temp column.
df['drugexposuretmp'] = df.index + drug_exposure_id # copy index into a temp column.
df['observationtmp'] = df.index + observation_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
condition_occurrence = pd.DataFrame(columns=self.model_schema['condition_occurrence'].keys())
condition_occurrence['condition_occurrence_id'] = df['conditiontmp']
condition_occurrence['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Condition') & (srctostdvm["target_vocabulary_id"]=='SNOMED') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
condition_occurrence['condition_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
condition_occurrence['condition_start_date'] = df['START']
condition_occurrence['condition_start_datetime'] = df['START'].apply(self.utils.getDefaultTimestamp)
condition_occurrence['condition_end_date'] = df['STOP']
condition_occurrence['condition_end_datetime'] = df['STOP'].apply(self.utils.getDefaultTimestamp)
condition_occurrence['condition_type_concept_id'] = '32020'
condition_occurrence['stop_reason'] = '0'
condition_occurrence['visit_occurrence_id'] = df['visit_occurrence_id']
condition_occurrence['visit_detail_id'] = '0'
condition_occurrence['condition_source_value'] = df['CODE']
condition_occurrence['condition_source_concept_id'] = df['CODE']
drug_exposure = pd.DataFrame(columns=self.model_schema['drug_exposure'].keys())
drug_exposure['drug_exposure_id'] = df['drugexposuretmp']
drug_exposure['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Drug') & (srctostdvm["target_vocabulary_id"]=='RxNorm') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
drug_exposure['drug_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
drug_exposure['drug_exposure_start_date'] = df['START']
drug_exposure['drug_exposure_start_datetime'] = df['START'].apply(self.utils.getDefaultTimestamp)
drug_exposure['drug_exposure_end_date'] = df['STOP']
drug_exposure['drug_exposure_end_datetime'] = df['STOP'].apply(self.utils.getDefaultTimestamp)
drug_exposure['verbatim_end_date'] = df['STOP']
drug_exposure['visit_occurrence_id'] = df['visit_occurrence_id']
drug_exposure['drug_source_value'] = df['CODE']
drug_exposure['drug_source_concept_id'] = df['CODE']
drug_exposure['drug_type_concept_id'] = '581452'
drug_exposure['refills'] = '0'
drug_exposure['quantity'] = '0'
drug_exposure['days_supply'] = '0'
drug_exposure['route_concept_id'] = '0'
drug_exposure['lot_number'] = '0'
drug_exposure['visit_detail_id'] = '0'
observation = pd.DataFrame(columns=self.model_schema['observation'].keys())
observation['observation_id'] = df['observationtmp']
observation['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Observation') & (srctostdvm["target_vocabulary_id"]=='SNOMED') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
observation['observation_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
observation['observation_date'] = df['START']
observation['observation_datetime'] = df['START'].apply(self.utils.getDefaultTimestamp)
observation['value_as_concept_id'] = '0'
observation['qualifier_concept_id'] = '0'
observation['unit_concept_id'] = '0'
observation['visit_occurrence_id'] = df['visit_occurrence_id']
observation['visit_detail_id'] = '0'
observation['observation_source_value'] = df['CODE']
observation['observation_source_concept_id'] = df['CODE']
observation['observation_type_concept_id'] = '38000280'
return (condition_occurrence, drug_exposure, observation, condition_occurrence_id + len(condition_occurrence) , drug_exposure_id + len(drug_exposure), observation_id + len(observation))
def careplansToOmop(self, df):
pass
def observationsToOmop(self, df, srctostdvm, srctosrcvm, measurement_id, personmap,visitmap):
# filter synthea observations with no encounter (original etl does this)
df['measurementtmp'] = df.index + measurement_id # copy index into a temp column.
df = df[~df.ENCOUNTER.isnull()]
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
measurement = pd.DataFrame(columns=self.model_schema['measurement'].keys())
measurement['measurement_id'] = df['measurementtmp']
measurement['person_id'] = df['person_id']
measurement['measurement_date'] = df['DATE']
measurement['measurement_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
measurement['measurement_time'] = df['DATE'] # check
measurement['visit_occurrence_id'] = df['visit_occurrence_id']
measurement['visit_detail_id'] = '0'
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Measurement') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df[['CODE']],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
measurement['measurement_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
measurement['measurement_source_value'] = df['CODE']
measurement['measurement_source_concept_id'] = df['CODE']
measurement['measurement_type_concept_id'] = '5001'
measurement['operator_concept_id'] = '0'
measurement['value_as_number'] = df['VALUE']
measurement['value_as_concept_id'] = '0'
measurement['unit_source_value'] = df['UNITS']
measurement['value_source_value'] = df['VALUE']
return (measurement, measurement_id + len(measurement))
def proceduresToOmop(self, df, srctostdvm, procedure_occurrence_id, personmap, visitmap):
df['proceduretmp'] = df.index + procedure_occurrence_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
# do procedures really map to measurements? There is no value and units?
#measurement = pd.DataFrame(columns=self.model_schema['measurement'].keys())
#measurement['person_id'] = df['PATIENT'].apply(self.patienthash)
#measurement['measurement_date'] = df['DATE']
#measurement['measurement_time'] = df['DATE'] # check
#measurement['value_as_number'] = df['VALUE']
#measurement['visit_occurrence_id'] = df['CODE']
#measurement['measurement_concept_id'] = df['CODE']
#measurement['measurement_type_concept_id'] = '5001'
#measurement['measurement_source_value'] = df['CODE']
#measurement['measurement_source_concept_id'] = df['CODE']
#measurement['unit_source_value'] = df['UNITS']
#measurement['value_source_value'] = df['VALUE']
procedure_occurrence = pd.DataFrame(columns=self.model_schema['procedure_occurrence'].keys())
procedure_occurrence['procedure_occurrence_id'] = df['proceduretmp']
procedure_occurrence['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Procedure') & (srctostdvm["target_vocabulary_id"]=='SNOMED') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df[['CODE']],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
procedure_occurrence['procedure_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
procedure_occurrence['procedure_date'] = df['DATE']
procedure_occurrence['procedure_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
procedure_occurrence['visit_occurrence_id'] = df['visit_occurrence_id']
procedure_occurrence['visit_detail_id'] = '0'
procedure_occurrence['procedure_type_concept_id'] = '38000275'
procedure_occurrence['modifier_concept_id'] = '0'
procedure_occurrence['procedure_source_value'] = df['CODE']
procedure_occurrence['procedure_source_concept_id'] = df['CODE']
return (procedure_occurrence, procedure_occurrence_id + len(procedure_occurrence))
def immunizationsToOmop(self, df, srctostdvm, drug_exposure_id, personmap, visitmap):
df['drugexposuretmp'] = df.index + drug_exposure_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
drug_exposure = pd.DataFrame(columns=self.model_schema['drug_exposure'].keys())
drug_exposure['drug_exposure_id'] = df['drugexposuretmp']
drug_exposure['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Drug') & (srctostdvm["target_vocabulary_id"]=='CVX') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
drug_exposure['drug_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
drug_exposure['drug_exposure_start_date'] = df['DATE']
drug_exposure['drug_exposure_start_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
drug_exposure['drug_exposure_end_date'] = df['DATE']
drug_exposure['drug_exposure_end_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
drug_exposure['verbatim_end_date'] = df['DATE']
drug_exposure['visit_occurrence_id'] = df['visit_occurrence_id']
drug_exposure['drug_source_value'] = df['CODE']
drug_exposure['drug_source_concept_id'] = df['CODE']
drug_exposure['drug_type_concept_id'] = '581452'
drug_exposure['refills'] = '0'
drug_exposure['quantity'] = '0'
drug_exposure['days_supply'] = '0'
drug_exposure['route_concept_id'] = '0'
drug_exposure['lot_number'] = '0'
drug_exposure['visit_detail_id'] = '0'
return (drug_exposure, drug_exposure_id + len(drug_exposure))
def encountersToOmop(self, df, observation_period_id, visit_occurrence_id, personmap, visitmap):
df['visittmp'] = df.index + visit_occurrence_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
# preprocess df
df['observation_period_start_date'] = df['START'].apply(self.utils.isoTimestampToDate)
df['observation_period_end_date'] = df['STOP'].apply(self.utils.isoTimestampToDate)
start = df.groupby('person_id')['observation_period_start_date'].agg(['first']).reset_index()
stop = df.groupby('person_id')['observation_period_end_date'].agg(['last']).reset_index()
observation_tmp = pd.merge(start, stop, on='person_id', how='inner')
observation_period = pd.DataFrame(columns=self.model_schema['observation_period'].keys())
observation_period['observationtmp'] = observation_tmp.index + observation_period_id
observation_period['observation_period_id'] = observation_period['observationtmp']
observation_period['person_id'] = observation_tmp['person_id']
observation_period['observation_period_start_date'] = observation_tmp['first']
observation_period['observation_period_end_date'] = observation_tmp['last']
observation_period['period_type_concept_id'] = '44814724'
        observation_period = observation_period.drop('observationtmp', axis=1)
observation_period_id = observation_period_id + len(observation_period)
visit_occurrence = pd.DataFrame(columns=self.model_schema['visit_occurrence'].keys())
visit_occurrence['visit_occurrence_id'] = df['visittmp']
visit_occurrence['person_id'] = df['person_id']
visit_occurrence['visit_start_date'] = df['START']
visit_occurrence['visit_end_date'] = df['STOP']
visit_occurrence['visit_concept_id'] = df['ENCOUNTERCLASS'].apply(self.utils.getVisitConcept)
visit_occurrence['visit_source_value'] = df['ENCOUNTERCLASS']
visit_occurrence['visit_type_concept_id'] = '44818517'
visitappend = pd.DataFrame(columns=["visit_occurrence_id","synthea_encounter_id"])
visitappend["visit_occurrence_id"] = visit_occurrence['visit_occurrence_id']
visitappend["synthea_encounter_id"] = df['Id']
visitmap = visitmap.append(visitappend)
return (observation_period, visit_occurrence, visit_occurrence_id + len(visit_occurrence), observation_period_id + len(observation_period), visitmap)
def organizationsToOmop(self, df, care_site_id):
care_site = pd.DataFrame(columns=self.model_schema['care_site'].keys())
return (care_site, care_site_id + len(care_site))
def providersToOmop(self, df, provider_id):
provider = pd.DataFrame(columns=self.model_schema['provider'].keys())
return (provider, provider_id)
def payertransitionToOmop(self, df):
pass
def allergiesToOmop(self, df, srctostdvm, observation_id, personmap, visitmap):
df['observationtmp'] = df.index + observation_id # copy index into a temp column.
df = | pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left') | pandas.merge |
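Every *ToOmop method above repeats the same vocabulary-mapping pattern: filter the source-to-standard map by target domain, left-merge on source_code, and fall back to concept id 0 for unmapped codes. A self-contained sketch of that pattern, with made-up codes and concept ids:
import pandas as pd
srctostdvm = pd.DataFrame({
    "source_code": ["195662009", "999000111"],
    "target_concept_id": [4112343, 4112344],
    "target_domain_id": ["Condition", "Condition"],
})
codes = pd.DataFrame({"CODE": ["195662009", "123456789"]})
filtered = srctostdvm[srctostdvm["target_domain_id"] == "Condition"]
mapped = pd.merge(codes, filtered[["source_code", "target_concept_id"]],
                  left_on="CODE", right_on="source_code", how="left")
concept_ids = mapped["target_concept_id"].fillna(0).astype(int)  # unmapped codes map to 0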
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 10:02:48 2020
@author: Matteo
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from pynverse import inversefunc
from IPython import get_ipython
get_ipython().magic('reset -sf')
import pandas as pd
from scipy.optimize import leastsq, least_squares, curve_fit
import os
import re
from scipy import stats
from PCI_o_B import SharedFunctions as sf
from datetime import datetime
import shutil
class CI():
def __init__(self):
"""Initialize ROIs
Parameters
----------
Filename: complete filename of the CI file
"""
self.FileList = []
self.ConfigFolder = 'C:\\Scattering_CCD\\ConfigFiles'
self.FolderName = []
self.Input_101 = []
self.Timepulse = False
self.Timepulse2 = False
# ROI specification...
self.nROI = []
self.ROIlist = []
self.ROIfilelist = []
self.hsize = 0
self.vsize = 0
self.nROI = 0
self.ROIlist = []
self.GlobalROIhsize = 0
self.GlobalROIvsize = 0
self.GlobalROItopx = 0
self.GlobalROItopy = 0
self.ROI_x_pos = []
self.lag = 0
self.CI = []
self.tau = []
self.qvetcros = []
self.Iav = []
#
self.filename = 'ROI'
self.cI_file_digits = 4
self.extension = 'cI.dat'
self.g2 = []
self.g2var = []
self.g2decaytime = []
#self.Input_101 =[]
def __repr__(self):
return '<ROI: fn%s>' % (self.FolderName)
def __str__(self):
str_res = '\n|---------------|'
str_res += '\n| CI class: '
str_res += '\n|--------------------+--------------------|'
str_res += '\n| folder : ' + str(self.FolderName)
str_res += '\n| number of ROIs : ' + str(self.nROI)
str_res += '\n| ROIs size : ' + str(self.GetROIsize())+ ' px'
str_res += '\n| lag time : ' +"{:.4f}".format(self.lag ) + ' s'
str_res += '\n| timepulse : ' +str(self.Timepulse2)
str_res += '\n|--------------------+--------------------|'
return str_res
def GetLagtime(self):
return self.lag
def GetFoldername(self):
return self.FolderName
def GetCI(self,*argv):
try:
return self.CI[argv[0]-1]
except IndexError:
return self.CI
def GetROIlist(self):
return self.ROIlist
def GetTau(self):
return self.tau
def GetROIsize(self):
size=[]
size.append(self.hsize)
size.append(self.vsize)
return size
def GetROInumber(self):
return self.nROI
def GetWINDOWsize(self):
size=[]
size.append(self.GlobalROIhsize)
size.append(self.GlobalROIvsize)
return size
def GetWINDOWtop(self):
top=[]
top.append(self.GlobalROItopx)
top.append(self.GlobalROItopy)
return top
def SetROIlist(self,window_top,window_size,ROI_size,Overlap=False):
if len(self.ROIlist) != 0:
            print('WARNING: ROIlist already set; using this function you are changing the ROIs, so they will no longer match the ones in the 101 file')
self.hsize = ROI_size[0]
self.vsize = ROI_size[1]
self.GlobalROIhsize = window_size[0]
self.GlobalROIvsize = window_size[1]
self.GlobalROItopx = window_top[0]
self.GlobalROItopy = window_top[1]
self.ROIlist = []
if Overlap == False:
n_ROI=[]
spaces=[]
for i in range(len(window_top)):
n,r=divmod(window_size[i]/ROI_size[i], 1)
n_ROI.append(n)
spaces.append(r)
self.nROI = int(n_ROI[0]*n_ROI[1])
if n_ROI[0] == 0 :
                print('ROI horizontal size larger than the horizontal size of the image')
return
if n_ROI[1] == 0 :
                print('ROI vertical size larger than the vertical size of the image')
return
gap_x = int((window_size[0] - n_ROI[0]*ROI_size[0])/n_ROI[0])
top_x=[]
for i in range(int(n_ROI[0])):
if spaces[0] == 0:
if i == 0:
top_x.append(window_top[0])
else:
top_x.append(window_top[0]+i*ROI_size[0])
else:
if i == 0:
top_x.append(window_top[0]+int(gap_x/2))
else:
top_x.append(window_top[0]+int(gap_x/2)+i*ROI_size[0]+gap_x)
            #this part of the code should be optimized but I'm lazy.....
gap_y = int((window_size[1] - n_ROI[1]*ROI_size[1])/n_ROI[1])
top_y=[]
for i in range(int(n_ROI[1])):
if spaces[1] == 0:
if i == 0:
top_y.append(window_top[1])
else:
top_y.append(window_top[1]+i*ROI_size[1])
else:
if i == 0:
top_y.append(window_top[1]+int(gap_y/2))
else:
top_y.append(window_top[1]+int(gap_y/2)+i*ROI_size[1]+gap_y)
for j in range(len(top_y)):
for i in range(len(top_x)):
self.ROIlist.append(top_x[i])
self.ROIlist.append(top_y[j])
self.ROIlist.append(ROI_size[0])
self.ROIlist.append(ROI_size[1])
else:
summ = 0;
top_x=[]
top_y=[]
self.nROI = 0
while self.GlobalROItopx + summ + self.hsize < self.GlobalROItopx +self.GlobalROIhsize :
top_x.append(self.GlobalROItopx+summ)
top_y.append(self.GlobalROItopy)
summ = summ + 50
self.nROI = self.nROI + 1
for j in range(len(top_y)):
self.ROIlist.append(top_x[j])
self.ROIlist.append(top_y[j])
self.ROIlist.append(ROI_size[0])
self.ROIlist.append(ROI_size[1])
return
def UploadInput_101_CalCI(self):
#
ROIliststr = []
for i in range(len(self.ROIlist)):
ROIliststr.append(str(self.ROIlist[i])+'\n')
try:
with open(self.ConfigFolder+'\\Input_101_CalcCI.dat') as fp:
self.Input_101 = fp.readlines()
fi = self.Input_101.index('** IMPORTANT: the top left pixel of the image has coordinates(1, 1)\n')
si = self.Input_101.index('** intensity threshold \n')
f2 = self.Input_101.index('** id number of the first ROI (will be used for the name of the cI output file(s))\n')
f1 = self.Input_101.index('** # of ROIs for which the correlation function will be calculated\n')
fp.close()
self.Input_101[fi+1:si] = ROIliststr
            self.Input_101[f1+1:f2] = [str(self.nROI) + '\n']
open(self.ConfigFolder+'\\Input_101_CalcCI.dat','w').close()
with open(self.ConfigFolder+'\\Input_101_CalcCI.dat', 'w') as f:
for item in self.Input_101:
f.write("%s" % item)
f.close()
except FileNotFoundError:
print('FileNotFoundError: no Input_101_CalcCI.dat in this directory!')
return
def LoadInput_101_CalCI(self):
#Loading the ROIs starting from the Input_101_CalcCI.dat, this file is supposed to be in the same folder of the ROI files
#this function allows to obtain the ROI list regardless the ROI list
# is generated or not with the method GenerateROIlist
self.nROI = 0
self.ROIlist = []
try:
with open(self.FolderName+'\\Input_101_CalcCI.dat') as fp:
self.Input_101 = fp.readlines()
for i in range(len(self.Input_101)):
if self.Input_101[i] == '** IMPORTANT: the top left pixel of the image has coordinates(1, 1)\n':
j=i+1
while self.Input_101[j] != '** intensity threshold \n':
self.ROIlist.append(int(self.Input_101[j]))
j=j+1
fp.close()
except FileNotFoundError:
print('FileNotFoundError: no Input_101_CalcCI.dat in this directory!')
return
self.nROI = int(len(self.ROIlist)/4)
self.hsize = int(self.ROIlist[2])
self.vsize = int(self.ROIlist[3])
return
def LoadCI(self, FolderName,lagtime,Normalization = False,Timepulse = False):
#This method automatically call the method LoadInput_101_CalCI(),
#and the load the CI files for each ROI
self.FolderName = FolderName
self.lag = lagtime
self.LoadInput_101_CalCI()
if Timepulse == False:
ROI_name_list=list()
for i in range(self.nROI):
ROI_name_list.append(str(1 + i).zfill(self.cI_file_digits))
self.ROIfilelist.append(self.filename + ROI_name_list[i]+ self.extension)
self.FileList.append(self.FolderName + '\\' + self.filename + ROI_name_list[i] + self.extension)
for i in range(self.nROI):
self.CI.append(pd.read_csv(self.FileList[i], sep='\\t', engine='python'))
if Normalization == True:
self.NoiseNormalization()
# get the tau list starting from the lag time
for i in range(len(self.CI[0].columns)):
if self.CI[0].columns[i].startswith('d'):
for char in self.CI[0].columns[i].split('d'):
if char.isdigit():
self.tau.append(float(char)*self.lag)
return
else:
self.Timepulse = True
print('deprecated (2021/04/16) use the function TimepulseOraganization instead')
self.TimepulseOraganization()
return
return
def TimepulseOraganization(self):
ROI_name_list=list()
CIlong = []
filenamelong = 'longtcI'
extension = '.dat'
for i in range(self.nROI):
ROI_name_list.append(str(1 + i).zfill(self.cI_file_digits))
self.ROIfilelist.append(filenamelong + ROI_name_list[i]+ self.extension)
self.FileList.append(self.FolderName + '\\' + filenamelong + ROI_name_list[i] + extension)
for i in range(self.nROI):
CIlong.append(pd.read_csv(self.FileList[i], sep='\\t', engine='python'))
ROI_name_list=list()
CIshort = []
filenameshort = 'shorttcI'
extension = '.dat'
for i in range(self.nROI):
ROI_name_list.append(str(1 + i).zfill(self.cI_file_digits))
self.ROIfilelist.append(filenameshort + ROI_name_list[i]+ self.extension)
self.FileList.append(self.FolderName + '\\' + filenameshort + ROI_name_list[i] + extension)
for i in range(self.nROI):
CIshort.append(pd.read_csv(self.FileList[self.nROI + i], sep='\\t', engine='python'))
CIall = []
cibho = []
for i in range(self.nROI):
CIall.append(CIshort[i].merge(CIlong[i], how='right', on = 'tsec', suffixes=('short', '')))
CIall[i].set_index(CIlong[i].index, inplace=True)
CIall[i].sort_values(by=['tsec'], inplace=True)
CIall[i].reset_index(drop=True, inplace=True)
#CIall[i].drop(['Iave', 'd0ave'], axis=1)
CIall[i].drop(columns=['Iave', 'd0ave'], inplace=True)
col_name="d0"
first_col = CIall[i].pop(col_name)
CIall[i].insert(0, col_name, first_col)
col_name="I"
first_col = CIall[i].pop(col_name)
CIall[i].insert(0, col_name, first_col)
col_name="tsec"
first_col = CIall[i].pop(col_name)
CIall[i].insert(0, col_name, first_col)
cibho.append(CIall[i])
self.CI = cibho
self.tau.append(0)
for i in range(len(self.CI[0].columns)):
if self.CI[0].columns[i].startswith('usec'):
for char in self.CI[0].columns[i].split('usec'):
if char.isdigit():
self.tau.append(float(char)*10**-6)
for i in range(len(self.CI[0].columns)):
if self.CI[0].columns[i].startswith('sec'):
for char in self.CI[0].columns[i].split('sec'):
try:
self.tau.append(float(char))
except ValueError:
a = 0
return
def LoadTimePulseCI(self, FolderName,Normalization = False):
#This method automatically call the method LoadInput_101_CalCI(),
#and the load the CI files for each ROI
self.FolderName = FolderName
self.LoadInput_101_CalCI()
#get list of time delays within a cycle
pulse_time,n_pulses,cycle_dur = self.TimePulseLoad()
self.lag = pulse_time[1]
ROI_name_list=[]
for i in range(self.nROI):
ROI_name_list.append(str(1 + i).zfill(self.cI_file_digits))
self.ROIfilelist.append(self.filename + ROI_name_list[i]+ self.extension)
self.FileList.append(self.FolderName + '\\' + self.filename + ROI_name_list[i] + self.extension)
for i in range(self.nROI):
self.CI.append(pd.read_csv(self.FileList[i], sep='\\t', engine='python'))
self.Timepulse2 = True
if Normalization == True:
self.NoiseNormalization()
delays = np.asarray(self.CI[0].columns[2:],dtype = str)
for i in range(delays.size): delays[i] = delays[i].replace('d','')
delays = delays.astype(int)
ndelays = delays.size
#time at which each image was taken (in sec, t=0 at the beginning of the cI file)
ntimes = self.CI[0]['n'].size
time_im = np.zeros(ntimes,dtype = np.float64)
time_im[0] = pulse_time[0]
for j in range(0,time_im.size):
time_im[j] = cycle_dur*(j//n_pulses) + pulse_time[j%n_pulses]
#time delay between all pairs of images for which cI has been calculated
tau_true = np.ones((ntimes,ndelays),dtype = np.float64)*np.nan
for r in range(ntimes):
for c in range(ndelays):
r2 = r+delays[c]
if r2 < ntimes: tau_true[r,c] = time_im[r2]-time_im[r]
tau_true = np.round(tau_true,6) #the time resolution is 1E-6 sec....
#get a sorted array with all unique delays, excluding nan and inf
a = np.sort(np.unique(tau_true))
a = a[np.isfinite(a)]
# "consolidate" list of delays, by grouping delays whose ratio is between 1
# and rel_diff
rel_diff = 1.05
#define bins to which all delays will be assigned. To avoid problems with roundoff, we slightly shift all bin edges to the left
epsilon = 1E-6
        bins = [a[0]-epsilon, a[1]-epsilon]  # define the first bin so that it contains a[0]
        print(bins)
pb = a[1]
for j in range(2,a.size):
if a[j] >= rel_diff*pb:
bins.append(a[j]-epsilon)
pb = a[j]
#get time delay corresponding to each bin: average of time delays that belong to
# that bin
tau_mean, bin_edges, binnum = stats.binned_statistic(a,a,statistic = 'mean', bins=bins)
self.tau = list(tau_mean)
col_names = ['tsec','n','Iav']
for t in tau_mean:
col_names.append(format(t,'.3e')+' s')
# "consolidate" cIs, i.e. for each t average them over delays tau that fall in
# the same bin. Store in pandas dataframe and output to file consolidated cIs
for i in range(self.nROI):
now = datetime.now()
print("time =", now)
print('Calculating cIs for all available time delays (in sec), for ROI ' + str(i+1) + ' over ' +str(self.nROI))
cIcons = pd.DataFrame(index=range(ntimes),columns=col_names)
cIcons['tsec'] = time_im
cIcons['n'] = self.CI[i]['n']
cIcons['Iav'] = self.CI[i]['Iav']
binning = []
binindex = []
for j in range(time_im.size):
cI = np.asarray(self.CI[i].iloc[j,2:])
good = ~np.isnan(tau_true[j]) & ~np.isnan(cI)
prova = []
if (cI[good].size>0):
now = datetime.now()
#print("now_befor_av =", now)
cImean, bin_edges, binnum2 = stats.binned_statistic(tau_true[j][good],\
cI[good], statistic = 'mean', bins=bins)
now = datetime.now()
#print("now_after_av =", now)
now = datetime.now()
#print("now_befor_iloc =", now)
cIcons.iloc[j,3:] = cImean
now = datetime.now()
#print("now_after_iloc =", now)
binning.append(bin_edges)
binindex.append(binnum2)
self.CI[i] = []
self.CI[i] = cIcons
for i in range(self.nROI):
self.Iav.append(self.CI[i]['Iav'])
self.CI[i].drop(['Iav'], axis=1,inplace=True)
return
def Save_CSV(self):
folder_CI_Processed = self.FolderName + '\\processed_CI\\'
try:
os.mkdir(folder_CI_Processed)
except FileExistsError:
print('directory already existing, graphs will be uploaded')
for i in range(self.nROI):
self.CI[i].to_csv(folder_CI_Processed + 'ROI' + str(i+1).zfill(4) + 'cI.dat',sep='\t',index=False,na_rep='NaN')
tausave = | pd.Series(self.tau) | pandas.Series |
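The delay-consolidation step in LoadTimePulseCI (grouping delays whose ratio stays below rel_diff = 1.05 and averaging within each bin) is easier to see on a tiny standalone example; the toy delays and the explicit closing bin edge below are additions for illustration only.
import numpy as np
from scipy import stats
delays = np.array([0.001, 0.00101, 0.002, 0.0021, 0.01, 0.0104])  # toy delays in seconds
rel_diff, eps = 1.05, 1e-6
bins = [delays[0] - eps, delays[1] - eps]
prev = delays[1]
for d in delays[2:]:
    if d >= rel_diff * prev:  # start a new bin only when the delay has grown by more than 5%
        bins.append(d - eps)
        prev = d
bins.append(delays[-1] + eps)  # closing edge (added here) so the largest delays fall inside a bin
tau_mean, _, _ = stats.binned_statistic(delays, delays, statistic="mean", bins=bins)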
import bz2
import numpy as np
import pandas as pd
import pickle
import requests
import re
import os
import shutil
import tarfile
from zipfile import ZipFile
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.datasets import load_iris, load_digits, load_svmlight_file
from sklearn.datasets import fetch_20newsgroups, fetch_openml, fetch_covtype
from sklearn.preprocessing import MinMaxScaler
def get_bz(name, url):
bzfile = "/tmp/{}.bz".format(name)
txtfile = "/tmp/{}.txt".format(name)
r = requests.get(url, allow_redirects=True)
with open(bzfile, "wb") as f:
f.write(r.content)
with open(bzfile, 'rb') as f:
d = bz2.decompress(f.read())
with open(txtfile, 'wb') as f:
f.write(d)
data = load_svmlight_file(txtfile)[0].toarray()
os.remove(bzfile)
os.remove(txtfile)
return data
def get_anuran():
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00406/Anuran%20Calls%20(MFCCs).zip'
zipresp = requests.get(url, allow_redirects=True)
with open("/tmp/tempfile.zip", "wb") as f:
f.write(zipresp.content)
zf = ZipFile("/tmp/tempfile.zip")
zf.extractall(path='/tmp/')
zf.close()
data = pd.read_csv('/tmp/Frogs_MFCCs.csv').iloc[:,:22].values
os.remove('/tmp/Frogs_MFCCs.csv')
return data.astype(float), 10
def get_avila():
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00459/avila.zip'
zipresp = requests.get(url, allow_redirects=True)
with open("/tmp/tempfile.zip", "wb") as f:
f.write(zipresp.content)
zf = ZipFile("/tmp/tempfile.zip")
zf.extractall(path='/tmp/')
zf.close()
with open('/tmp/avila/avila-tr.txt', 'r') as f:
text = [l for l in f.readlines()]
data_train = np.array([[float(j) for j in text[i].split(',')[:-1]]
for i in range(len(text))])
with open('/tmp/avila/avila-ts.txt', 'r') as f:
text = [l for l in f.readlines()]
data_test = np.array([[float(j) for j in text[i].split(',')[:-1]]
for i in range(len(text))])
data = np.concatenate((data_train, data_test))
mms = MinMaxScaler()
data = mms.fit_transform(data)
shutil.rmtree('/tmp/avila')
os.remove('/tmp/tempfile.zip')
return np.array(data), 12
def get_beer():
url = 'https://www.openml.org/data/download/21552938/dataset'
r = requests.get(url, allow_redirects=True)
lines = r.text.split('\n')
floats = [re.findall(r'\d\.\d', l) for l in lines]
data = np.array([[float(v) for v in f][:-1]
for f in floats
if len(f) == 6])
return data, 104
def get_bng():
url = 'https://www.openml.org/data/download/583865/BNG_audiology_1000_5.arff'
r = requests.get(url, allow_redirects=True)
lines = r.text.split('\n')
vals = [l.split(',') for l in lines]
vals = [v[:-1] for v in vals if len(v) == 70]
data = pd.DataFrame(vals)
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contaminant of concern dataset for superfund sites import
This import uses the dataset:
- ./data/401062.xlsx
This file describes Contaminant of concern data from Superfund decision documents issued in fiscal years 1982-2017. Includes sites 1) final or deleted on the National Priorities List (NPL); and 2) sites with a Superfund Alternative Approach (SAA) Agreement in place. The only sites included that are 1) not on the NPL; 2) proposed for NPL; or 3) removed from proposed NPL, are those with an SAA Agreement in place.
- ./data/contaminants.csv
This file is a curated list of contaminant names and their respective ids on the Data Commons knowledge graph
"""
from absl import app, flags
import os
import sys
import pandas as pd
# Allows the following module imports to work when running as a script
_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_SCRIPT_PATH, '../../..')) # for superfund_vars
from us_epa.util.superfund_vars import _CONTAMINATED_THING_DCID_MAP
sys.path.append(os.path.join(_SCRIPT_PATH,
'../../../../util/')) # for statvar_dcid_generator
from statvar_dcid_generator import get_statvar_dcid
_TEMPLATE_MCF = """
Node: E:SuperfundSite->E0
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->observationAbout
observationDate: C:SuperfundSite->observationDate
variableMeasured: C:SuperfundSite->variableMeasured
value: C:SuperfundSite->value
"""
_CONTAMINANTS_MAP = './contaminants.csv'
_DATASET_NAME = "./401062.xlsx"
_COL_NAME_MAP = {
'EPA ID': 'observationAbout',
'Actual Completion Date': 'observationDate'
}
def make_contamination_svobs(df: pd.DataFrame,
output_path: str) -> pd.DataFrame:
"""
Function makes SVObs of contaminated medium at the site concatenated with '&' as the observed value.
"""
df = df.drop_duplicates()
# there are some rows where contaminatedThing is nan, which we drop
df = df.dropna()
df['Media'] = df['Media'].apply(lambda x: _CONTAMINATED_THING_DCID_MAP[x])
df = df.groupby(['EPA ID', 'Actual Completion Date'],
as_index=False)['Media'].apply('&'.join).reset_index()
# NOTE: The following check is put in place to resolve pandas version issues that affect the
# DataFrame returned by the `groupby` call above. Without this check, the cloud-build tests
# (Python 3.7 with pandas==1.0.4) replace the grouped column name `Media` with the 0-based
# index 0, whereas pandas==1.3.4 on Python 3.9 preserves the column name after groupby.
if 'Media' not in df.columns and 0 in df.columns:
df.columns = ['EPA ID', 'Actual Completion Date', 'Media']
else:
df.drop(columns='index', inplace=True)
df['Media'] = 'dcs:' + df['Media']
df['variableMeasured'] = 'dcs:ContaminatedThing_SuperfundSite'
df.rename(columns={
'EPA ID': 'observationAbout',
'Actual Completion Date': 'observationDate',
'Media': 'value'
},
inplace=True)
df = df[[
'observationAbout', 'observationDate', 'variableMeasured', 'value'
]]
## write or create the statvar.mcf file
f = open(os.path.join(output_path, "superfund_sites_contamination.mcf"),
"w")
node_str = f"Node: dcid:ContaminatedThing_SuperfundSite\n"
node_str += "typeOf: dcs:StatisticalVariable\n"
node_str += "populationType: dcs:SuperfundSite\n"
node_str += "statType: dcs:measurementResult\n"
node_str += "measuredProperty: dcs:contaminatedThing\n\n"
f.write(node_str)
f.close()
return df
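# A resulting SVObs row looks roughly like this (illustrative values; the medium
# dcids come from _CONTAMINATED_THING_DCID_MAP):
# observationAbout=epaSuperfundSiteId/<EPA ID>, observationDate=<YYYY-MM-DD>,
# variableMeasured=dcs:ContaminatedThing_SuperfundSite, value=dcs:<medium1>&<medium2>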
def write_sv_to_file(row, contaminant_df, file_obj):
"""
Function generates Statistical Variables for the contaminant + contaminatedThing as properites. The value observed for the generated Statistical Variables is a boolean, and is always True
"""
try:
contaminated_thing = _CONTAMINATED_THING_DCID_MAP[row['Media']]
contaminant_series = contaminant_df[contaminant_df['CommonName'] ==
row['Contaminant Name']]
# NOTE: Currently, the script does not handle isotopes and cases where
# there are multiple node dcids mapping to the same element/compund's
# commonName -- hence we take the first occurance of this name
# TODO: Handle isotopes and different compound names
contaminant_series = contaminant_series.iloc[0]
sv_dict = {
"contaminatedThing": f"{contaminated_thing}",
"measuredProperty": "isContaminated",
"contaminant": f"{contaminant_series['CommonName'].title()}"
}
dcid_str = get_statvar_dcid(sv_dict)
node_str = f"Node: dcid:{dcid_str}\n"
node_str += "typeOf: dcs:StatisticalVariable\n"
node_str += "populationType: dcs:SuperfundSite\n"
node_str += "statType: dcs:measurementResult\n"
node_str += f"contaminant: dcs:{contaminant_series['dcid']}\n"
node_str += f"contaminatedThing: dcs:{contaminated_thing}\n"
node_str += "measuredProperty: dcs:isContaminated\n\n"
## append generated statvars to statvar.mcf file
file_obj.write(node_str)
row['variableMeasured'] = dcid_str
row['value'] = True
return row
except KeyError:
## when no matching contaminatedThing or contaminant is found.
print("No matching contaminatedThing or contaminant is found")
row['variableMeasured'] = None
row['value'] = None
return row
def process_site_contamination(input_path: str, contaminant_csv_path: str,
output_path: str) -> int:
"""
Function to process the raw dataset and generate clean csv + tmcf files.
"""
## Create output directory if not present
if not os.path.exists(output_path):
os.makedirs(output_path)
## contaminant to csv mapping
con_csv_path = os.path.join(contaminant_csv_path, _CONTAMINANTS_MAP)
contaminant_csv = pd.read_csv(con_csv_path,
sep='|',
usecols=['dcid', 'CommonName'])
## drop rows with empty dcid
contaminant_csv = contaminant_csv.loc[~pd.isnull(contaminant_csv['dcid'])]
## replace all non-alphanumeric with `_`
contaminant_csv['CommonName'] = contaminant_csv['CommonName'].str.replace(
r'[^\w\s]', '_', regex=True)
## contamination at superfund sites
contamination_data_path = os.path.join(input_path, _DATASET_NAME)
contamination_data = pd.read_excel(contamination_data_path,
header=1,
usecols=[
'EPA ID', 'Actual Completion Date',
'Media', 'Contaminant Name'
])
contamination_data['Actual Completion Date'] = pd.to_datetime(
contamination_data['Actual Completion Date']).dt.strftime('%Y-%m-%d')
contamination_data[
'EPA ID'] = 'epaSuperfundSiteId/' + contamination_data['EPA ID']
clean_csv = pd.DataFrame()
df = make_contamination_svobs(
contamination_data[['EPA ID', 'Actual Completion Date', 'Media']],
output_path)
clean_csv = pd.concat([clean_csv, df], ignore_index=True)
import os
import gc
import sys
import time
import click
import random
import sklearn
import numpy as np
import pandas as pd
import lightgbm as lgb
from tqdm import tqdm
from pprint import pprint
from functools import reduce
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score, roc_curve
from config import read_config, KEY_FEATURE_MAP, KEY_MODEL_MAP
from utils import timer
from features.base import Base
from features.stacking import StackingFeaturesWithPasses
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def get_train_test(conf):
df = Base.get_df(conf) # pd.DataFrame
feature_classes = [KEY_FEATURE_MAP[key] for key in conf.features]
features = [df]
for feature in feature_classes:
with timer(f"load (or create) {feature.__name__}"):
f = feature.get_df(conf)
features.append(f)
with timer("join on SK_ID_CURR"):
df = reduce(lambda lhs, rhs: lhs.merge(rhs, how='left', on='SK_ID_CURR'), features)
del features
gc.collect()
train_df = df[df['TARGET'].notnull()].copy()
test_df = df[df['TARGET'].isnull()].copy()
del df
gc.collect()
return train_df, test_df
def get_feature_importances(data, shuffle, seed=None):
# Gather real features
train_features = [f for f in data.columns if f not in ([
'TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index'
])]
# Go over fold and keep track of CV score (train and valid) and feature importances
# Shuffle target if required
y = data['TARGET'].copy()
if shuffle:
# Here you could as well use a binomial distribution
y = data['TARGET'].copy().sample(frac=1.0)
# Fit LightGBM in RF mode, yes it's quicker than sklearn RandomForest
dtrain = lgb.Dataset(data[train_features], y, free_raw_data=False, silent=True)
lgb_params = {
'objective': 'binary',
'boosting_type': 'rf',
'subsample': 0.623,
'colsample_bytree': 0.7,
'num_leaves': 127,
'max_depth': 8,
'seed': seed,
'bagging_freq': 1,
'num_threads': 4,
'verbose': -1
}
# Fit the model
clf = lgb.train(params=lgb_params, train_set=dtrain, num_boost_round=600)
# Get feature importances
imp_df = pd.DataFrame()
imp_df["feature"] = list(train_features)
imp_df["importance_gain"] = clf.feature_importance(importance_type='gain')
imp_df["importance_split"] = clf.feature_importance(importance_type='split')
imp_df['trn_score'] = roc_auc_score(y, clf.predict(data[train_features]))
return imp_df
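# Note: the null-importance scheme compares each feature's actual importance
# against the distribution of importances obtained with a shuffled target.
# A commonly used score (an assumption here, not shown in this file) is
# log((1 + actual_gain) / (1 + 75th-percentile of the null gains)); features
# whose real importance barely beats their shuffled-target importance are then
# dropped before refitting.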
def score_feature_selection(df=None, train_features=None, target=None):
# Fit LightGBM
dtrain = lgb.Dataset(df[train_features], target, free_raw_data=False, silent=True)
lgb_params = {
'objective': 'binary',
'boosting_type': 'gbdt',
'learning_rate': .1,
'subsample': 0.8,
'colsample_bytree': 0.8,
'num_leaves': 31,
'max_depth': -1,
'seed': 13,
'num_threads': 4,
'min_split_gain': .00001,
'reg_alpha': .00001,
'reg_lambda': .00001,
'metric': 'auc'
}
# Fit the model
hist = lgb.cv(
params=lgb_params,
train_set=dtrain,
num_boost_round=2000,
nfold=5,
stratified=True,
shuffle=True,
early_stopping_rounds=50,
verbose_eval=500,
seed=47
)
# Return the last mean / std values
return hist['auc-mean'][-1], hist['auc-stdv'][-1]
@click.command()
@click.option('--config_file', type=str, default='./configs/lgbm_0.json')
def main(config_file):
np.random.seed(47)
conf = read_config(config_file)
print("config:")
pprint(conf)
data, _ = get_train_test(conf)
with timer("calc actual importance"):
if os.path.exists("misc/actual_imp_df.pkl"):
actual_imp_df = pd.read_pickle("misc/actual_imp_df.pkl")
else:
actual_imp_df = get_feature_importances(data=data, shuffle=False)
actual_imp_df.to_pickle("misc/actual_imp_df.pkl")
print(actual_imp_df.head())
with timer("calc null importance"):
nb_runs = 100
if os.path.exists(f"misc/null_imp_df_run{nb_runs}time.pkl"):
null_imp_df = pd.read_pickle(f"misc/null_imp_df_run{nb_runs}time.pkl")
#Import the libraries
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
import yfinance as yf
import datetime
import math
from datetime import timedelta
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt import plotting
import cvxpy as cp
# Import libraries to fetch historical EUR/USD prices
from forex_python.converter import get_rate
from joblib import Parallel, delayed
DATE_FORMAT = '%Y-%m-%d'
# Database maintainance functions
#Connects to a the pre-existing CSV price database
def connectAndLoadDb(exchange):
"""Connects to and loads the data for an exchange.
Parameters
----------
exchange : str
The name of the exchange stored at
"Price Databases\database_"+str(exchange)+".csv"
Returns
-------
DataFrame
database with dates & assets prices
in the native currency in each column
"""
print("Connecting database:"+str(exchange))
filename="Price Databases\database_"+str(exchange)+".csv"
database = pd.read_csv(filename,index_col=False)
print("Database connected!")
return database
#Gets the latest date of data in the db
def getLastEntryDate(database):
"""Gets the most recent entry date from
the prices database
Parameters
----------
database : DataFrame
The database of prices with a date column or index 'Date'
Returns
-------
str
The most recent entry date in '%Y-%m-%d' format
"""
lastDateEntry = database.iloc[-1]['Date']
lastDateEntry = datetime.datetime.strptime(lastDateEntry, DATE_FORMAT)
return lastDateEntry
#Writes the updated pandas dataframe to the CSV
def writeDbToExcelFile(database,exchange):
"""Saves the database as a csv to the directory:
'Price Databases\database_'+str(exchange)+'.csv'
Parameters
----------
database : DataFrame
The database of prices with a date column or index 'Date'
exchange : str
The name of the index to use in the filename
"""
filename='Price Databases\database_'+str(exchange)+'.csv'
print('Writing database to filename: '+ filename)
database.index=database['Date']
database.drop(['Date'],axis=1,inplace=True)
database.to_csv(filename)
print('Database updated with new entries!!')
#Formats the date from number for printing
def prettyPrintDate(date):
"""Formats a date string to '%Y-%m-%d' format,
used to consistently print the same date format
Parameters
----------
date : str
The date we want to format
"""
return date.strftime(DATE_FORMAT)
#Data Fetching functions
#get ticker list from our tsv files
def getTickers(exchange):
"""Pulls in the list of stock tickers for an exchange
stored at 'Company lists/companylist_'+str(exchange)+'.tsv'
Parameters
----------
exchange : str
The name of the exchange stored at
'Company lists/companylist_'+str(exchange)+'.tsv'
Returns
-------
l_tickers : list
list of stock tickers listed on the exchange
"""
#We have the lists saved as TSV ie delimited with tabs rather than commas
df_info=pd.read_csv('Company lists/companylist_'+str(exchange)+'.tsv',sep='\t')
l_tickers=df_info.Symbol.tolist()
return l_tickers
# Updates data for a given exchange or creates a db from a given ticker list
def fetchData(database,exchange,start_date, refetchAll = False):
"""adds adj closing price data from a given exchange
from date using Yfinance.
Parameters
----------
database : DataFrame
The data base of prices to be appended.
Empty DataFrame if starting a new prices database.
start_date : str
When refetchAll=True this denotes the start date 'YYYY-MM-DD'
to pull data from up to yesterday
default is '2006-01-01'.
exchange : str
The name of the exchange stored at
'Company lists/companylist_'+str(exchange)+'.tsv'
refetchAll : Boolean
False: updates price data from the latest entry up to yesterday
True: refetches all price data from '2006-01-01' to yesterday
Returns
-------
database : DataFrame
The database of with latest prices added.
"""
if refetchAll == True:
lastEntryDate = datetime.datetime.strptime(start_date, DATE_FORMAT) #Start date here
else:
lastEntryDate = getLastEntryDate(database)
ydaysDate = datetime.datetime.today() - timedelta(days = 1)
# checks is the data base already up to date
if lastEntryDate >= ydaysDate:
print('Data already loaded up to Yesterday')
return database
else:
print("Last entry in Db is of :" + prettyPrintDate(lastEntryDate))
print("----------------------------------------------")
dateToFetch = lastEntryDate + timedelta(days=1)
dateStr = prettyPrintDate(dateToFetch)
print('Fetching stock closing price of '+str(exchange)+' for days over: ' + dateStr)
l_tickers=getTickers(exchange)
#Pulling adj closing price data from yfinance
mergedData = yf.download(l_tickers,dateToFetch)['Adj Close']
#Making date the index col
mergedData['Date']=mergedData.index
#append our new data onto the existing databae
database = database.append(mergedData, ignore_index=True)
print("----------------------------------------------")
print("Data fill completed! 👍👍")
return database
# one line function to create or update a db for a given exchange
def update_db(exchange, start_date='2006-01-01',refetchAll = False):
"""One-line function that pulls adj closing price data for
a given exchange into a DataFrame and saves as a csv to:
'Price Databases\database_'+str(exchange)+'.csv'.
Parameters
----------
exchange : str
The name of the exchange stored at
'Company lists/companylist_'+str(exchange)+'.tsv'
start_date : str
When refetchAll=True this denotes the start date 'YYYY-MM-DD'
to pull data from up to yesterday
default is '2006-01-01'.
refetchAll : Boolean
False: updates price data from the latest entry up to yesterday
True: refetches all price data from '2006-01-01' to yesterday
Returns
-------
database : DataFrame
The database of with latest prices added for the exchange.
"""
if refetchAll == True:
#For a fresh run
database = pd.DataFrame()
database = fetchData(database, exchange, start_date, refetchAll)
else:
# Load in & Update an existing database
database = connectAndLoadDb(exchange)
database = fetchData(database,exchange, start_date)
# Drop the last entry prior to saving as it probably is not a full days data
database.drop(database.tail(1).index, inplace = True)
# Write the data to CSV
writeDbToExcelFile(database,exchange)
return
# for a given echange removes any tickers which have all NULLS in the data base
def cleanCompanyList(exchange):
"""After database is created run this to check for any empty
columns and remove the ticket from the company list.
After this is ran re run update_db with Refetchall = True.
Parameters
----------
exchange : str
The name of the database stored at
'Company lists/companylist_'+str(exchange)+'.tsv'
"""
#Load db
df=connectAndLoadDb(exchange)
#create list of NULL columns
l_drop=df.columns[df.isna().all()].tolist()
#read in company list TSV
df_info=pd.read_csv('Company lists/companylist_'+str(exchange)+'.tsv',sep='\t')
df_info.drop(columns=['Unnamed: 0'],inplace=True)
df_info.index=df_info.Symbol
#drop listed rows
df_info.drop(index=l_drop, inplace=True)
df_info.reset_index(drop=True, inplace=True)
df_info.to_csv('Company lists/companylist_'+str(exchange)+'.tsv',sep='\t')
print(str(len(l_drop))+' Rows dropped from '+str(exchange))
return
def net_gains(principal,expected_returns,years,people=1):
"""Calculates the net gain after Irish Capital Gains Tax of a given principal for a given expected_returns over a given period of years"""
cgt_tax_exemption=1270*people #tax free threashold all gains after this are taxed at the cgt_ta_rate
cgt_tax_rate=0.33 #cgt_tax_rate as of 19/3/21
total_p=principal
year=0
while year < years:
year+=1
gross_returns=total_p*expected_returns
if gross_returns >cgt_tax_exemption:
taxable_returns=gross_returns-cgt_tax_exemption
net_returns=cgt_tax_exemption+(taxable_returns*(1-cgt_tax_rate))
else:
net_returns=gross_returns
total_p= total_p + net_returns
return total_p
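# Example (hypothetical figures): net_gains(10000, 0.07, 10) compounds a
# €10,000 principal at 7% a year for 10 years, taxing each year's gain above
# the €1,270 annual CGT exemption at 33% before it is reinvested.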
def gen_curr_csv(start_date='2006-01-01'):
"""
Generates a dataframe of daily exchange rates into EUR (for USD, JPY and GBP)
from start_date up to yesterday, and saves it to "Price Databases\curr_rates.csv".
start_date : str
Start date 'YYYY-MM-DD' to pull rates from, up to yesterday;
default is '2006-01-01'.
"""
input_currencies = ['USD','JPY','GBP']
start_date = datetime.datetime.strptime(start_date, DATE_FORMAT)
print("Fetching Currecy rates from : "+prettyPrintDate(start_date))
print("For Eur from : "+str(input_currencies))
# May take up to 50 minutes to generate full set of rates
end_date = (datetime.datetime.today() - timedelta(1))
#end_date = datetime.datetime(2008,2,2).date() # For testing
print("Generating date list")
# Generate list of dates
dates = []
for i in range((end_date - start_date).days + 1):
dates.append((start_date + timedelta(i)))
# Add dates to dataframe
rates_df = pd.DataFrame()
rates_df['Date'] = dates
#attempted to speed up by parallelising the date loops, this just over halves the time to run on the 15 years of data
for curr in input_currencies:
print("Fetching exchange data for: "+str(curr))
rates_df[curr]=Parallel(n_jobs=-1)(delayed(get_rate)(curr,'EUR', date) for date in dates)
print("Currecy rates updated")
# Saved into the folder with the rest of our pricing data
print("Writing database to filename: Price Databases\curr_rates.csv")
rates_df.to_csv("Price Databases\curr_rates.csv")
print("Database updated with new entries!!")
return
def load_curr_csv(stocks_df,input_curr):
"""
Loads FX rates data, and converts historical stock prices to EUR using the rate at the time
"""
rates_df = pd.read_csv("Price Databases\curr_rates.csv")
rates_df=rates_df.set_index(pd.DatetimeIndex(rates_df['Date'].values))
rates_df.drop(columns=['Date'],axis=1, inplace=True)
if not input_curr in list(rates_df.columns):
return 'Currency not supported'
rates_df = rates_df.merge(stocks_df,left_index=True, right_index=True).drop(columns=stocks_df.columns)
# Multiply each row of stocks dataframe by its' corresponding exchange rate
result = pd.DataFrame(np.expand_dims(np.array(rates_df[input_curr]), axis=-1) * np.array(stocks_df),columns=stocks_df.columns,index=stocks_df.index)
return result
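# Example usage (assumed exchange name; assumes the price and rates databases exist):
# df = connectAndLoadDb('NASDAQ')
# df.index = pd.DatetimeIndex(df['Date'])
# df_eur = load_curr_csv(df.drop(columns=['Date']), 'USD')  # native USD prices -> EUR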
def priceDB_validation(database):
"""Checks the prices database for negative stock prices; any columns
containing negative prices are dropped (re-pulling the data for the
offending tickers is currently disabled in the code below).
Parameters
----------
database : DataFrame
The dataframe of stock prices to be checked.
Returns
-------
database : DataFrame
The prices database with the offending stocks removed.
"""
#check for negative prices (should not have any)
neg_cols=database.columns[(database < 0).any()]
print('---------------------------------------------------------------------')
print('Negative prices are seen in the following assets: '+str(len(neg_cols)))
if len(neg_cols) >0:
print(neg_cols.tolist())
#Drop the offending columns
print('The following columns have been dropped: ')
print(neg_cols.tolist())
database.drop(columns=neg_cols.tolist(), inplace=True)
#I cant get this part working so i am just droping the columns that have issues for now
#Try to fix by rerunning the data
#df_retry=yf.download(neg_cols.tolist(),'2006-1-1')['Adj Close']
#print('Are there negatives in the repulled data : '+str((df_retry< 0).any()))
#if (df_retry< 0).any() ==True:
# print('Issue not solved by repulling data so the following columns have been dropped:')
# print(neg_cols.tolist())
# database.drop(columns=neg_cols.tolist(), inplace=True)
#else:
# print('Issue has been solved by repulling data, the following columns have been updated with repulled data:')
# print(neg_cols.tolist())
# database[neg_cols]=yf.download(neg_cols.tolist(),'2006-1-1')['Adj Close']
return database
#generates historic performance data
def portfolio_generate_test(database,startdate,enddate,p_max=400, min_returns=0.01, s_asset=0, asset_len=50, obj_method='SHARPE', target_percent=0.1, silent=True):
"""Builds and back-tests a portfolio from the prices database.
The price history between startdate and enddate is filtered (stocks that are
unaffordable at the latest price, mostly NA, delisted, or below the minimum
return threshold are dropped), the top performing assets are kept, and
PyPortfolioOpt is used to find the optimal weights for the chosen objective
(SHARPE, MIN_VOL, RISK or RETURN). The resulting portfolio is then
evaluated over the year following enddate.
Parameters
----------
database : DataFrame
The dataframe of stock prices.
startdate, enddate : str
Date range of price history used to build the portfolio.
"""
if silent == False:
print('Running for :'+str(startdate)+' to '+str(enddate))
# Subset for date range
df_input=database[startdate:enddate].copy()
if silent == False:
print ("Initial number of stocks: "+str(len(df_input.columns)))
#Check for stocks which are too expensive for us to buy & drop those
p_now=database.iloc[-1,:]
df_unaffordable=p_now[p_now>p_max] #we can set max price here maybe as an optional
l_unaffordable=df_unaffordable.index.tolist()
df_input.drop(columns=l_unaffordable, inplace=True)
if silent == False:
print ("-----------------------------------------------------")
print ("Our max price is : €"+str(p_max))
print ("Number of stocks to drop due to being unaffordable: "+str(len(l_unaffordable)))
print ("Number of stocks remaining: "+str(len(df_input.columns)))
# drop any columns with more than half or more Nas as the models dont like these
half_length=int(len(df_input)*0.50)
l_drop=df_input.columns[df_input.iloc[:half_length,:].isna().all()].tolist()
df_input.drop(columns=l_drop, inplace=True)
if silent == False:
print ("-----------------------------------------------------")
print ("Number of stocks due to NAs: "+str(len(l_drop)))
print ("Number of stocks remaining: "+str(len(df_input.columns)))
# drop any columns that are all NA over the last few rows, as these stocks have been delisted
l_drop=df_input.columns[df_input.iloc[-3:,:].isna().all()].tolist()
df_input.drop(columns=l_drop, inplace=True)
if silent == False:
print ("-----------------------------------------------------")
print ("Number of stocks due to being delisted: "+str(len(l_drop)))
print ("Number of stocks remaining: "+str(len(df_input.columns)))
#see which stocks have negative returns or low returns in the period & drop those
df_pct=(df_input.iloc[-1,:].fillna(0) / df_input.iloc[0,:])
df_pct=df_pct[df_pct<= (min_returns + 1)] #we can set minimum returns here maybe as an optional
l_pct=df_pct.index.tolist()
df_input.drop(columns=l_pct, inplace=True)
if silent == False:
print ("-----------------------------------------------------")
print ("Number of stocks due to Negative returns: "+str(len(l_pct)))
print ("Number of stocks remaining: "+str(len(df_input.columns)))
print ("Number of days data: "+str(len(df_input)))
print ("As default we will only keep the top 50 performing stocks when creating our portfolio(this can be varied using s_asset & asset_len)")
#We will only keep the X best performing assets can make this an optional input
e_asset=s_asset + asset_len
df=df_input
mu = expected_returns.mean_historical_return(df)
top_stocks = mu.sort_values(ascending=False).index[s_asset:e_asset]
df = df[top_stocks]
#Calculate expected annualised returns & annual sample covariance matrix of the daily asset returns
mu = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)
# Optimise the portfolio weights for the chosen objective (default: maximal Sharpe ratio)
ef= EfficientFrontier(mu, S) #Create the Efficient Frontier Object
#We can try a variety of objectives look at adding this as an input
if obj_method == "SHARPE":
objective_summary=obj_method #description of the objective we used for our output df
weights = ef.max_sharpe()
elif obj_method == "MIN_VOL":
objective_summary=obj_method #description of the objective we used for our output df
weights = ef.min_volatility()
elif obj_method == "RISK":
objective_summary=obj_method+"_"+str(target_percent) #description of the objective we used for our output df
weights = ef.efficient_risk(target_percent)
elif obj_method == "RETURN":
objective_summary=obj_method+"_"+str(target_percent) #description of the objective we used for our output df
weights = ef.efficient_return(target_percent)
else:
raise ValueError("obj_method must be one of SHARPE, MIN_VOL, RISK, RETURN")
cl_weights= ef.clean_weights()
#print(cl_weights)
if silent == False:
print("-------------------------------------------------------------")
print("Our Benchmark portfolio the S&P 500 has: Volatility 18.1% & Annual Return: 10.6%")
ef.portfolio_performance(verbose=True)
expected_portfolio_returns=ef.portfolio_performance()[0]
volatility=ef.portfolio_performance()[1]
r_sharpe=ef.portfolio_performance()[2]
#calculates the actual performance date range work on this
actual_startdate = pd.to_datetime(enddate) + pd.DateOffset(days=2)
actual_enddate = pd.to_datetime(actual_startdate) + pd.DateOffset(years=1)
# Machine Learning Project 1 - House Price Prediction
import pandas as pd
df1 = pd.read_csv('bengaluru_house_prices.csv')
df1.head()
df1.info()
df1.shape
df2 = df1.drop(['area_type', 'society', 'balcony'], axis=1)
df2.head()
df2.isnull().sum()
df3 = df2.dropna()
df3.isnull().sum()
df3.head()
df3['availability'].unique()
df3.groupby("availability")["availability"].count()
df3['availability'] = df3['availability'].apply(lambda x: x if x == 'Ready To Move' else 'Future Possession')
df3.groupby("availability")["availability"].count()
df3['location'].unique()
df3.groupby("location")["location"].count().sort_values(ascending=False)
locations = df3.groupby('location')['location'].count().sort_values()
locations
locations_20cnt = locations[locations <= 20]
locations_20cnt
df3['location'] = df3['location'].apply(lambda x: 'Others' if x in locations_20cnt else x)
df3.groupby("location")["location"].count().sort_values(ascending=False)
df3.head()
df3['size'].unique()
import re
df3['bhks'] = df3['size'].apply(lambda x: int(re.findall(r'\d+', x)[0].strip()))
df3.head()
df3['total_sqft'].unique()
def get_mean(x):
if re.findall('-', x):
ss = x.strip().split('-')
return ((float(ss[0]) + float(ss[1])) / 2)
try:
return float(x.strip())
except:
return None
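# For example: get_mean('1000 - 1200') -> 1100.0, get_mean('2400') -> 2400.0,
# and a non-numeric value such as '34.46Sq. Meter' -> None.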
df3['total_sqft_new'] = df3['total_sqft'].apply(get_mean)
df3.head()
df3.isnull().sum()
df4 = df3.dropna()
df4.isnull().sum()
df4['bath'].unique()
df4.groupby('bath')['bath'].count().sort_values()
df5 = df4[df4['bath'] <= 10]
df5.head()
df6 = df5.drop(['size', 'total_sqft'], axis=1)
df6.head()
df6[df6['total_sqft_new'] / df6['bhks'] < 400]
df7 = df6[df6['total_sqft_new'] / df6['bhks'] > 400]
df7.head()
df7['price_per_sqft'] = df7['price'] * 100000 / df7['total_sqft_new']
df7
df7['price_per_sqft'].describe()
def rmv_price_outlier(df):
df_new = pd.DataFrame()
for key, sdf in df.groupby('location'):
m = sdf['price_per_sqft'].mean()
s = sdf['price_per_sqft'].std()
# print (sdf['location'])
rdf = sdf[(sdf['price_per_sqft'] <= m + s) & (sdf['price_per_sqft'] > m - s)]
# print(rdf)
df_new = pd.concat([df_new, rdf], ignore_index=True)
return df_new
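# The filter above keeps, within each location, only listings whose price per
# sqft lies within one standard deviation of that location's mean -- e.g. for a
# location with mean 5000 and std 2000 (hypothetical numbers), rows outside
# the interval (3000, 7000] are treated as outliers and dropped.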
df8 = rmv_price_outlier(df7)
df8.head()
df8.shape
availability_dummy = pd.get_dummies(df8['availability'], drop_first=True)
import glob
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from matplotlib.lines import Line2D
def plot_metrics(names='default', logs='training', save_name='default', timesteps=1000000, ci='sd', rolling=10):
name = save_name
names_print = names
if logs == 'training':
rolling = rolling
if logs == 'evaluation':
rolling = None
logging_dir = []
for i in names:
logging_dir.append('./logs/benchmark/' + i)
all_in_one = []
ci = ci
y = 0
timestep_lim = timesteps
for z in logging_dir:
if logs is 'training':
path = glob.glob(os.path.join(z, "*", "monitor*"))
if logs is 'evaluation':
path = glob.glob(os.path.join(z, "*", "*", "monitor*"))
path.sort()
all_in_one.append(pd.DataFrame({"reward": [], "timestep": [], "crash": [], "supervised": [], "intervention": [],
"safelyDone": [], "timeout": [], "time": []}))
for i in range(0, len(path)):
run_reward = pd.read_csv(path[i], skiprows=2, names=["reward", "timestep", "crash", "supervised",
"intervention", "safelyDone", "timeout", "time", "TF",
"distance"])
run_reward.loc[:, "repl"] = i
run_reward.loc[:, "intervention"] = run_reward.loc[:, "intervention"] / run_reward.loc[:, "timestep"]
run_reward.loc[:, "timestep2"] = run_reward.loc[:, "timestep"]
run_reward.loc[:, "crash2"] = run_reward.loc[:, "crash"]
run_reward.loc[:, "Episode ended by"] = run_reward.loc[:, "crash"]
run_reward.loc[:, "timestepSafe"] = run_reward.loc[:, "timestep"] * run_reward.loc[:, "safelyDone"]
for jj in range(0, len(run_reward)):
if run_reward.loc[jj, "timestepSafe"] == 0:
run_reward.loc[jj, "timestepSafe"] = np.nan
for jj in range(0, len(run_reward)):
if run_reward.loc[jj, "Episode ended by"] == 0:
if run_reward.loc[jj, "safelyDone"]:
run_reward.loc[jj, "Episode ended by"] = "Reach target"
if run_reward.loc[jj, "Episode ended by"] == 1:
run_reward.loc[jj, "Episode ended by"] = "Crash"
if run_reward.loc[jj, "Episode ended by"] == 0:
run_reward.loc[jj, "Episode ended by"] = "Time out"
run_reward = run_reward.reset_index()
del run_reward['index']
for jj in range(0, len(run_reward)-1):
run_reward.loc[jj+1, "timestep"] += run_reward.loc[jj, "timestep"]
for jj in range(0, len(run_reward)-1):
run_reward.loc[jj+1, "crash2"] += run_reward.loc[jj, "crash2"]
all_in_one[y] = all_in_one[y].append(run_reward)
all_in_one[y] = all_in_one[y].sort_values(by=["timestep"])
if rolling is not None:
all_in_one[y]["reward"] = all_in_one[y]["reward"].rolling(rolling).mean()
all_in_one[y]["crash"] = all_in_one[y]["crash"].rolling(rolling).mean()
all_in_one[y]["crash2"] = all_in_one[y]["crash2"].rolling(rolling).mean()
all_in_one[y]["supervised"] = all_in_one[y]["supervised"].rolling(rolling).mean()
all_in_one[y]["timestep2"] = all_in_one[y]["timestep2"].rolling(rolling).mean()
all_in_one[y]["timestepSafe"] = all_in_one[y]["timestepSafe"].rolling(rolling).mean()
all_in_one[y]["intervention"] = all_in_one[y]["intervention"].rolling(rolling).mean()
all_in_one[y]["safelyDone"] = all_in_one[y]["safelyDone"].rolling(rolling).mean()
all_in_one[y]["timeout"] = all_in_one[y]["timeout"].rolling(rolling).mean()
all_in_one[y]["timestep"] = (all_in_one[y]["timestep"] / 20000)
all_in_one[y]["timestep"] = all_in_one[y]["timestep"].astype(int) * 20000
y += 1
if logs == 'training':
#y = 0
#for data in all_in_one:
# plt.figure()
# sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
# sns.kdeplot(data=data, x="timestep", hue="Episode ended by", multiple="fill")
# plt.xlabel('Time step')
# plt.ylabel('Density')
# plt.xlim(0, timestep_lim)
# plt.legend(loc=4, borderaxespad=0, labels=['Reach Target', 'Time out', 'Crash'], title='Episode ended by')
# plt.savefig("art/plots/3in1/" + names[y] + ".png", dpi=100, transparent=True)
# plt.show()
# y += 1
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="crash2", err_style='band', ci=ci, estimator='mean')
plt.xlabel('Time step')
plt.ylabel('Total number of crashes')
plt.legend(labels=names_print)
plt.xlim(0, timestep_lim)
plt.ylim(0, 6000)
plt.savefig("art/plots/total_crashes" + name + ".png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="intervention", err_style='band', ci=ci, estimator='mean')
plt.xlabel('Time step')
plt.ylabel('Probability of an supervisors intervention per time step ')
plt.legend(labels=names)
plt.ylim(bottom=-0.05, top=1.05)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/intervention" + name + ".png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="timestep2", err_style='band', ci=ci, estimator='mean')
plt.xlabel('time step')
plt.ylabel('Number of time steps until episode ends')
plt.legend(labels=names)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/timestepepisode" + name + ".png", dpi=100, transparent=True)
plt.show()
#plt.figure()
#sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
#for data in all_in_one:
# sns.lineplot(data=data, x="timestep", y="timestepSafe", err_style='band', ci=ci, estimator='mean')
#plt.xlabel('time step')
#plt.ylabel('Number of time steps until episode ends safely')
#plt.legend(labels=names)
#plt.xlim(0, timestep_lim)
#plt.savefig("art/plots/timestepSafeEpisode.png", dpi=100, transparent=True)
#plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="supervised", err_style='band', ci=ci, estimator='mean')
plt.xlabel('time step')
plt.ylabel('Probability of an supervisors intervention')
plt.legend(labels=names)
plt.ylim(bottom=-0.05, top=1.05)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/supervisor" + name + ".png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True,
'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="crash", err_style='band', ci=ci)
plt.xlabel('time step')
plt.ylabel('probability of crashing')
plt.legend(labels=names_print)
plt.ylim(bottom=-0.05, top=1.05)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/crash" + name + ".png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True,
'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="safelyDone", err_style='band', ci=ci)
plt.xlabel('time step')
plt.ylabel('probability of reaching the target safely')
plt.legend(labels=names_print)
plt.ylim(bottom=-0.05, top=1.05)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/reachtarget" + name + ".png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True,
'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="timeout", err_style='band', ci=ci)
plt.xlabel('time step')
plt.ylabel('probability of reaching the max # of time steps')
plt.legend(labels=names_print)
plt.ylim(bottom=-0.05, top=1.05)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/timeout" + name + ".png", dpi=100, transparent=True)
plt.show()
if logs == 'evaluation':
for i in range(0, len(logging_dir)):
ended_by_all = []
ended_by = np.array([all_in_one[i]["Episode ended by"], all_in_one[i]["repl"],
[names[i]]*len(all_in_one[i]["Episode ended by"])])
ended_by = np.transpose(ended_by)
ended_by_all.append(ended_by)
ended_by_all = np.concatenate(ended_by_all)
ended_by_all = pd.DataFrame(ended_by_all)
ended_by_all = ended_by_all.groupby(0)[1].value_counts().unstack()
ended_by_all = np.transpose(ended_by_all)
number_episodes = ended_by_all.sum(axis=1)
ended_by_all['Crash'] = ended_by_all['Crash'] / number_episodes
ended_by_all['Reach target'] = ended_by_all['Reach target'] / number_episodes
ended_by_all['Time out'] = ended_by_all['Time out'] / number_episodes
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
sns.boxplot(data=ended_by_all)
plt.xlabel(names[i])
plt.ylim(bottom=0, top=1)
plt.ylabel('Density')
plt.savefig("art/plots/3in1/eval_" + names[i] + '_' + name + ".png", dpi=100, transparent=True)
plt.show()
def plot_mbo(name=None):
front = pd.read_csv('./logs/mbo/' + name + '/iterations/front.csv', header=None)
from argparse import ArgumentParser
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from utils import resample
def get_volumes(trade, underlying):
strikes = trade.index.get_level_values('Strike')
timestamps = trade.index.get_level_values('Time')
underlying_aligned = resample(underlying, timestamps).loc[timestamps]
moneyness = strikes/underlying_aligned.mean(axis=1)
trade['Log-moneyness'] = np.log(moneyness).values
return trade.reset_index(['Strike', 'Time'], drop=True
).set_index('Log-moneyness', append=True
)['Volume']
def plot_activity(volumes):
fig, axes = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(8, 10))
expiries = np.unique(volumes.index.get_level_values('Expiry'))
for ax, expiry in zip(axes, expiries):
for option_type in ['Call', 'Put']:
v = volumes.xs((option_type[0], expiry))
sns.distplot(np.repeat(v.index.values, v), ax=ax,
label=option_type)
ax.set_ylabel('Relative frequency')
expiry = pd.to_datetime(expiry).strftime('%Y-%m-%d')
ax.set_title("Expiry: {}".format(expiry))
axes[0].legend()
axes[-1].set_xlabel('Log-moneyness')
return fig
def table_activity(volumes):
total_volume = volumes.groupby('Expiry').sum()
relative_volume = total_volume/total_volume.sum()
table = pd.concat([total_volume, relative_volume],
keys=['Total volume', 'Relative volume'], axis=1)
return table
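# table_activity returns one row per expiry with the total traded volume and
# that expiry's share of the overall volume, e.g. (illustrative numbers only):
#             Total volume  Relative volume
# 2019-06-21         12000             0.40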
if __name__ == '__main__':
cli = ArgumentParser()
cli.add_argument('ticker')
cli.add_argument('trade_filename')
cli.add_argument('underlying_filename')
cli.add_argument('dest_plot_filename')
cli.add_argument('dest_table_filename')
args = cli.parse_args()
trade = pd.read_parquet(args.trade_filename).xs(args.ticker)
underlying = pd.read_parquet(args.underlying_filename)
from typing import List, Tuple
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import time
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
CLASSIFICATION_DICT = {'technical_debt': 0, 'feature': 1,
'architecture': 2, 'defect': 3, 'research_spike': 4}
PRIORITY_DICT = {'': 0, '1 - Critical': 1, '2 - High': 2,
'3 - Moderate': 3, '4 - Low': 4, '5 - Planning': 5}
STATE_DICT = {'Draft': 0, 'Ready': 1, 'Work in progress': 2, 'Review': 3,
'Testing': 4, 'Quality Assurance': 5, 'Deployment': 6, 'Complete': 7, 'Cancelled': 8,
# Added not real states
'Stale': 9,
'Other': 10
}
TEXT_COLUMNS = ['short_description', 'description',
'proposed_solution', 'acceptence_crit']
def reverse_dict(d):
return dict((v, k) for k, v in d.items())
def load_story_commit(path: str) -> pd.DataFrame:
data = pd.read_csv(path)
return data
def get_finished_stories(frame: pd.DataFrame) -> pd.DataFrame:
# cancelled = str(state_dict["Cancelled"])
completed = STATE_DICT['Complete']
cancelled = STATE_DICT['Cancelled']
result = frame[(frame.state == completed) | (frame.state == cancelled)]
result = result.drop(columns=['is_stale', 'last_update'])
return result
def row_state_mapper(row) -> int:
completed = STATE_DICT['Complete']
cancelled = STATE_DICT['Cancelled']
stale = STATE_DICT['Stale']
other = STATE_DICT['Other']
state = row['state']
is_stale = row['is_stale']
if state == completed or state == cancelled:
return state
if is_stale:
return stale
else:
return other
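# For example, a row with state=STATE_DICT['Work in progress'] and is_stale=True
# maps to STATE_DICT['Stale'] (9), while a completed or cancelled story keeps its
# original state code.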
def vectorize_text_field(field: str, df: pd.DataFrame, count_vect=None) -> Tuple[pd.DataFrame, CountVectorizer]:
word_frame = df[field].replace(np.nan, '', regex=True)
if not count_vect:
count_vect = CountVectorizer(analyzer='word',
token_pattern='[a-zA-Z0-9]{3,}',
stop_words='english')
count_vect.fit(word_frame)
word_vecs = count_vect.transform(word_frame)
# word_vecs = count_vect.fit_transform(word_frame)
vector_frame = pd.DataFrame(
word_vecs.toarray(), columns=count_vect.get_feature_names())
return (vector_frame, count_vect)
def vectorize_and_join(field: str, df: pd.DataFrame, count_vect=None, prefix_word=None) -> Tuple[pd.DataFrame, CountVectorizer]:
(vector_frame, count_vect) = vectorize_text_field(field, df, count_vect)
prefix_word = prefix_word if prefix_word is not None else field
frame = pd.concat([
df.drop(field, axis=1).reset_index(),
vector_frame.add_prefix("__{}_".format(prefix_word))
], axis=1)
return (frame, count_vect)
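# Illustrative usage (hypothetical frames): fit the vectorizer on a training
# frame once, then pass it back in so a later frame shares the same vocabulary.
# >>> train_df = pd.DataFrame({'short_description': ['fix login bug', 'add audit log'],
# ...                          'state': [7, 8]})
# >>> train_vec, cv = vectorize_and_join('short_description', train_df)
# >>> test_vec, _ = vectorize_and_join('short_description', test_df, count_vect=cv)
# Bag-of-words columns come back prefixed, e.g. '__short_description_login'.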
def get_xy_from_stories(df: pd.DataFrame, fields_to_drop=None, y_field='state') -> Tuple[pd.DataFrame, pd.DataFrame]:
    # copy into a fresh list so a shared default argument is never mutated across calls
    fields_to_drop = list(fields_to_drop) if fields_to_drop is not None else ['state']
    if y_field not in fields_to_drop:
        fields_to_drop.append(y_field)
    X = df.drop(fields_to_drop, axis=1)
    y = df[y_field]
    return (X, y)
def scale_stories(df: pd.DataFrame, scaler=None) -> pd.DataFrame:
if scaler is None:
scaler = MinMaxScaler()
scaler.fit(df[df.columns])
scaled = scaler.transform(df)
    scaled_df = pd.DataFrame(scaled, columns=df.columns)
from cbs import cbs
import pandas as pd
import pytest
#Get the CBS DEF dataframe
@pytest.fixture(scope="module")
def DEF():
return cbs.Cbs().parser('DEF')
def test_cbs_def_columns(DEF):
assert DEF.columns.tolist() == ['Name', 'def_int', 'def_saftey', 'def_sk', 'tackles', 'fum_rec',
'forced_fumbles', 'def_td', 'itd', 'ftd', 'pts_allowd','pass_yds_allowed', 'rush_yds_allowed', 'yds_allowed',
'kick_rt_td']
#Test top and bottom names
def test_cbs_def_projection1(DEF):
assert len(DEF.iloc[0].Name) <= 3
def test_cbs_def_projection2(DEF):
assert len(DEF.iloc[1].Name) <= 3
def test_cbs_def_projection3(DEF):
assert len(DEF.iloc[30].Name) <= 3
def test_cbs_def_projection4(DEF):
assert len(DEF.iloc[31].Name) <= 3
#Test top and bottom Stats
def test_cbs_def_projection5(DEF):
assert pd.to_numeric(DEF.iloc[0].def_int, errors='ignore') > 0
def test_cbs_def_projection6(DEF):
assert pd.to_numeric(DEF.iloc[1].def_sk, errors='ignore') > 0
def test_cbs_def_projection7(DEF):
    assert pd.to_numeric(DEF.iloc[30].def_td, errors='ignore') > 0
# -*- coding: utf-8 -*-
"""
Script to play around with Dash
"""
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import numpy as np
import datetime as dt
import plotly.offline as pyo
import plotly.graph_objs as go
#PLOTLY
#import data
df = pd.read_csv('../Data/weather_data.csv')
#get temp and time
df['datetime'] = pd.to_datetime(df['datetime'], format='%d/%m/%Y')
import re
import pandas as pd
import numpy as np
from gensim import corpora, models, similarities
from difflib import SequenceMatcher
from build_tfidf import split
def ratio(w1, w2):
'''
Calculate the matching ratio between 2 words.
Only account for word pairs with at least 90% similarity
'''
m = SequenceMatcher(None, w1, w2)
r = m.ratio()
if r < 0.9: r = 0.0
return r
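# Illustrative values: near-duplicates survive the 90% cut-off, unrelated words do not.
# >>> round(ratio('table', 'tables'), 2)
# 0.91
# >>> ratio('wood', 'steel')
# 0.0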
def build_features(data, tfidf, dictionary):
'''
Generate features:
1. Cosine similarity between tf-idf vectors of query vs. title
2. Cosine similarity between tf-idf vectors of query vs. description
3. Cosine similarity between tf-idf vectors of query vs. attribute text
4. Sum of word match ratios between query vs. title
5. Sum of word match ratios between query vs. description
6. Sum of word match ratios between query vs. attribute text
7. Query word count
'''
result = []
    for loc in range(len(data)):
rowdata = data.loc[loc, ["product_title", "product_description", "attr_value", "search_term"]]
rowbow = [[str(text)] if isinstance(text, float) else split(text) for text in rowdata]
# query match level
titleMatch = descMatch = attrMatch = 0
for q in rowbow[3]:
            titleMatch = titleMatch + np.sum([ratio(q, w) for w in rowbow[0]])
            descMatch = descMatch + np.sum([ratio(q, w) for w in rowbow[1]])
            attrMatch = attrMatch + np.sum([ratio(q, w) for w in rowbow[2]])
# get tfidf vectors
rowdata = [tfidf[dictionary.doc2bow(text)] for text in rowbow]
# prepare to get similarities
index = similarities.SparseMatrixSimilarity(rowdata[:3], num_features=len(dictionary))
# append everything to the result
result.append(np.concatenate((index[rowdata[3]], [titleMatch, descMatch, attrMatch, len(rowbow[3])]), axis=0).tolist())
# end loop
return np.array(result)
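# Each row of the returned array therefore reads, in order:
# [tfidf_sim(query, title), tfidf_sim(query, description), tfidf_sim(query, attributes),
#  title_match, desc_match, attr_match, query_word_count]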
def main():
# load data
df_desc = pd.read_csv('data/product_descriptions.csv', encoding="ISO-8859-1")
df_attr = pd.read_csv('data/attributes_combined.csv', encoding="ISO-8859-1")
df_train = pd.read_csv('data/train.csv', encoding="ISO-8859-1")
df_train = pd.merge(df_train, df_desc, how='left', on='product_uid')
df_train = pd.merge(df_train, df_attr, how='left', on='product_uid')
df_test = pd.read_csv('data/test.csv', encoding="ISO-8859-1")
df_test = pd.merge(df_test, df_desc, how='left', on='product_uid')
df_test = pd.merge(df_test, df_attr, how='left', on='product_uid')
# load tfidf model
dictionary = corpora.Dictionary.load('homedepot.dict')
corpus = corpora.MmCorpus('homedepot.mm')
tfidf = models.TfidfModel.load('homedepot.tfidf')
# build features
trainData = build_features(df_train, tfidf, dictionary)
testData = build_features(df_test, tfidf, dictionary)
# save to csv
    df = pd.DataFrame(trainData, columns=['qt', 'qd', 'qa', 'mt', 'md', 'ma', 'ql'])
""" manage PyTables query interface via Expressions """
import ast
from functools import partial
import numpy as np
from pandas._libs.tslibs import Timedelta, Timestamp
from pandas.compat.chainmap import DeepChainMap
from pandas.core.dtypes.common import is_list_like
import pandas as pd
from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.computation import expr, ops
from pandas.core.computation.common import _ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
class Scope(expr.Scope):
__slots__ = 'queryables',
def __init__(self, level, global_dict=None, local_dict=None,
queryables=None):
super().__init__(level + 1,
global_dict=global_dict,
local_dict=local_dict)
self.queryables = queryables or dict()
class Term(ops.Term):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, str) else cls
supr_new = StringMixin.__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
super().__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
# must be a queryables
if self.side == 'left':
if self.name not in self.env.queryables:
raise NameError('name {name!r} is not defined'
.format(name=self.name))
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
@property
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super().__init__(value, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
def __init__(self, op, lhs, rhs, queryables, encoding):
super().__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.filter = None
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if (isinstance(left, ConditionBinOp) and
isinstance(right, ConditionBinOp)):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if (isinstance(left, FilterBinOp) and
isinstance(right, FilterBinOp)):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(self.op, left, right, queryables=self.queryables,
encoding=self.encoding).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
        elif not is_term(left) and is_term(right):
# Copyright (c) 2022 RWTH Aachen - Werkzeugmaschinenlabor (WZL)
# Contact: <NAME>, <EMAIL>
from sklearn.preprocessing import Normalizer,MinMaxScaler,MaxAbsScaler,StandardScaler,RobustScaler,QuantileTransformer,PowerTransformer
import pandas as pd
import os
from absl import logging
def scale(dataframe, method, scaler, config):
    """Scale features with the chosen method.

    Args:
        dataframe (pd.DataFrame): Features to scale.
        method (string): Name of the sklearn scaling method to use.
        scaler (obj): Already fitted sklearn scaler object, or None to fit a new one.
        config (dict): Optional parameters passed to the scaler via set_params.

    Returns:
        Tuple of the (fitted) scaler and the scaled dataframe.
    """
already_fitted = False
if scaler != None:
already_fitted = True
elif method == 'Normalizer':
scaler = Normalizer()
elif method == 'MinMaxScaler':
scaler = MinMaxScaler()
elif method == 'MaxAbsScaler':
scaler = MaxAbsScaler()
elif method == 'StandardScaler':
scaler = StandardScaler()
elif method == 'RobustScaler':
scaler = RobustScaler()
elif method == 'QuantileTransformer':
scaler = QuantileTransformer()
elif method == 'PowerTransformer':
scaler = PowerTransformer()
else:
raise Exception('Scaler method: {} is invalid!'.format(method))
if config != None:
scaler.set_params(**config)
dataframe_head = dataframe.columns.values
if already_fitted == False:
dataframe_scaled = scaler.fit_transform(dataframe)
else:
dataframe_scaled = scaler.transform(dataframe)
    dataframe_scaled = pd.DataFrame(dataframe_scaled, columns=dataframe_head)
    logging.info('Dataframe scaled with {}'.format(scaler))
return scaler, dataframe_scaled
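# Illustrative usage (hypothetical dataframe): a fresh scaler is fitted on the
# first call and the returned object can be passed back in to transform later data.
# >>> df = pd.DataFrame({'feature_a': [0.0, 5.0, 10.0]})
# >>> fitted, df_scaled = scale(df, 'MinMaxScaler', None, None)
# >>> df_scaled['feature_a'].tolist()
# [0.0, 0.5, 1.0]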
def inverse_scale(dataframe, scaler):
    """Inverse-scale a dataframe back to its original feature ranges.

    Args:
        dataframe (pd.DataFrame): Scaled features to transform back.
        scaler (obj): Fitted sklearn scaler object that produced the scaling.
    """
scaler_str = str(scaler)
if scaler_str == 'Normalizer()':
logging.warning('Normalizer has no function for inverse scaling!')
else:
dataframe_head = dataframe.columns.values
dataframe_inverse_scaled = scaler.inverse_transform(dataframe)
        dataframe_inverse_scaled = pd.DataFrame(dataframe_inverse_scaled, columns=dataframe_head)
from pathlib import Path
import epimargin.plots as plt
import flat_table
import numpy as np
import pandas as pd
import seaborn as sns
from epimargin.estimators import analytical_MPVS
from epimargin.etl.commons import download_data
from epimargin.etl.covid19india import state_code_lookup
from epimargin.models import SIR
from epimargin.smoothing import notched_smoothing
sns.set(style = "whitegrid")
# root = Path(__file__).parent
# data = root/"data"
data = Path("./data").resolve()
CI = 0.95
window = 14
gamma = 0.2
infectious_period = 5
smooth = notched_smoothing(window)
num_sims = 50000
# load admin data on population
IN_age_structure = { # WPP2019_POP_F01_1_POPULATION_BY_AGE_BOTH_SEXES
0: 116_880,
5: 117_982 + 126_156 + 126_046,
18: 122_505 + 117_397,
30: 112_176 + 103_460,
40: 90_220 + 79_440,
50: 68_876 + 59_256 + 48_891,
65: 38_260 + 24_091,
75: 15_084 + 8_489 + 3_531 + 993 + 223 + 48,
}
# normalize
age_structure_norm = sum(IN_age_structure.values())
IN_age_ratios = np.array([v/age_structure_norm for (k, v) in IN_age_structure.items()])
split_by_age = lambda v: (v * IN_age_ratios).astype(int)
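# Illustrative check (hypothetical total): split_by_age(1_000_000) returns an
# 8-element integer array whose entries follow IN_age_ratios and sum to roughly
# one million (slightly less, because astype(int) truncates each bracket).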
# from Karnataka
COVID_age_ratios = np.array([0.01618736, 0.07107746, 0.23314877, 0.22946212, 0.18180406, 0.1882451 , 0.05852026, 0.02155489])
india_pop = pd.read_csv(data/"india_pop.csv", names = ["state", "population"], index_col = "state")