| prompt (string, length 19 – 1.03M) | completion (string, length 4 – 2.12k) | api (string, length 8 – 90) |
|---|---|---|
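
Each record pairs a Python source prefix (`prompt`) that ends mid-expression, the held-out next API call (`completion`), and the fully qualified name of that API (`api`). In the concatenated samples below, each record's boundary appears at the end of its prompt as `| completion | api |`. A minimal sketch of inspecting one record, assuming the rows come from a Hugging Face-style dataset with these three columns (the dataset identifier below is a placeholder, not the real one):

from datasets import load_dataset

ds = load_dataset("user/python-next-api-call", split="train")  # placeholder dataset id
row = ds[0]
print(row["prompt"][-200:])  # tail of the truncated Python source
print(row["completion"])     # held-out call, e.g. "pd.DataFrame(metric_std[2], index=['rr', 'svr', 'lr', 'svc'])"
print(row["api"])            # fully qualified name, e.g. "pandas.DataFrame"
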
# import important libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import seaborn as sns
import os
import argparse
# import machine learning libraries
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.svm import SVR, SVC
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_predict, GridSearchCV, KFold
from sklearn.metrics import confusion_matrix, classification_report,\
precision_recall_fscore_support
# import custom functions for vectorizing & visualizing data
import utils
from process_data import get_split
plt.rcParams.update({'font.size': 12})
all_props = ['bulk_modulus',
'thermal_conductivity',
'shear_modulus',
'band_gap',
'debye_temperature',
'thermal_expansion']
symbols = ['B', '$\\kappa$', 'G', 'E$_{\\mathrm{g}}$', 'D', '$\\alpha$']
prop_labels = ['Bulk Modulus (GPa)',
'Log$_{10}$ Thermal Conductivity $\\left(\\dfrac{\\mathrm{W}}' +
'{\\mathrm{m}\\cdot \\mathrm{K}}\\right)$',
'Shear Modulus (GPa)',
'Band Gap (eV)',
'Log$_{10}$ Debye Temperature (K)',
'Log$_{10}$ Thermal Expansion $(\\mathrm{K}^{-1})$']
arg2prop = {'bulk_modulus': 'ael_bulk_modulus_vrh',
'thermal_conductivity': 'agl_log10_thermal_conductivity_300K',
'shear_modulus': 'ael_shear_modulus_vrh',
'band_gap': 'Egap',
'debye_temperature': 'ael_log10_debye_temperature',
'thermal_expansion': 'agl_log10_thermal_expansion_300K'}
# map internal property names (e.g. 'ael_bulk_modulus_vrh') to plot axis labels
prop2label = dict(zip(arg2prop.values(), prop_labels))
parser_desc = 'Reproduce the results of this work'
parser = argparse.ArgumentParser(description=parser_desc)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--properties',
type=str,
nargs='+',
metavar='Property to reproduce',
choices=all_props,
help=('example:\n\t' +
'python reproduce.py --properties bulk_modulus\n\t'))
group.add_argument('--all',
action='store_true',
help='Run through each property one at a time '
'and generate results and figures.')
args = parser.parse_args()
if not args.all:
mat_props = []
for j in args.properties:
mat_props.append(arg2prop[j])
else:
mat_props = list(map(lambda p: arg2prop[p], all_props))
print('Reproducing results for the following data:', mat_props)
def optimize_threshold(y_train_labeled, y_train_pred):
"""Given a DataFrame of labels and predictions, return the
optimal threshold for a high F1 score"""
y_train_ = y_train_labeled.copy()
y_train_pred_ = pd.Series(y_train_pred).copy()
# start below zero so opt_thresh is always assigned on the first pass
f1score_max = -1
for threshold in np.arange(0.1, 1, 0.1):
diff = (max(y_train_pred) - min(y_train_pred))
threshold = min(y_train_pred) + threshold * diff
y_train_pred_[y_train_pred < threshold] = 0
y_train_pred_[y_train_pred >= threshold] = 1
f1score = f1_score(y_train_, y_train_pred_)
if f1score > f1score_max:
f1score_max = f1score
opt_thresh = threshold
return opt_thresh
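# A quick illustration (toy numbers, not from this work's data) of how
# optimize_threshold is used: it scans thresholds at 10%..90% of the prediction
# range and keeps the one with the best F1 score against the 0/1 labels.
#   labels = pd.Series([0, 0, 0, 1, 1])           # 1 = "extraordinary" compound
#   preds = np.array([0.1, 0.3, 0.45, 0.7, 0.9])  # continuous regression output
#   thresh = optimize_threshold(labels, preds)
#   y_hat = [1 if p >= thresh else 0 for p in preds]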
def get_performance(mat_props, seed):
metrics_dict = {}
for mat_prop in mat_props:
os.makedirs('figures/'+mat_prop, exist_ok=True)
data = get_split(mat_prop, elem_prop='oliynyk', seed_num=seed)
X_train_scaled, X_test_scaled = data[0:2]
y_train, y_test = data[2:4]
y_train_labeled, y_test_labeled = data[4:6]
formula_train, formula_test = data[6:8]
test_threshold = y_test.iloc[-y_test_labeled.sum().astype(int)]
train_threshold = y_train.iloc[-y_train_labeled.sum().astype(int)]
y = pd.concat([y_train, y_test])
plt.figure(1, figsize=(7, 7))
ax = sns.distplot(y, bins=50, kde=False)
rect1 = patches.Rectangle((test_threshold, 0),
ax.get_xlim()[1]-test_threshold,
ax.get_ylim()[1], linewidth=1,
edgecolor='k',
facecolor='g',
alpha=0.2)
ax.add_patch(rect1)
text_size = 18
ax.text(.1,
.5,
'Ordinary\nCompounds',
size=text_size,
horizontalalignment='left',
verticalalignment='center',
transform=ax.transAxes)
ax.text(.98,
.15,
'Extraordinary\nCompounds',
size=text_size,
horizontalalignment='right',
verticalalignment='center',
transform=ax.transAxes)
ax.tick_params(direction='in',
length=5,
bottom=True,
top=True,
left=True,
right=True,
labelsize=text_size)
ax.set_xlabel(prop2label[mat_prop], size=text_size)
ax.set_ylabel('number of occurrences'.title(), size=text_size)
plt.savefig('figures/' + mat_prop + '/distplot',
dpi=300,
bbox_inches='tight')
plt.clf()
# ## Learn with a Ridge Regression (linear model)
# define ridge regression object
rr = Ridge()
# define k-folds
cv = KFold(n_splits=5, shuffle=True, random_state=1)
# choose search space
parameter_candidates = {'alpha': np.logspace(-5, 2, 10)}
# define the grid search
grid = GridSearchCV(estimator=rr,
param_grid=parameter_candidates,
cv=cv)
# run grid search
grid.fit(X_train_scaled, y_train)
# plot grid search to ensure good values
utils.plot_1d_grid_search(grid, midpoint=0.75)
print('best parameters:', grid.best_params_)
plt.savefig('figures/' + mat_prop + '/rr_1d_search',
dpi=300,
bbox_inches='tight')
plt.clf()
best_params_rr = grid.best_params_
# best_params_rr = {'alpha': 0.0021544346900318843}
rr = Ridge(**best_params_rr)
rr.fit(X_train_scaled, y_train)
y_test_predicted_rr = rr.predict(X_test_scaled)
y_train_predicted_rr = rr.predict(X_train_scaled)
# plot the data
plt.figure(figsize=(6, 6))
plt.plot(y_test,
y_test_predicted_rr,
marker='o',
mfc='none',
color='#0077be',
linestyle='none',
label='test')
plt.plot(y_train,
y_train_predicted_rr,
marker='o',
mfc='none',
color='#e34234',
linestyle='none',
label='train')
max_val = max(y_test.max(), y_test_predicted_rr.max())
min_val = min(y_test.min(), y_test_predicted_rr.min())
plt.plot([min_val, max_val], [min_val, max_val], 'k--')
limits = [min_val, max_val]
plt.xlim(limits)
plt.ylim(limits)
plt.xlabel('actual')
plt.ylabel('predicted')
plt.legend(loc=4)
plt.tick_params(direction='in',
length=5,
bottom=True,
top=True,
left=True,
right=True)
plt.savefig('figures/' + mat_prop + '/rr_act_vs_pred',
dpi=300,
bbox_inches='tight')
plt.clf()
# ## Learn with a support vector regression (non-linear model)
# to speed up the grid search, optimize on a subsample of data
X_train_scaled_sampled = X_train_scaled.sample(500, random_state=1)
y_train_sampled = y_train.loc[X_train_scaled_sampled.index.values]
# define support vector regression object (default to rbf kernel)
svr = SVR()
# define k-folds
cv = KFold(n_splits=5, shuffle=True, random_state=1)
# choose search space
parameter_candidates = {'C': np.logspace(2, 4, 8),
'gamma': np.logspace(-3, 1, 8)}
# define the grid search
grid = GridSearchCV(estimator=svr,
param_grid=parameter_candidates,
cv=cv)
# run grid search
grid.fit(X_train_scaled_sampled, y_train_sampled)
# plot grid search to ensure good values
utils.plot_2d_grid_search(grid, midpoint=0.7)
plt.savefig('figures/' + mat_prop + '/svr_2d_search',
dpi=300, bbox_inches='tight')
plt.clf()
print('best parameters:', grid.best_params_)
best_params_svr = grid.best_params_
svr = SVR(**best_params_svr)
svr.fit(X_train_scaled, y_train)
y_test_predicted_svr = svr.predict(X_test_scaled)
y_train_predicted_svr = svr.predict(X_train_scaled)
# plot the data
plt.figure(figsize=(6, 6))
plt.plot(y_test,
y_test_predicted_svr,
marker='o',
mfc='none',
color='#0077be',
linestyle='none',
label='test')
plt.plot(y_train,
y_train_predicted_svr,
marker='o',
mfc='none',
color='#e34234',
linestyle='none',
label='train')
max_val = max(y_test.max(), y_test_predicted_svr.max())
min_val = min(y_test.min(), y_test_predicted_svr.min())
plt.plot([min_val, max_val], [min_val, max_val], 'k--')
limits = [min_val, max_val]
plt.xlim(limits)
plt.ylim(limits)
plt.xlabel('actual')
plt.ylabel('predicted')
plt.legend(loc=4)
plt.tick_params(direction='in',
length=5,
bottom=True,
top=True,
left=True,
right=True)
plt.savefig('figures/' + mat_prop + '/svr_act_vs_pred',
dpi=300,
bbox_inches='tight')
plt.clf()
# # Approach the problem as a classification task
# ## Learn with a logistic regression (linear classification)
# define logistic regression object
lr = LogisticRegression(solver='lbfgs')
# define k-folds
cv = KFold(n_splits=5, shuffle=True, random_state=1)
# choose search space
class_1_weight = [{0: 1, 1: weight} for weight in
np.linspace(1, 50, 5)]
parameter_candidates = {'C': np.logspace(-1, 4, 5),
'class_weight': class_1_weight}
# define the grid search. We use log-loss to decide which
# parameters to use.
grid = GridSearchCV(estimator=lr,
param_grid=parameter_candidates,
scoring='neg_log_loss',
cv=cv)
# run grid search
grid.fit(X_train_scaled, y_train_labeled)
# plot grid search to ensure good values
utils.plot_2d_grid_search(grid, midpoint=-0.05, vmin=-0.13, vmax=0)
plt.savefig('figures/' + mat_prop + '/lr_2d_search',
dpi=300,
bbox_inches='tight')
plt.clf()
print('best parameters:', grid.best_params_)
best_params_lr = grid.best_params_
lr = LogisticRegression(solver='lbfgs', penalty='l2', **best_params_lr)
lr.fit(X_train_scaled, y_train_labeled)
# define k-folds
cv = KFold(n_splits=5, shuffle=True, random_state=1)
y_pred_train_lr = cross_val_predict(lr,
X_train_scaled,
y_train_labeled,
cv=cv)
y_prob_train_lr = cross_val_predict(lr,
X_train_scaled,
y_train_labeled,
cv=cv,
method='predict_proba')
y_probability_train_lr = [probability[1] for probability in
y_prob_train_lr]
y_prob_test_lr = lr.predict_proba(X_test_scaled)
y_probability_test_lr = [probability[1] for probability in
y_prob_test_lr]
df_cm = pd.DataFrame(confusion_matrix(y_train_labeled,
y_pred_train_lr))
ax = sns.heatmap(df_cm,
square=True,
annot=True,
annot_kws={"size": 18},
cbar=False,
linewidths=.5,
cmap="YlGnBu",
center=-10000000)
ax.set_ylabel('actual')
ax.set_xlabel('predicted')
ax.xaxis.tick_top()
plt.savefig('figures/' + mat_prop + '/lr_cm',
dpi=300,
bbox_inches='tight')
plt.clf()
threshold = 0.5
utils.plot_prob(threshold,
y_train,
y_probability_train_lr,
threshold_x=train_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/lr_train_prob_thresh={:0.2f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
# ### Check our performance on the test set!
utils.plot_prob(threshold,
y_test,
y_probability_test_lr,
threshold_x=test_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/lr_test_prob_thresh={:0.2f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
# ### Compare this performance to regression models
#
# **For the same recall, a compound flagged as extraordinary by the regression
# models is three times more likely to not actually be extraordinary.**
threshold = optimize_threshold(y_train_labeled, y_train_predicted_rr)
utils.plot_regression(threshold,
y_train,
y_train_predicted_rr,
threshold_x=train_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/rr_train_reg_thresh={:0.2f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
utils.plot_regression(threshold,
y_test,
y_test_predicted_rr,
threshold_x=test_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/rr_test_reg_thresh={:0.2f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
threshold = optimize_threshold(y_train_labeled, y_train_predicted_svr)
utils.plot_regression(threshold,
y_train,
y_train_predicted_svr,
threshold_x=train_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/svr_train_reg_thresh={:0.02f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
utils.plot_regression(threshold,
y_test,
y_test_predicted_svr,
threshold_x=test_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/svr_test_reg_thresh={:0.02f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
# ## Learn with a support vector classification (non-linear)
# to speed up the grid search, optimize on a subsample of data
index_location = X_train_scaled_sampled.index.values
y_train_labeled_sampled = y_train_labeled.loc[index_location]
# define support vector classification object
# (need to set probability to True)
svc = SVC(probability=True)
# define k-folds
cv = KFold(n_splits=5, shuffle=True, random_state=1)
# choose search space (we will start with class_weight=1
# as that was optimal for svc)
parameter_candidates = {'C': np.logspace(-1, 4, 5),
'gamma': np.logspace(-2, 2, 5)}
# define the grid search. We use log-loss to decide
# which parameters to use.
grid = GridSearchCV(estimator=svc,
param_grid=parameter_candidates,
scoring='neg_log_loss',
cv=cv)
# run grid search
grid.fit(X_train_scaled_sampled, y_train_labeled_sampled)
# plot grid search to ensure good values
utils.plot_2d_grid_search(grid, midpoint=-0.04, vmin=-0.13, vmax=0)
plt.savefig('figures/' + mat_prop +
'/svc_2d_search.png',
dpi=300,
bbox_inches='tight')
plt.clf()
print('best parameters:', grid.best_params_)
best_params_svc = grid.best_params_
svc = SVC(probability=True, **best_params_svc)
svc.fit(X_train_scaled, y_train_labeled)
cv = KFold(n_splits=5, shuffle=True, random_state=1)
y_pred_train_svc = cross_val_predict(svc,
X_train_scaled,
y_train_labeled,
cv=cv)
y_prob_train_svc = cross_val_predict(svc,
X_train_scaled,
y_train_labeled,
cv=cv,
method='predict_proba')
y_probability_train_svc = [probability[1] for probability in
y_prob_train_svc]
y_prob_test_svc = svc.predict_proba(X_test_scaled)
y_probability_test_svc = [probability[1] for probability in
y_prob_test_svc]
metrics = precision_recall_fscore_support(y_train_labeled,
y_pred_train_svc)
precision, recall, fscore, support = metrics
print('precision: {:0.2f}\nrecall: {:0.2f}'.format(precision[1],
recall[1]))
df_cm = pd.DataFrame(confusion_matrix(y_train_labeled,
y_pred_train_svc))
ax = sns.heatmap(df_cm,
square=True,
annot=True,
annot_kws={"size": 18},
cbar=False,
linewidths=0.5,
cmap="YlGnBu",
center=-10000000)
ax.set_ylabel('actual')
ax.set_xlabel('predicted')
ax.xaxis.tick_top()
plt.savefig('figures/' + mat_prop +
'/svc_cm',
dpi=300,
bbox_inches='tight')
plt.clf()
threshold = 0.5
utils.plot_prob(threshold,
y_train,
y_probability_train_svc,
threshold_x=train_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/svc_train_prob_thresh={:0.02f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
utils.plot_prob(threshold,
y_test,
y_probability_test_svc,
threshold_x=test_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/svc_test_prob_thresh={:0.2f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
metrics_dict[mat_prop] = {'precision': [], 'recall': [], 'f1': []}
threshold = optimize_threshold(y_train_labeled, y_train_predicted_rr)
y_pred_rr = [1 if x >= threshold else 0 for x in y_test_predicted_rr]
threshold = optimize_threshold(y_train_labeled, y_train_predicted_svr)
y_pred_svr = [1 if x >= threshold else 0 for x in y_test_predicted_svr]
threshold = 0.5
y_pred_lr = [1 if x >= threshold else 0 for x in y_probability_test_lr]
threshold = 0.5
y_pred_svc = [1 if x >= threshold else 0 for x in
y_probability_test_svc]
predictions = [y_pred_rr,
y_pred_svr,
y_pred_lr,
y_pred_svc]
for prediction in predictions:
print(classification_report(y_test_labeled, prediction))
metrics = precision_recall_fscore_support(y_test_labeled,
prediction)
precision, recall, f1, support = metrics
metrics_dict[mat_prop]['precision'].append(precision[1])
metrics_dict[mat_prop]['recall'].append(recall[1])
metrics_dict[mat_prop]['f1'].append(f1[1])
return metrics_dict
def build_metrics():
for seed in [1]:
metrics = get_performance(mat_props, seed)
for prop in metrics:
metric_csv = prop+'_metrics_seed_{:0.0f}.csv'.format(seed)
computed_metrics = os.listdir('data/metrics/')
if metric_csv in computed_metrics:
continue
else:
df_prop_metric = pd.DataFrame(metrics[prop],
index=['rr', 'svr', 'lr', 'svc'])
df_prop_metric.to_csv('data/metrics/'+metric_csv)
def plot_metrics():
metric_mean = {}
metric_std = {}
metric_mean[0] = {}
metric_mean[1] = {}
metric_mean[2] = {}
metric_std[0] = {}
metric_std[1] = {}
metric_std[2] = {}
for prop in mat_props:
rr = []
svr = []
lr = []
svc = []
for seed in [1, 2, 3, 4, 5]:
metric_csv = prop+'_metrics_seed_{:0.0f}.csv'.format(seed)
df_prop_metric = pd.read_csv('data/metrics/'+metric_csv)
rr.append(df_prop_metric.iloc[0, 1:].tolist())
svr.append(df_prop_metric.iloc[1, 1:].tolist())
lr.append(df_prop_metric.iloc[2, 1:].tolist())
svc.append(df_prop_metric.iloc[3, 1:].tolist())
for i in [0, 1, 2]:
metric_mean[i][prop] = [pd.DataFrame(rr).mean()[i],
pd.DataFrame(svr).mean()[i],
pd.DataFrame(lr).mean()[i],
pd.DataFrame(svc).mean()[i]]
metric_std[i][prop] = [pd.DataFrame(rr).std()[i],
pd.DataFrame(svr).std()[i],
pd.DataFrame(lr).std()[i],
pd.DataFrame(svc).std()[i]]
df_p_mean = pd.DataFrame(metric_mean[0], index=['rr', 'svr', 'lr', 'svc'])
df_p_std = pd.DataFrame(metric_std[0], index=['rr', 'svr', 'lr', 'svc'])
df_r_mean = pd.DataFrame(metric_mean[1], index=['rr', 'svr', 'lr', 'svc'])
df_r_std = pd.DataFrame(metric_std[1], index=['rr', 'svr', 'lr', 'svc'])
df_f_mean = pd.DataFrame(metric_mean[2], index=['rr', 'svr', 'lr', 'svc'])
df_f_std = | pd.DataFrame(metric_std[2], index=['rr', 'svr', 'lr', 'svc']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
def test_pi_add_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + one
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize('five', [5, np.array(5, dtype=np.int64)])
def test_sub(self, five):
rng = period_range('2007-01', periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [ | pd.offsets.Day(3) | pandas.offsets.Day |
'''
Imputation:
Helps in filling up the null values
Method1:
Removal of null rows
Method2:
Filling null values with specified values
Method3:
Filling null values with average values
'''
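# Intended usage, as a rough sketch (only colm_rem is shown in this snippet;
# the other methods described above are assumed to exist elsewhere, and the
# 'age' column is hypothetical):
#   imp = Imputer(df)
#   df_clean = imp.colm_rem('age')   # Method 1: drop rows where 'age' is null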
import pandas as pd
import numpy as np
class Imputer:
def __init__(self,df):
self.df=df
def colm_rem(self, colm):
# Removes rows where the given column contains null values (Method 1)
print("Removing the null value rows of "+colm)
temp_df=self.df[ | pd.notnull(self.df[colm]) | pandas.notnull |
from tweepy import OAuthHandler
import time
import pandas as pd
import json
import datetime
import tweepy
from MarioEnterScores import MarioBoard
import os
from mongo import BotDB
BOTENV = os.environ['BOTENV']
#Variables that contains the user credentials to access Twitter API
consumer_key = os.environ['TWITTER_CONSUMER_KEY']
consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
access_token = os.environ['TWITTER_ACCESS_TOKEN']
access_token_secret = os.environ['TWITTER_ACCESS_TOKEN_SECRET']
def joinList(List, separator):
# join the items of List into one string separated by separator
st = ''
for item in List:
st += item + separator
# remove the trailing separator (handles multi-character separators too)
st = st[:len(st) - len(separator)]
return st
def readabledate(unixdate):
date = datetime.datetime.fromtimestamp(int(float(unixdate))).strftime('%Y-%m-%d %H:%M:%S')
return date
class MarioKeezy(tweepy.API):
def __init__(self):
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
super(MarioKeezy,self).__init__(auth)
self.db = BotDB(BOTENV)
self.userDict = self.db.getkv('marioUserDict')
def convert_api_string_to_tweet_version(self,api_txt):
api_list = api_txt.split(',')
new_str = joinList([self.userDict[player]['code'] for player in api_list],',')
return new_str
def runtweetfunc(self,apistr):
self.db.log.info('Mario input -- %s' % (apistr))
#reply tweet with results:
resdf = self.rusFunction(apistr) #your main function
if not resdf.empty:
''' not using image for this iteration on twitter '''
#imageFile = createDFImage(resdf)
#tweetback = ('%s - %s - Current Standings'% (readabledate(time.time()) , (joinList([self.userDict[key]['handle'] for key in self.userDict],' ')))) #add handle and current time. time need to prevent a duplicate tweet error
#self.update_with_media(filename=imageFile,status=tweetback)
#for testing:
#img = Image.open(imageFile)
#img.show()
self.db.log.info('Complete')
return resdf
else:
tweetback = ('%s - %s - ' % (readabledate(time.time()) , 'Error - invalid tweet'))
self.db.log.info('failure')
#self.update_status(tweetback)
self.db.log.info('Replied with - %s' % (tweetback))
def rusFunction(self,apistr):
'''mariokeezy is the twitter handle that will tweet the image your string points to.
The bot automatically adds the handle (tweeter['handle']) to the returned string so that
borted replies to you directly, so there is no need to include it yourself.
apistr is the string of text that was sent; the current format is r,t,n, and the number
of players is worked out from the length of the string originally tweeted.
Run your code here and return the results to the twitter handle as a DataFrame (csv file).'''
#just as an idea to start - to save it being messy i'd just save your other.pys in the save folder
# and import the main functions into here like:
# from Mario import Func1, func2 etc...
#
userCodes = {'t':'1','n':'2','r':'3','h':'4'}
try:
userCodes[apistr[0]]
except KeyError:
self.db.log.info('Invalid tweet')
return | pd.DataFrame() | pandas.DataFrame |
import csv
import json
import operator
import os
import datetime
# 3rd party required for core package
import numpy
import pandas
# CSEP Imports
from csep.core import regions
from csep.utils.time_utils import epoch_time_to_utc_datetime, datetime_to_utc_epoch, strptime_to_utc_datetime, \
millis_to_days, parse_string_format, days_to_millis, strptime_to_utc_epoch, utc_now_datetime, create_utc_datetime
from csep.utils.stats import min_or_none, max_or_none
from csep.utils.calc import discretize
from csep.utils.comcat import SummaryEvent
from csep.core.exceptions import CSEPSchedulerException, CSEPCatalogException, CSEPIOException
from csep.utils.calc import bin1d_vec
from csep.utils.constants import CSEP_MW_BINS
from csep.utils.log import LoggingMixin
from csep.utils.readers import csep_ascii
class AbstractBaseCatalog(LoggingMixin):
"""
Abstract catalog base class for PyCSEP catalogs. This class should not and cannot be used on its own. This just
provides the interface for implementing custom catalog classes.
"""
dtype = None
def __init__(self, filename=None, data=None, catalog_id=None, format=None, name=None, region=None,
compute_stats=True, filters=None, metadata=None, date_accessed=None):
""" Standard catalog format for CSEP catalogs. Primary event data are stored in structured numpy array. Additional
metadata are available by the event_id in the catalog metadata information.
Args:
filename: location of catalog
catalog (numpy.ndarray or eventlist): catalog data
catalog_id: catalog id number (used for stochastic event set forecasts)
format: identification used for serialization
name: human readable name of catalog
region: spatial and magnitude region
compute_stats: whether statistics should be computed for the catalog
filters (str or list): filtering operations to apply to the catalog
metadata (dict): additional information for events
date_accessed (str): time string when catalog was accessed
"""
super().__init__()
self.filename = filename
self.catalog_id = catalog_id
self.format = format
self.name = name
self.region = region
self.compute_stats = compute_stats
self.filters = filters or []
self.date_accessed = date_accessed or utc_now_datetime() # type datetime.datetime
# used to store additional event information based on the event_id key, if no event_id will default to an
# integer index
self.metadata = metadata or {}
# cleans the catalog to set as ndarray, see setter.
self.catalog = data # type: numpy.ndarray
# use user defined stats if entered into catalog
if data is not None and self.compute_stats:
self.update_catalog_stats()
def __eq__(self, other):
""" Compares whether two catalogs are equal by comparing their dicts. """
return self.to_dict() == other.to_dict()
def __str__(self):
self.update_catalog_stats()
s = f'''
Name: {self.name}
Start Date: {self.start_time}
End Date: {self.end_time}
Latitude: ({self.min_latitude}, {self.max_latitude})
Longitude: ({self.min_longitude}, {self.max_longitude})
Min Mw: {self.min_magnitude}
Max Mw: {self.max_magnitude}
Event Count: {self.event_count}
'''
return s
def to_dict(self):
"""
Serializes class to json dictionary.
Returns:
catalog as dict
"""
excluded = ['_catalog']
out = {}
for k, v in self.__dict__.items():
# note: if 'v' is callable that implies that we have a function bound to a class-member. this happens
# for the catalog forecast and requires excluding this value.
if not callable(v) and k not in excluded:
if hasattr(v, 'to_dict'):
new_v = v.to_dict()
else:
new_v = v
if k.startswith('_'):
out[k[1:]] = new_v
else:
out[k] = new_v
out['catalog'] = []
for line in list(self.catalog.tolist()):
new_line = []
for item in line:
# try to decode, if it fails just use original, we use this to handle string-based event_ids
try:
item = item.decode('utf-8')
except:
pass
finally:
new_line.append(item)
out['catalog'].append(new_line)
return out
@property
def event_count(self):
""" Number of events in catalog """
return self.get_number_of_events()
@classmethod
def load_catalog(cls, filename, loader=csep_ascii, **kwargs):
raise NotImplementedError("subclass should implement the load_catalog function.")
@classmethod
def from_dict(cls, adict, **kwargs):
""" Creates a class from the dictionary representation of the class state. The catalog is serialized into a list of
tuples that contain the event information in the order defined by the dtype.
This needs to handle reading in region information at some point.
"""
# could these be class values? can be changed later.
exclude = ['_catalog']
time_members = ['date_accessed', 'start_time', 'end_time']
catalog = adict.get('catalog', None)
out = cls(data=catalog, **kwargs)
for k, v in out.__dict__.items():
if k not in exclude:
if k not in time_members:
setattr(out, k, adict[k])
else:
setattr(out, k, _none_or_datetime(adict[k]))
return out
@classmethod
def from_dataframe(cls, df, **kwargs):
"""
Creates catalog from dataframe. Dataframe must have columns that are equivalent to whatever fields
the catalog expects in the catalog dtype.
For example:
cat = CSEPCatalog()
df = cat.get_dataframe()
new_cat = CSEPCatalog.from_dataframe(df)
Args:
df (pandas.DataFrame): pandas dataframe
**kwargs:
Returns:
Catalog
"""
catalog_id = None
try:
catalog_id = df['catalog_id'].iloc[0]
except KeyError:
pass
col_list = list(cls.dtype.names)
# we want this to be a structured array not a record array and only returns core attributes listed in dtype
# loses information about the region and event meta data
catalog = numpy.ascontiguousarray(df[col_list].to_records(index=False), dtype=cls.dtype)
out_cls = cls(data=catalog, catalog_id=catalog_id, **kwargs)
return out_cls
@classmethod
def load_json(cls, filename, **kwargs):
""" Loads catalog from JSON file """
with open(filename, 'r') as f:
adict = json.load(f)
return cls.from_dict(adict, **kwargs)
def write_json(self, filename):
""" Writes catalog to json file
Args:
filename (str): path to save file
"""
with open(filename, 'w') as f:
json.dump(self.to_dict(), f, indent=4, separators=(',', ': '), sort_keys=True, default=str)
@property
def catalog(self):
return self._catalog
@property
def data(self):
return self._catalog
@catalog.setter
def catalog(self, val):
"""
Ensures that catalogs with formats other than numpy.ndarray are converted to numpy.ndarray
Note:
This requires that catalog classes implement the self._get_catalog_as_ndarray() function.
This function should return structured numpy.ndarray.
Catalog will remain None, if assigned that way in constructor.
"""
self._catalog = val
if self._catalog is not None:
self._catalog = self._get_catalog_as_ndarray()
# ensure that people are behaving, somewhat non-pythonic but needed
if not isinstance(self._catalog, numpy.ndarray):
raise ValueError("Error: Catalog must be numpy.ndarray! Ensure that self._get_catalog_as_ndarray()" +
" returns an ndarray")
if self.compute_stats and self._catalog is not None:
self.update_catalog_stats()
def _get_catalog_as_ndarray(self):
"""
This function will be called anytime that a catalog is assigned to self.catalog
The purpose of this function is to ensure that the catalog is being properly parsed into the correct format, and
to prevent users of the catalog classes from assigning improper data types.
This also acts as a convenience to allow easy assignment of different types to the catalog. The default
implementation of this function expects that the data are arranged as a collection of tuples corresponding to
the catalog data type.
"""
"""
Converts eventlist into ndarray format.
Note:
Failure state exists if self.catalog is not bound
to the instance explicitly.
"""
# short-circuit
if isinstance(self.catalog, numpy.ndarray):
return self.catalog
# if catalog is not a numpy array, class must have dtype information
catalog_length = len(self.catalog)
catalog = numpy.empty(catalog_length, dtype=self.dtype)
if catalog_length == 0:
return catalog
if isinstance(self.catalog[0], (list, tuple)):
for i, event in enumerate(self.catalog):
catalog[i] = tuple(event)
elif isinstance(self.catalog[0], SummaryEvent):
for i, event in enumerate(self.catalog):
catalog[i] = (event.id, datetime_to_utc_epoch(event.time),
event.latitude, event.longitude, event.depth, event.magnitude)
else:
raise TypeError("Catalog data must be list of events tuples with order:\n"
f"{', '.join(self.dtype.names)} or \n"
"list of SummaryEvent type.")
return catalog
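# Illustrative sketch only: the concrete dtype is defined by subclasses such as
# CSEPCatalog, and the tuple field order below follows the SummaryEvent branch above
# (id, epoch time in ms, latitude, longitude, depth, magnitude):
#   cat = CSEPCatalog(data=[("ev1", 1514764800000, 34.2, -118.5, 7.1, 4.3)])
#   cat.catalog  # structured numpy array built via _get_catalog_as_ndarray()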
def write_ascii(self, filename, write_header=True, write_empty=True, append=False, id_col='id'):
"""
Write catalog in csep2 ascii format.
This format only uses the required variables from the catalog and should work by default. It can be overridden
if an event_id (or other columns) should be used. By default, the routine will look for a column in the catalog array
called 'id' and will populate the event_id column with these values. If the 'id' column is not found, then it will
leave this column blank
Short format description (comma separated values):
longitude, latitude, M, time_string format="%Y-%m-%dT%H:%M:%S.%f", depth, catalog_id, [event_id]
Args:
filename (str): the file location to write the ascii catalog file
write_header (bool): Write header string (default true)
write_empty (bool): Write file event if no events in catalog
append (bool): If true, append to the filename
id_col (str): name of event_id column (if included)
Returns:
NoneType
"""
# longitude, latitude, M, epoch_time (time in millisecond since Unix epoch in GMT), depth, catalog_id, event_id
header = ['lon', 'lat', 'mag', 'time_string', 'depth', 'catalog_id', 'event_id']
if append:
write_string = 'a'
else:
write_string = 'w'
with open(filename, write_string) as outfile:
writer = csv.DictWriter(outfile, fieldnames=header, delimiter=',')
if write_header:
writer.writeheader()
if write_empty and self.event_count == 0:
return
# create iterator from catalog columns
try:
event_ids = self.catalog[id_col]
except ValueError:
event_ids = [''] * self.event_count
row_iter = zip(self.get_longitudes(),
self.get_latitudes(),
self.get_magnitudes(),
self.get_epoch_times(),
self.get_depths(),
# populate list with `self.event_count` elements with val self.catalog_id
[self.catalog_id] * self.event_count,
event_ids)
# write csv file using DictWriter interface
for row in row_iter:
try:
event_id = row[6].decode('utf-8')
except AttributeError:
event_id = row[6]
# create dictionary for each row
adict = {'lon': row[0],
'lat': row[1],
'mag': row[2],
'time_string': str(epoch_time_to_utc_datetime(row[3]).replace(tzinfo=None)).replace(' ', 'T'),
'depth': row[4],
'catalog_id': row[5],
'event_id': event_id}
writer.writerow(adict)
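# A single data line in the ascii format written above would look like
# (values are illustrative only):
#   lon,lat,mag,time_string,depth,catalog_id,event_id
#   -116.35,34.21,4.5,2019-07-06T03:19:53.040000,8.0,0,ev1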
def to_dataframe(self, with_datetime=False):
"""
Returns pandas Dataframe describing the catalog. Explicitly casts to pandas DataFrame.
Note:
The dataframe will be in the format of the original catalog. If you require that the
dataframe be in the CSEP ZMAP format, you must explicitly convert the catalog.
Returns:
(pandas.DataFrame): This function must return a pandas DataFrame
Raises:
ValueError: If self._catalog cannot be passed to pandas.DataFrame constructor, this function
must be overridden in the child class.
"""
df = | pandas.DataFrame(self.catalog) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# basic modules
import os
import sys
import gc
import json
import time
import functools
from datetime import datetime
# data processing
import numpy as np
import pandas as pd
from math import sqrt
from collections import Counter
# custom toolkit
sys.path.append('../tools/')
import loader
import cate_encoding
# set the random seed for reproducibility
SEED = 2018
np.random.seed(SEED)
input_root_path = '../input/'
output_root_path = '../feature/'
tr_base_path = input_root_path + 'tr.ftr'
te_base_path = input_root_path + 'te.ftr'
cv_id_path = '../../../input/' + 'cvid.csv'
postfix = 's0_0'
file_type = 'ftr'
# current features
tr_fea_out_path = output_root_path + 'tr_fea_{}.{}'.format(postfix, file_type)
te_fea_out_path = output_root_path + 'te_fea_{}.{}'.format(postfix, file_type)
# full training data after merging current features with previous features
tr_out_path = output_root_path + 'tr_{}.{}'.format(postfix, file_type)
te_out_path = output_root_path + 'te_{}.{}'.format(postfix, file_type)
ID_NAME = ['session_id', 'impressions']
TARGET_NAME = 'target'
def feat_extract(df):
df['dt'] = pd.to_datetime(df['timestamp'], unit='s')
df['hour'] = df['dt'].dt.hour
df.drop(['dt'], axis=1, inplace=True)
cate_cols = ['city', 'device', 'platform', 'current_filters']
for col in cate_cols:
df[col] = pd.factorize(df[col], sort=True)[0]
# impr rank
df['impr_rank'] = df.groupby(['session_id']).cumcount().values
# price statistics by session
df = cate_encoding.cate_num_stat(df, df, ['session_id'], 'prices', ['median','std','count'])
df['price_sub'] = df['prices'] - df['session_id_by_prices_median']
df['price_div'] = df['prices'] / df['session_id_by_prices_median']
return df
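# Rough pandas equivalents (toy frame, illustration only) of the groupby features above;
# cate_encoding.cate_num_stat is assumed to add the per-session price statistics column:
#   toy = pd.DataFrame({'session_id': [1, 1, 2], 'prices': [10, 30, 50]})
#   toy.groupby('session_id').cumcount()           # impression rank within each session
#   toy.groupby('session_id')['prices'].median()   # per-session median used by price_sub / price_div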
def gen_fea():
tr = loader.load_df(tr_base_path)
te = loader.load_df(te_base_path)
df_base = | pd.concat([tr, te]) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
from functools import wraps
from pandas import DataFrame, Series
from pandas import to_datetime, concat, NA
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class FinnhubError(Error):
def __init__(self, message):
self.message = message
available_kind = [
'stock',
'forex',
'economic',
'crypto',
'indices'
]
names_dict = {
'c': 'close',
'h': 'high',
'l': 'low',
'o': 'open',
'v': 'volume',
'pc': 'previous_close',
't': 'datetime'
}
def _check_resolution(resolution):
if not str(resolution).upper() in [
"1", "5", "15", "30", "60",
"D", "W", "M"
]:
print('Resolution must be one of 1, 5, 15, 30, 60, D, W, M')
return False
else:
return True
def _rename_candle_columns(df):
return df.rename(columns=names_dict)
def _rename_quote(quotes):
quotes['t'] = | to_datetime(quotes['t'], unit='s', utc=True) | pandas.to_datetime |
import numpy as np
import pandas as pd
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
import scipy.sparse as sp
# Config
relation2id_path = '../data/relation2id.csv'
kg_path = '../data/kg_triplet.csv'
human_sl_path = '../data/sl_data'
kg_save = '../data/kg2id.txt'
sl_save = '../data/sl2id.txt'
entity_save = '../data/entity2id.txt'
# -----------------------------------------------------Begin-------------------------------------------
relation2id = pd.read_csv(relation2id_path)
human_SL = | pd.read_csv(human_sl_path, sep=' ') | pandas.read_csv |
import itertools
import traceback
import uuid
from functools import partial, reduce
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from pdb import set_trace as st
import numpy as np
import pandas as pd
import os
import tensorflow as tf
from nninst_graph import AttrMap, Graph, GraphAttrKey
import nninst_mode as mode
from dataset import cifar10
from dataset.mnist_transforms import *
from dataset.config import MNIST_PATH, CIFAR10_PATH
# from nninst.backend.tensorflow.dataset import imagenet, imagenet_raw
# from nninst.backend.tensorflow.dataset.imagenet_hierarchy import imagenet_class_tree
# from nninst.backend.tensorflow.dataset.imagenet_preprocessing import (
# alexnet_preprocess_image,
# )
from tf_graph import (
MaskWeightWithTraceHook,
model_fn_with_fetch_hook,
)
from model import LeNet
from model.resnet18cifar10 import ResNet18Cifar10
from model.resnet10cifar10 import ResNet10Cifar10
# from nninst.backend.tensorflow.model import AlexNet, LeNet, ResNet50
from model.config import ModelConfig
# from nninst.backend.tensorflow.model.config import (
# ALEXNET,
# RESNET_50,
# VGG_16,
# ModelConfig,
# )
from trace.common import (
get_predicted_value,
get_rank,
predict,
reconstruct_class_trace_from_tf,
reconstruct_trace_from_tf,
reconstruct_trace_from_tf_brute_force,
)
from trace.common import (
reconstruct_stat_from_tf,
reconstruct_trace_from_tf_v2,
)
# from nninst.dataset.envs import IMAGENET_RAW_DIR
from nninst_op import Conv2dOp
from nninst_path import (
get_trace_path_in_fc_layers,
get_trace_path_intersection_in_fc_layers,
)
from nninst_statistics import (
calc_trace_path_num,
calc_trace_size,
calc_trace_size_per_layer,
)
from nninst_trace import (
TraceKey,
compact_edge,
compact_trace,
merge_compact_trace,
merge_compact_trace_diff,
merge_compact_trace_intersect,
)
from nninst_utils import filter_value_not_null, merge_dict
from nninst_utils.fs import CsvIOAction, ImageIOAction, IOAction, abspath
from nninst_utils.numpy import arg_approx, arg_sorted_topk
from nninst_utils.ray import ray_iter
__all__ = [
"clean_overlap_ratio",
"overlap_ratio",
"get_overlay_summary",
"resnet_50_imagenet_overlap_ratio",
"alexnet_imagenet_overlap_ratio",
"resnet_50_imagenet_overlap_ratio_error",
"get_overlay_summary_one_side",
"resnet_50_imagenet_overlap_ratio_rand",
"alexnet_imagenet_overlap_ratio_top5",
"resnet_50_imagenet_overlap_ratio_top5_rand",
"resnet_50_imagenet_overlap_ratio_top5",
"alexnet_imagenet_overlap_ratio_error",
"alexnet_imagenet_overlap_ratio_rand",
"alexnet_imagenet_overlap_ratio_top5_rand",
"alexnet_imagenet_overlap_ratio_top5_diff",
]
def calc_all_overlap(
class_trace: AttrMap,
trace: AttrMap,
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
node_name: str = None,
compact: bool = False,
use_intersect_size: bool = False,
key: str = TraceKey.EDGE,
) -> Dict[str, float]:
if node_name is None:
if use_intersect_size:
overlap_ratio, intersect_size = overlap_fn(
class_trace, trace, key, return_size=True
)
return {key + "_size": intersect_size, key: overlap_ratio}
else:
return {
**{
key + "_size": calc_trace_size(trace, key, compact=compact)
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]
},
**{
key: overlap_fn(class_trace, trace, key)
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]
},
}
else:
all_overlap = {
key: overlap_fn(class_trace, trace, key, node_name)
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]
}
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]:
if node_name in trace.ops:
node_trace = trace.ops[node_name]
if key in node_trace:
if compact:
all_overlap[key + "_size"] = np.count_nonzero(
np.unpackbits(node_trace[key])
)
else:
all_overlap[key + "_size"] = TraceKey.to_array(
node_trace[key]
).size
return all_overlap
# Compute mnist overlap ratio between the traces of clean test images and class traces
def clean_overlap_ratio(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
num_gpus:float = 0.2,
images_per_class: int = 1,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath(MNIST_PATH)
model_dir = abspath("result/lenet/model_dropout")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
# print(class_id, predicted_label)
# st()
if predicted_label != class_id:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = {
"image_id": image_id,
**map_prefix(
calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
),
"original",
),
}
# st()
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, images_per_class)
for class_id in range(0, 10)
),
chunksize=1,
out_of_order=True,
num_gpus=0.2,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
# Compute overlap ratio between class traces and the traces of transformed
# (translation, rotation and scale) MNIST test images
def translation_overlap_ratio(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
images_per_class: int = 1,
transforms=None,
name = None,
num_gpus = 0.2,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath(MNIST_PATH)
model_dir = abspath("result/lenet/model_augmentation")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
# Check the prediction on the clean, untransformed image, so no transform
# is needed here
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
# print(class_id, predicted_label)
# st()
if predicted_label != class_id:
return [{}] if per_node else {}
# Reconstruct regardless of the correctness of prediction
trace = reconstruct_trace_from_tf_brute_force(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir, transforms=transforms)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
)
# row = {
# "image_id": image_id,
# **map_prefix(
# calc_all_overlap(
# class_trace_fn(class_id).load(), trace, overlap_fn
# ),
# "original",
# ),
# }
# st()
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
# for image_id in range(0, images_per_class)
for image_id in range(0, images_per_class)
for class_id in range(0, 10)
),
chunksize=1,
out_of_order=True,
num_gpus=num_gpus,
)
traces = [trace for trace in traces if len(trace) != 0]
acc = len(traces) / (images_per_class * 10)
traces = pd.DataFrame(traces).mean()
traces.loc['accuracy'] = acc
traces = traces.to_frame()
traces.columns = [name]
return traces
return CsvIOAction(path, init_fn=get_overlap_ratio)
# Compute the mean overlap ratio of attacked images
def attack_overlap_ratio(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
images_per_class: int = 1,
num_gpus: float = 0.2,
model_dir = "result/lenet/model_augmentation",
transforms = None,
transform_name = "noop",
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
nonlocal model_dir
mode.check(False)
data_dir = abspath(MNIST_PATH)
model_dir = abspath(model_dir)
ckpt_dir = f"{model_dir}/ckpts"
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook,
create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=ckpt_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
adversarial_example = lenet_mnist_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
# model_dir not ckpt_dir
model_dir=model_dir,
transforms = transforms,
transform_name = transform_name,
mode = "test",
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
model_dir=ckpt_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir, transforms=transforms)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=ckpt_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
adversarial_trace = reconstruct_trace_from_tf_brute_force(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=ckpt_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = {
"image_id": image_id,
"class_id": class_id,
**map_prefix(
calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
),
"original",
),
**map_prefix(
calc_all_overlap(
class_trace_fn(adversarial_label).load(),
adversarial_trace,
overlap_fn,
),
"adversarial",
),
}
# row = calc_all_overlap(
# class_trace_fn(class_id).load(), adversarial_trace, overlap_fn
# )
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, images_per_class)
for class_id in range(0, 10)
),
# ((-1, image_id) for image_id in range(mnist_info.test().size)),
chunksize=1,
out_of_order=True,
num_gpus=num_gpus,
)
traces = [trace for trace in traces if len(trace) != 0]
# acc = len(traces) / (images_per_class * 10)
# traces = pd.DataFrame(traces).mean()
# traces.loc['clean_accuracy'] = acc
# traces = traces.to_frame()
# traces.columns = [attack_name]
# return traces
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def lenet_mnist_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
model_dir: str,
mode: str ,
transform_name: str = "noop",
transforms: Transforms = None,
**kwargs,
) -> IOAction[np.ndarray]:
def get_example() -> np.ndarray:
data_dir = abspath(MNIST_PATH)
ckpt_dir = f"{model_dir}/ckpts"
ckpt_dir = abspath(ckpt_dir)
create_model = lambda: LeNet(data_format="channels_first")
if mode == "test":
dataset = mnist.test
elif mode == "train":
dataset = mnist.train
else:
raise RuntimeError("Dataset invalid")
input = dataset(data_dir,
normed=False,
transforms=transforms,
)
# st()
# input = input.filter(lambda image, label: tf.equal(tf.convert_to_tensor(class_id, dtype=tf.int32), label))
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: dataset(data_dir,
normed=False,
transforms=transforms,
)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=ckpt_dir,
**kwargs,
)
return adversarial_example
name = f"{attack_name}_{transform_name}"
result_dir = f"{model_dir}/attack/{mode}/{name}/{class_id}"
path = os.path.join(result_dir, f"{image_id}.pkl")
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def resnet18_cifar10_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
model_dir: str,
dataset_mode: str ,
transform_name: str = "noop",
transforms: Transforms = None,
**kwargs,
) -> IOAction[np.ndarray]:
def get_one_input_from_dataset(dataset):
input = (dataset
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
)
return input
def get_example() -> np.ndarray:
data_dir = abspath(CIFAR10_PATH)
ckpt_dir = f"{model_dir}/ckpts"
ckpt_dir = abspath(ckpt_dir)
# create_model = lambda: LeNet(data_format="channels_first")
create_model = lambda: partial(
ResNet18Cifar10(),
training = False,
)
from dataset.cifar10_main import input_fn_for_adversarial_examples
# dataset = input_fn_for_adversarial_examples(
# is_training= False,
# data_dir=data_dir,
# num_parallel_batches=1,
# is_shuffle=False,
# transform_fn=None,
# )
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: get_one_input_from_dataset(
# dataset
# ),
# attack_fn=attack_fn,
# model_dir=ckpt_dir,
# **kwargs,
# )
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: (
input_fn_for_adversarial_examples(
is_training= False,
data_dir=data_dir,
num_parallel_batches=1,
is_shuffle=False,
transform_fn=None,
)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
),
attack_fn=attack_fn,
model_dir=ckpt_dir,
**kwargs,
)
return adversarial_example
name = f"{attack_name}_{transform_name}"
result_dir = f"{model_dir}/attack/{dataset_mode}/{name}/{class_id}"
path = os.path.join(result_dir, f"{image_id}.pkl")
return IOAction(path, init_fn=get_example, cache=True, compress=True)
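# ResNet-10/CIFAR-10 variant; unlike the ResNet-18 version above, the result path
# does not include the transform name, and dataset_mode selects the train or test
# split of the input pipeline.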
def resnet10_cifar10_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
model_dir: str,
    dataset_mode: str,
transform_name: str = "noop",
transforms: Transforms = None,
**kwargs,
) -> IOAction[np.ndarray]:
def get_one_input_from_dataset(dataset):
input = (dataset
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
)
return input
def get_example() -> np.ndarray:
data_dir = abspath(CIFAR10_PATH)
ckpt_dir = f"{model_dir}/ckpts"
ckpt_dir = abspath(ckpt_dir)
# create_model = lambda: LeNet(data_format="channels_first")
create_model = lambda: partial(
ResNet10Cifar10(),
            training=False,
)
from dataset.cifar10_main import input_fn_for_adversarial_examples
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: (
input_fn_for_adversarial_examples(
                    is_training=(dataset_mode == "train"),
data_dir=data_dir,
num_parallel_batches=1,
is_shuffle=False,
transform_fn=None,
)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
),
attack_fn=attack_fn,
model_dir=ckpt_dir,
**kwargs,
)
return adversarial_example
name = f"{attack_name}"
result_dir = f"{model_dir}/attack/{dataset_mode}/{name}/{class_id}"
path = os.path.join(result_dir, f"{image_id}.pkl")
return IOAction(path, init_fn=get_example, cache=True, compress=True)
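# Wraps an adversarial-example IOAction so the cached example (assumed to be a
# float array in [0, 1] with a leading batch axis) is also saved as an 8-bit PNG
# next to the pickle (same path, .png extension).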
def adversarial_example_image(
example_io: IOAction[np.ndarray], cache: bool = True
) -> IOAction[np.ndarray]:
def get_example() -> np.ndarray:
example = example_io.load()
if example is None:
return None
return (np.squeeze(example, axis=0) * 255).astype(np.uint8)
path = example_io.path.replace(".pkl", ".png")
return ImageIOAction(path, init_fn=get_example, cache=cache)
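# Driver that fans adversarial-example generation out over all (class_id, image_id)
# pairs via ray_iter, saving both the raw example and its PNG rendering; failures
# are reported per image instead of aborting the whole sweep.
# Hypothetical usage sketch (attack_fn / generate_adversarial_fn supplied elsewhere):
#     generate_examples(
#         example_fn=lenet_mnist_example,
#         class_ids=range(10),
#         image_ids=range(100),
#         attack_name="FGSM",
#         attack_fn=attack_fn,
#         generate_adversarial_fn=generate_adversarial_fn,
#         model_dir="tf/lenet/model_early",
#         mode="test",
#     )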
def generate_examples(
example_fn: Callable[..., IOAction[np.ndarray]],
class_ids: Iterable[int],
image_ids: Iterable[int],
attack_name: str,
transform_name: str = "noop",
transforms = None,
cache: bool = True,
num_gpus=0.2,
**kwargs,
):
def generate_examples_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
example_io = example_fn(
attack_name=attack_name,
class_id=class_id,
image_id=image_id,
cache=cache,
                transforms=transforms,
                transform_name=transform_name,
**kwargs,
)
example_io.save()
adversarial_example_image(example_io, cache=cache).save()
return class_id, image_id
except Exception:
return class_id, image_id, traceback.format_exc()
name = f"{attack_name}_{transform_name}"
print(f"begin {name}, num_gpu={num_gpus}")
if len(image_ids) > 99:
chunksize = 4
else:
chunksize = 1
results = ray_iter(
generate_examples_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=chunksize,
out_of_order=True,
num_gpus=num_gpus,
# huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
# print(f"finish class {class_id} image {image_id}")
print(f"finish {name}")
def get_overlay_summary(
overlap_ratios: pd.DataFrame, trace_key: str, threshold=1
) -> Dict[str, int]:
condition_positive = len(overlap_ratios)
if condition_positive == 0:
return {}
original_key = f"original.{trace_key}"
false_positive = np.count_nonzero(overlap_ratios[original_key] < threshold)
adversarial_key = f"adversarial.{trace_key}"
true_positive = np.count_nonzero(overlap_ratios[adversarial_key] < threshold)
predicted_condition_positive = true_positive + false_positive
recall = (true_positive / condition_positive) if condition_positive != 0 else 0
precision = (
(true_positive / predicted_condition_positive)
if predicted_condition_positive != 0
else 0
)
f1 = (2 / ((1 / recall) + (1 / precision))) if recall != 0 and precision != 0 else 0
return dict(
threshold=threshold,
condition_positive=condition_positive,
# predicted_condition_positive=predicted_condition_positive,
original_is_higher=np.count_nonzero(
(overlap_ratios[original_key] - overlap_ratios[adversarial_key]) > 0
),
# adversarial_is_higher=np.count_nonzero(
# (overlap_ratios[adversarial_key] - overlap_ratios[original_key]) > 0),
true_positive=true_positive,
false_positive=false_positive,
recall=recall,
precision=precision,
f1=f1,
)
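# For each correctly-classified MNIST image that the given attack flips, computes
# the overlap between its per-example trace and the per-class trace, for both the
# original input and its adversarial counterpart; results are cached as a CSV.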
def overlap_ratio(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath("/home/yxqiu/data/mnist/raw")
model_dir = abspath("tf/lenet/model_early")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: mnist.test(data_dir, normed=False)
# .filter(lambda image, label:
# tf.equal(
# tf.convert_to_tensor(class_id, dtype=tf.int32),
# label))
# .skip(image_id).take(1).batch(1)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = lenet_mnist_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
# class_id = mnist_info.test().label(image_id)
#
# if class_id != trace.attrs[GraphAttrKey.PREDICT]:
# return [{}] if per_node else {}
if trace is None:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: mnist.test(data_dir, normed=False)
# .filter(lambda image, label:
# tf.equal(
# tf.convert_to_tensor(class_id, dtype=tf.int32),
# label))
# .skip(image_id).take(1).batch(1)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
#
# if adversarial_example is None:
# return [{}] if per_node else {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
if class_id != adversarial_label:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = {
"image_id": image_id,
**map_prefix(
calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
),
"original",
),
**map_prefix(
calc_all_overlap(
class_trace_fn(adversarial_label).load(),
adversarial_trace,
overlap_fn,
),
"adversarial",
),
}
return row
else:
return {}
# traces = ray_iter(get_row, (image_id for image_id in range(300, 350)),
# traces = ray_iter(get_row, (image_id for image_id in range(131, 300)),
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 100)
for class_id in range(0, 10)
),
# ((-1, image_id) for image_id in range(mnist_info.test().size)),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
# chunksize=1, out_of_order=False, num_gpus=1)
# count = 0
# result = []
# for trace in traces:
# result.append(trace)
# print(count)
# count += 1
# traces = [trace for trace in result if len(trace) != 0]
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
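# ImageNet/ResNet-50 version of overlap_ratio: adversarial examples are generated
# inline (not loaded from cache), traces are edge-compacted, and per_node=True
# expands each image into one row per node of the class trace.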
def resnet_50_imagenet_overlap_ratio(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_class_trace_from_tf(
class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
model_dir=model_dir,
select_fn=select_fn,
per_channel=per_channel,
)
if trace is None:
return {}
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir, class_id, image_id, normed=False
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
if class_id != adversarial_label:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = class_trace_fn(class_id).load()
adversarial_class_trace = class_trace_fn(adversarial_label).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"node_name": node_name,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
if (
row[f"original.{TraceKey.WEIGHT}"] is not None
or row[f"original.{TraceKey.EDGE}"] is not None
):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
# traces = ray_iter(get_row, (image_id for image_id in range(300, 350)),
# traces = ray_iter(get_row, (image_id for image_id in range(131, 300)),
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
# for image_id in range(0, 50)
for class_id in range(1, 1001)
),
# for class_id in range(1, 2)),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
# chunksize=1, out_of_order=False, num_gpus=1)
# count = 0
# result = []
# for trace in traces:
# result.append(trace)
# print(count)
# count += 1
# traces = [trace for trace in result if len(trace) != 0]
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
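# Top-5 variant: class traces for the top-5 predicted labels (original and
# adversarial) are merged before computing overlaps, and an image is only kept
# when the adversarial label falls outside the original top-5 predictions.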
def resnet_50_imagenet_overlap_ratio_top5(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
if trace is None:
return {}
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir, class_id, image_id, normed=False
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
if adversarial_label not in label_top5:
# if np.intersect1d(label_top5, adversarial_label_top5).size == 0:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in label_top5]
)
adversarial_class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in adversarial_label_top5]
)
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"node_name": node_name,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
if (
row[f"original.{TraceKey.WEIGHT}"] is not None
or row[f"original.{TraceKey.EDGE}"] is not None
):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
        return pd.DataFrame(traces)
    return CsvIOAction(path, init_fn=get_overlap_ratio)
# Copyright 2021 The FirstOrderLp Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates all the experimental results used in the paper.
# It requires python 3, numpy, pandas, and matplotlib installed to run.
#
# `python analyze_csv_data.py`
#
# It reads csv files containing experimental results from ./csv, and outputs
# pdf figures to ./results/figs and latex tables to ./results/tex.
import itertools
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from cycler import cycler
plt.rcParams.update({"figure.max_open_warning": 0, "font.size": 16})
# The 'TkAgg' matplotlib backend fails at the default recursion limit.
sys.setrecursionlimit(10000)
# This is required to generate plots that are easy to read when printed:
plt.rcParams["axes.prop_cycle"] = cycler(
linestyle=["-", "--", ":", "-.", "-", "--", ":", "-.", "-", "--"],
color=[
"#1f77b4",
"#ff7f0e",
"#2ca02c",
"#d62728",
"#9467bd",
"#8c564b",
"#e377c2",
"#7f7f7f",
"#bcbd22",
"#17becf",
],
)
# directory where the csv files are located
CSV_DIR = "./csv"
# directory where all the figure pdf and table tex files are written to:
OUTPUT_DIR = "./results"
FIGS_DIR = os.path.join(OUTPUT_DIR, "figs")
TEX_DIR = os.path.join(OUTPUT_DIR, "tex")
OPT = "TERMINATION_REASON_OPTIMAL"
KKT_PASSES_LIMIT = 1e5
TIME_LIMIT_SECS = 60 * 60 # 1hr
TIME_LIMIT_SECS_ABLATION = 6 * 60 * 60 # 6hr
# shift to use for shifted geometric mean
SGM_SHIFT = int(10)
# penalised average runtime:
PAR = 1.0 # can be None, which removes unsolved instead of penalizing
# Which scaling experiments to present
SCALING_EXPS_TO_USE = [
"off,off",
"off,pock_chambolle alpha=1",
"10 rounds,off",
"10 rounds,pock_chambolle alpha=1",
]
# Which primal-weight experiments to present
PRIMALWEIGHT_EXPS_TO_USE = [
"adaptive",
#'Fixed 1e-0',
]
# placeholder:
_BEST_STR = "_best_str_"
_BEST_FIXED = "_best_fixed_"
# Dataset names:
MITTELMANN_STR = "lp_benchmark"
MIPLIB_STR = "mip_relaxations"
NETLIB_STR = "netlib"
# Change table font size to fit paper:
LATEX_FONT_SIZE = "\\small"
# Naming for improvements plots:
_PDHG = "PDHG"
_RESTARTS = "+restarts"
_SCALING = "+scaling"
_PRIMAL_WEIGHT = "+primal\nweight"
_STEPSIZE = "+step\nsize"
_PRESOLVE = "+presolve\n(= PDLP)"
# Order in which improvements should appear:
IMPROVEMENTS_ORDER = [
_PDHG,
_RESTARTS,
_SCALING,
_PRIMAL_WEIGHT,
_STEPSIZE,
_PRESOLVE,
]
IMPROVEMENTS_ORDER_IDX = dict(
zip(IMPROVEMENTS_ORDER, range(len(IMPROVEMENTS_ORDER)))
)
# Horrible HACK, but needs to be done
def label_lookup(label):
if "pdhg_enhanced" in label:
return "PDLP"
if "mirror-prox" in label:
return "Enh. Extragradient"
if "pdhg_vanilla" in label:
return "PDHG"
if "scs-indirect" in label:
return "SCS (matrix-free)"
if "scs-direct" in label:
return "SCS"
if "nopresolve" in label:
return "No presolve"
if "no restarts" in label:
return "No restart"
if "adaptive theoretical" in label:
return "Adaptive restart (theory)"
if "adaptive enhanced" in label:
return "PDLP"
if "pdhg" in label and "pdhg_mp_1h" in label:
return "PDLP"
if "off,off" in label:
return "No scaling"
if "off,pock_chambolle alpha=1" in label:
return "Pock-Chambolle"
if "10 rounds,off" in label:
return "Ruiz"
if "10 rounds,pock_chambolle alpha=1" in label:
return "Ruiz + Pock-Chambolle"
if "stepsize" in label:
if "adaptive" in label:
return "PDLP"
if "fixed" in label:
return "Fixed step-size"
if "scaling" in label:
if _BEST_STR in label:
return "Best per-instance scaling"
if "primalweight" in label:
if "adaptive" in label:
return "PDLP"
if "Fixed 1e-0" in label:
return r"Fixed PW ($\theta=0$)"
if _BEST_STR in label:
return "Best per-instance PW"
if _BEST_FIXED in label:
return "Best fixed PW"
if "improvements" in label:
if "vanilla" in label:
return _PDHG
st = ""
if "restarts" in label:
st = _RESTARTS
if "scaling" in label:
st = _SCALING
if "primal weight" in label:
st = _PRIMAL_WEIGHT
if "step size" in label:
st = _STEPSIZE
if "pdlp_final" in label:
st = _PRESOLVE
return st
if "malitskypock" in label:
if _BEST_STR in label:
return "Best per-instance MP settings"
return "Best fixed MP setting"
return label
def sanitize_title(title):
title = title.replace("_", " ").title()
title = title.replace("Lp", "LP")
title = title.replace("Mip", "MIP")
title = title.replace("Pdlp", "PDLP")
title = title.replace("Pdhg", "PDHG")
title = title.replace("Scs", "SCS")
title = title.replace("Sgm", "SGM")
return title
# Generate plots of xaxis vs fraction of solved problems
def solved_problems_vs_xaxis_figs(
dfs, xaxis, xlabel, prefix, num_instances, legend_location="best", xmin=0.0
):
fig = plt.figure()
stats_dfs = {}
for k, df_k in dfs.items():
stats_df = (
df_k.groupby(xaxis)[xaxis]
.agg("count")
.pipe(pd.DataFrame)
.rename(columns={xaxis: "frequency"})
)
stats_df["cum_solved_count"] = (
stats_df["frequency"].cumsum() / num_instances
)
stats_df = stats_df.drop(columns="frequency").reset_index()
stats_dfs[k] = stats_df
max_xaxis = pd.concat(stats_dfs)[xaxis].max()
lines = []
labels = []
for k, df_k in stats_dfs.items():
if df_k.empty:
continue
df_k = df_k.append(
{
xaxis: max_xaxis,
"cum_solved_count": df_k.iloc[-1]["cum_solved_count"],
},
ignore_index=True,
)
df_k.reset_index()
label = label_lookup(k)
lines.extend(
plt.plot(df_k[xaxis], df_k["cum_solved_count"], label=label)
)
labels.append(label)
plt.ylabel("Fraction of problems solved")
plt.xlabel(xlabel)
plt.ylim((0, 1))
plt.ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
plt.title(sanitize_title(prefix))
plt.xscale("log")
plt.xlim(left=xmin)
if legend_location == "outer":
plt.legend(bbox_to_anchor=(1.04, 0.5), loc="center left")
elif legend_location == "separate":
figlegend = plt.figure()
figlegend.legend(lines, labels, ncol=len(lines), loc="center")
legendpath = os.path.join(
FIGS_DIR, f"{prefix}_{xaxis}_v_solved_probs_legend.pdf"
)
figlegend.savefig(legendpath, bbox_inches="tight")
else:
plt.legend(loc=legend_location)
path = os.path.join(FIGS_DIR, f"{prefix}_{xaxis}_v_solved_probs.pdf")
fig.savefig(path, bbox_inches="tight")
def gen_solved_problems_plots(
df, prefix, num_instances, legend_location="best"
):
exps = df["experiment_label"].unique()
dfs = {k: df[df["experiment_label"] == k] for k in exps}
optimal_dfs = {
k: v[v["termination_reason"] == OPT] for (k, v) in dfs.items()
}
solved_problems_vs_xaxis_figs(
optimal_dfs,
"cumulative_kkt_matrix_passes",
f"KKT matrix passes SGM{SGM_SHIFT}",
prefix,
num_instances,
legend_location,
xmin=100,
)
solved_problems_vs_xaxis_figs(
optimal_dfs,
"solve_time_sec",
"Wall-clock time (secs)",
prefix,
num_instances,
legend_location,
xmin=1.0,
)
def gen_solved_problems_plots_split_tol(
df, prefix, num_instances, legend_location="best"
):
tols = df["tolerance"].unique()
for t in tols:
gen_solved_problems_plots(
df[df["tolerance"] == t],
prefix + f"_tol_{t:.0E}",
num_instances,
legend_location,
)
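# Shifted geometric mean: SGM_s(x) = exp(mean(log(x_i + s))) - s, ignoring NaNs;
# the shift s damps the influence of very small values, and non-positive results
# are mapped to NaN.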
def shifted_geomean(x, shift):
x = x[~np.isnan(x)]
sgm = np.exp(np.sum(np.log(x + shift) / len(x))) - shift
return sgm if sgm > 0 else np.nan
def change_table_font_size(table):
table = table.replace(
"\\begin{table}\n", "\\begin{table}\n" + LATEX_FONT_SIZE + "\n"
)
table = table.replace("\\caption{", "\\caption{" + LATEX_FONT_SIZE + " ")
return table
def gen_total_solved_problems_table(
df, prefix, par, time_limit=TIME_LIMIT_SECS
):
solved_probs = (
df[df["termination_reason"] == OPT]
.groupby("experiment_label")["experiment_label"]
.agg("count")
.pipe(pd.DataFrame)
.rename(columns={"experiment_label": "Solved count"})
)
solved_probs.index.name = "Experiment"
solved_probs = solved_probs.reset_index()
shift = SGM_SHIFT
kkt_sgm = df.copy()
if par is not None:
kkt_sgm.loc[
kkt_sgm["termination_reason"] != OPT, "cumulative_kkt_matrix_passes"
] = (par * KKT_PASSES_LIMIT)
else:
kkt_sgm.loc[
kkt_sgm["termination_reason"] != OPT, "cumulative_kkt_matrix_passes"
] = np.nan
# Hack for SCS direct
kkt_sgm.loc[
kkt_sgm["experiment_label"].str.contains("scs-direct"),
"cumulative_kkt_matrix_passes",
] = np.nan
kkt_sgm = (
kkt_sgm.groupby("experiment_label")["cumulative_kkt_matrix_passes"]
.agg(lambda _: shifted_geomean(_, shift))
.pipe(pd.DataFrame)
.rename(
columns={"cumulative_kkt_matrix_passes": f"KKT passes SGM{shift}"}
)
)
kkt_sgm.index.name = "Experiment"
kkt_sgm = kkt_sgm.reset_index()
wall_clock = df.copy()
if par is not None:
wall_clock.loc[
wall_clock["termination_reason"] != OPT, "solve_time_sec"
] = (par * time_limit)
else:
wall_clock.loc[
wall_clock["termination_reason"] != OPT, "solve_time_sec"
] = np.nan
wall_clock = (
wall_clock.groupby("experiment_label")["solve_time_sec"]
.agg(lambda _: shifted_geomean(_, shift))
.pipe(pd.DataFrame)
.rename(columns={"solve_time_sec": f"Solve time secs SGM10"})
)
wall_clock.index.name = "Experiment"
wall_clock = wall_clock.reset_index()
output = solved_probs.merge(kkt_sgm).merge(wall_clock)
# rename the labels
for e in output["Experiment"]:
output.loc[output["Experiment"] == e, "Experiment"] = label_lookup(e)
output = output.sort_values("Solved count", ascending=True)
# HACK to fix improvements table ordering and line break
if "improvements" in prefix:
output["rank"] = output["Experiment"].map(IMPROVEMENTS_ORDER_IDX)
output.sort_values("rank", inplace=True)
output.drop(labels="rank", axis=1, inplace=True)
to_write = output.copy()
for e in to_write["Experiment"]:
to_write.loc[to_write["Experiment"] == e, "Experiment"] = e.replace(
"\n", " "
)
else:
to_write = output
table = to_write.to_latex(
float_format="%.1f",
longtable=False,
index=False,
caption=f"Performance statistics: {sanitize_title(prefix)}",
label=f"t:solved-probs-{prefix}",
column_format="lccc",
escape=False,
na_rep="-",
)
table = change_table_font_size(table)
path = os.path.join(TEX_DIR, f"{prefix}_solved_probs_table.tex")
with open(path, "w") as f:
f.write(table)
return output
def gen_total_solved_problems_table_split_tol(
df, prefix, par, time_limit=TIME_LIMIT_SECS
):
outputs = {}
tols = df["tolerance"].unique()
for t in tols:
outputs[t] = gen_total_solved_problems_table(
df[df["tolerance"] == t], prefix + f"_tol_{t:.0E}", par, time_limit
)
return outputs
def plot_loghist(x, nbins):
x = x[~np.isnan(x)]
hist, bins = np.histogram(x, bins=nbins)
logbins = np.logspace(
np.log10(max(bins[0], 1e-10)), np.log10(max(bins[-1], 1e-10)), nbins
)
plt.hist(x, bins=logbins)
plt.xscale("log")
def gen_ratio_histograms_split_tol(df, prefix, par):
tols = df["tolerance"].unique()
for t in tols:
gen_ratio_histograms(
df[df["tolerance"] == t],
prefix + f"_tol_{t:.0E}",
"cumulative_kkt_matrix_passes",
f"KKT matrix passes SGM{SGM_SHIFT}",
KKT_PASSES_LIMIT,
par,
)
gen_ratio_histograms(
df[df["tolerance"] == t],
prefix + f"_tol_{t:.0E}",
"solve_time_sec",
"Wall-clock time (secs)",
TIME_LIMIT_SECS,
par,
)
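# For exactly two experiment labels, plots a log-scale histogram of per-instance
# performance ratios (label0 / label1) on the given metric, penalizing unsolved
# instances with par * limit, and writes the ratios out as a LaTeX table.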
def gen_ratio_histograms(df, prefix, xaxis, xlabel, limit, par):
assert len(df["experiment_label"].unique()) == 2
(l0, l1) = df["experiment_label"].unique()
def performance_ratio_fn(df, par):
df = df.reset_index()
assert len(df) <= 2
df0 = df[df["experiment_label"] == l0]
df1 = df[df["experiment_label"] == l1]
instance = df.instance_name.unique()
if len(df0) == 1 and df0["termination_reason"].iloc[0] == OPT:
kkt_passes_0 = df0[xaxis].iloc[0]
else:
kkt_passes_0 = par * limit
if len(df1) == 1 and df1["termination_reason"].iloc[0] == OPT:
kkt_passes_1 = df1[xaxis].iloc[0]
else:
kkt_passes_1 = par * limit
# if (df['termination_reason'] != OPT).any():
# return np.nan
return kkt_passes_0 / kkt_passes_1
ratios = (
df.groupby(["instance_name"])
.apply(lambda _: performance_ratio_fn(_, par))
.reset_index(name="ratio")
.dropna()
)
nbins = min(len(ratios) // 3, 25)
if nbins > 0:
plt.figure(figsize=(10, 6))
plt.title(
sanitize_title(
f"{prefix} {xlabel} {label_lookup(l0)}:{label_lookup(l1)}"
)
)
plot_loghist(ratios["ratio"], nbins)
path = os.path.join(
FIGS_DIR,
f"{prefix}_{label_lookup(l0)}_{label_lookup(l1)}"
+ f"_{xaxis}_performance_ratio.pdf",
)
plt.savefig(path)
table = ratios.to_latex(
float_format="%.2f",
longtable=False,
index=False,
caption=f"Performance ratio.",
label=f"t:ratio-{prefix}",
column_format="lc",
na_rep="-",
)
table = change_table_font_size(table)
path = os.path.join(
TEX_DIR,
f"{prefix}_{label_lookup(l0)}:"
f"{label_lookup(l1)}_{xaxis}_ratio_table.tex",
)
with open(path, "w") as f:
f.write(table)
# Unsolved problems might be missing from csv, make sure all are accounted for.
def fill_in_missing_problems(df, instances_list):
new_index = pd.Index(instances_list, name="instance_name")
experiments = df["experiment_label"].unique()
dfs = []
for e in experiments:
old_df = df[df["experiment_label"] == e]
tol = old_df["tolerance"].unique()[0]
new_df = (
old_df.set_index("instance_name").reindex(new_index).reset_index()
)
# otherwise these would be nan
new_df["tolerance"] = tol
new_df["experiment_label"] = e
dfs.append(new_df)
return pd.concat(dfs)
def improvements_plot(dfs, prefix, key, ascending):
normalized_dfs = []
for df in dfs:
df[key] /= df[df["Experiment"] == "PDHG"][key].to_numpy()[0]
normalized_dfs.append(df)
df = pd.concat(normalized_dfs)
fig = plt.figure(figsize=(10, 6))
markers = itertools.cycle(["o", "v", "^", "<", ">", "s"])
for tol in df["tolerance"].unique():
_df = df[df["tolerance"] == tol].reset_index(drop=True)
plt.plot(
_df[key].to_numpy(),
linestyle="--",
marker=next(markers),
markersize=12,
label=f"tolerance {tol:.0E}",
)
plt.yscale("log")
plt.ylabel("Normalized " + key, fontsize=20)
plt.title(sanitize_title(prefix), fontsize=20)
plt.xticks(range(len(_df["Experiment"])), _df["Experiment"].to_list())
plt.tick_params(axis="both", which="both", labelsize=20)
ax = plt.gca()
ax.yaxis.set_major_locator(ticker.LogLocator(subs=[1, 2, 3, 5, 7]))
ax.yaxis.set_major_formatter(
ticker.LogFormatterSciNotation(
labelOnlyBase=False, minor_thresholds=(4, 2)
)
)
# ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%.2f")
if len(dfs) > 1:
plt.legend(loc="best", prop={"size": 20})
name = key.replace(" ", "_")
path = os.path.join(FIGS_DIR, f"{prefix}_{name}.pdf")
plt.savefig(path, bbox_inches="tight")
def gen_all_improvement_plots(outputs, prefix):
dfs = []
for tol, df in outputs.items():
df = df.copy()
df["tolerance"] = tol
dfs.append(df)
improvements_plot(dfs, prefix, "KKT passes SGM10", ascending=False)
improvements_plot(dfs, prefix, "Solve time secs SGM10", ascending=False)
improvements_plot(dfs, prefix, "Solved count", ascending=True)
# First, make output directories
if not os.path.exists(FIGS_DIR):
os.makedirs(FIGS_DIR)
if not os.path.exists(TEX_DIR):
os.makedirs(TEX_DIR)
# Get clean list of all problems we tested on:
with open("../benchmarking/mip_relaxations_instance_list") as f:
miplib_instances = f.readlines()
miplib_instances = [p.strip() for p in miplib_instances if p[0] != "#"]
with open("../benchmarking/lp_benchmark_instance_list") as f:
mittelmann_instances = f.readlines()
mittelmann_instances = [p.strip() for p in mittelmann_instances if p[0] != "#"]
with open("../benchmarking/netlib_benchmark_instance_list") as f:
netlib_instances = f.readlines()
netlib_instances = [p.strip() for p in netlib_instances if p[0] != "#"]
# Pull out 'default' (ie best) pdhg implementation to compare against:
df_default = pd.read_csv(os.path.join(CSV_DIR, "miplib_pdhg_enhanced_100k.csv"))
df_default = fill_in_missing_problems(df_default, miplib_instances)
######################################################################
# PDLP pdhg vs vanilla pdhg (JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, "miplib_pdhg_vanilla_100k.csv"))
df = fill_in_missing_problems(df, miplib_instances)
df = pd.concat((df_default, df))
gen_solved_problems_plots_split_tol(df, f"{MIPLIB_STR}", len(miplib_instances))
gen_total_solved_problems_table_split_tol(df, f"{MIPLIB_STR}", PAR)
gen_ratio_histograms_split_tol(df, f"{MIPLIB_STR}", PAR)
######################################################################
df = pd.read_csv(os.path.join(CSV_DIR, "mittelmann_pdhg_enhanced_100k.csv"))
df = fill_in_missing_problems(df, mittelmann_instances)
df_vanilla = pd.read_csv(
os.path.join(CSV_DIR, "mittelmann_improvements_100k.csv")
)
df_vanilla = df_vanilla[df_vanilla["enhancements"] == "vanilla"]
df_vanilla = fill_in_missing_problems(df_vanilla, mittelmann_instances)
df = pd.concat((df, df_vanilla))
gen_solved_problems_plots_split_tol(
df, f"{MITTELMANN_STR}", len(mittelmann_instances)
)
gen_total_solved_problems_table_split_tol(df, f"{MITTELMANN_STR}", PAR)
gen_ratio_histograms_split_tol(df, f"{MITTELMANN_STR}", PAR)
######################################################################
df = pd.read_csv(os.path.join(CSV_DIR, "netlib_pdhg_enhanced_100k.csv"))
df = fill_in_missing_problems(df, netlib_instances)
df_vanilla = pd.read_csv(os.path.join(CSV_DIR, "netlib_improvements_100k.csv"))
df_vanilla = df_vanilla[df_vanilla["enhancements"] == "vanilla"]
df_vanilla = fill_in_missing_problems(df_vanilla, netlib_instances)
df = pd.concat((df, df_vanilla))
gen_solved_problems_plots_split_tol(df, f"{NETLIB_STR}", len(netlib_instances))
gen_total_solved_problems_table_split_tol(df, f"{NETLIB_STR}", PAR)
gen_ratio_histograms_split_tol(df, f"{NETLIB_STR}", PAR)
######################################################################
# Scaling results (JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, "miplib_malitskypock_100k.csv"))
mp_solved = (
df[df["termination_reason"] == OPT]
.groupby(["experiment_label", "tolerance"])["experiment_label"]
.agg("count")
.pipe(pd.DataFrame)
.rename(columns={"experiment_label": "solved"})
.reset_index()
)
dfs = []
for t in df["tolerance"].unique():
_df = mp_solved[mp_solved["tolerance"] == t]
best_mp_run = _df.loc[_df["solved"].idxmax()]["experiment_label"]
dfs.append(df[df["experiment_label"] == best_mp_run])
df_best_ind = fill_in_missing_problems(pd.concat(dfs), miplib_instances)
# Pull out best performing scaling for each instance / tolerance:
df_best_fixed = df[df["termination_reason"] == OPT].reset_index()
best_idxs = df_best_fixed.groupby(["instance_name", "tolerance"])[
"cumulative_kkt_matrix_passes"
].idxmin()
df_best_fixed = df_best_fixed.loc[best_idxs]
for t in df_best_fixed["tolerance"].unique():
# rename the experiment label
df_best_fixed.loc[
df_best_fixed["tolerance"] == t, "experiment_label"
] = f"malitskypock {_BEST_STR} {t}"
df_best_fixed = fill_in_missing_problems(df_best_fixed, miplib_instances)
df_stepsize = pd.read_csv(os.path.join(CSV_DIR, "miplib_stepsize_100k.csv"))
df_stepsize = fill_in_missing_problems(df_stepsize, miplib_instances)
df = pd.concat((df_stepsize, df_best_fixed, df_best_ind))
gen_solved_problems_plots_split_tol(
df, f"{MIPLIB_STR}_stepsize", len(miplib_instances), False
)
gen_total_solved_problems_table_split_tol(
df, f"{MIPLIB_STR}_stepsize", PAR, TIME_LIMIT_SECS_ABLATION
)
######################################################################
# PDLP vs mp vs scs on MIPLIB (JOIN PDHG/MP WITH SCS)
df_pdhg_mp = pd.read_csv(os.path.join(CSV_DIR, "miplib_pdhg_mp_1h.csv"))
df_pdhg_mp = fill_in_missing_problems(df_pdhg_mp, miplib_instances)
df_scs = pd.read_csv(os.path.join(CSV_DIR, "miplib_scs_1h.csv"))
df_scs = fill_in_missing_problems(df_scs, miplib_instances)
df_pdhg_vanilla = pd.read_csv(
os.path.join(CSV_DIR, "miplib_pdhg_vanilla_1h.csv")
)
df_pdhg_vanilla = fill_in_missing_problems(df_pdhg_vanilla, miplib_instances)
df = pd.concat((df_pdhg_mp, df_pdhg_vanilla, df_scs))
gen_solved_problems_plots_split_tol(
df,
f"{MIPLIB_STR}_baselines",
len(miplib_instances),
legend_location="separate",
)
gen_total_solved_problems_table_split_tol(df, f"{MIPLIB_STR}_baselines", PAR)
df_pdhg_scs_dir = pd.concat(
(
df_pdhg_mp[df_pdhg_mp["method"] == "pdhg"],
df_scs[df_scs["method"] == "scs-direct"],
)
)
df_pdhg_scs_indir = pd.concat(
(
df_pdhg_mp[df_pdhg_mp["method"] == "pdhg"],
df_scs[df_scs["method"] == "scs-indirect"],
)
)
gen_ratio_histograms_split_tol(df_pdhg_mp, f"{MIPLIB_STR}", PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_indir, f"{MIPLIB_STR}", PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_dir, f"{MIPLIB_STR}", PAR)
######################################################################
# PDLP vs mp vs scs on MITTELMANN (JOIN PDHG/MP WITH SCS)
df_pdhg_mp = pd.read_csv(os.path.join(CSV_DIR, "mittelmann_pdhg_mp_1h.csv"))
df_pdhg_mp = fill_in_missing_problems(df_pdhg_mp, mittelmann_instances)
df_pdhg_vanilla = pd.read_csv(
os.path.join(CSV_DIR, "mittelmann_pdhg_vanilla_1h.csv")
)
df_pdhg_vanilla = fill_in_missing_problems(
df_pdhg_vanilla, mittelmann_instances
)
df_scs = pd.read_csv(os.path.join(CSV_DIR, "mittelmann_scs_1h.csv"))
df_scs = fill_in_missing_problems(df_scs, mittelmann_instances)
df = pd.concat((df_pdhg_mp, df_pdhg_vanilla, df_scs))
# -*- coding: utf-8 -*-
"""Structures data in ML-friendly ways."""
import re
import copy
import datetime as dt
import random
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from avaml import Error, setenvironment as se, _NONE, CSV_VERSION, REGIONS, merge, REGION_ELEV
from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS
from avaml.aggregatedata.time_parameters import to_time_parameters
from varsomdata import getforecastapi as gf
from varsomdata import getmisc as gm
__author__ = 'arwi'
LABEL_PROBLEM_PRIMARY = {
"ext_attr": [
"avalanche_problem_type_id",
"avalanche_problem_type_name",
"avalanche_type_id",
"avalanche_type_name",
"avalanche_ext_id",
"avalanche_ext_name"
],
"values": {
_NONE: [0, "", 0, "", 0, ""],
"new-loose": [3, "Nysnø (løssnøskred)", 20, "Løssnøskred", 10, "Tørre løssnøskred"],
"wet-loose": [5, "Våt snø (løssnøskred)", 20, "Løssnøskred", 15, "Våte løssnøskred"],
"new-slab": [7, "Nysnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"drift-slab": [10, "Fokksnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"pwl-slab": [30, "Vedvarende svakt lag (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"wet-slab": [45, "Våt snø (flakskred)", 10, "Flakskred", 25, "Våte flakskred"],
"glide": [50, "Glideskred", 10, "Flakskred", 25, "Våte flakskred"]
}
}
LABEL_PROBLEM = {
"cause": {
"ext_attr": ["aval_cause_id", "aval_cause_name"],
"values": {
"0": [0, ""],
"new-snow": [10, "Nedføyket svakt lag med nysnø"],
"hoar": [11, "Nedsnødd eller nedføyket overflaterim"],
"facet": [13, "Nedsnødd eller nedføyket kantkornet snø"],
"crust": [14, "Dårlig binding mellom glatt skare og overliggende snø"],
"snowdrift": [15, "Dårlig binding mellom lag i fokksnøen"],
"ground-facet": [16, "Kantkornet snø ved bakken"],
"crust-above-facet": [18, "Kantkornet snø over skarelag"],
"crust-below-facet": [19, "Kantkornet snø under skarelag"],
"ground-water": [20, "Vann ved bakken/smelting fra bakken"],
"water-layers": [22, "Opphopning av vann i/over lag i snødekket"],
"loose": [24, "Ubunden snø"]
}
},
"dsize": {
"ext_attr": ["destructive_size_ext_id", "destructive_size_ext_name"],
"values": {
'0': [0, "Ikke gitt"],
'1': [1, "1 - Små"],
'2': [2, "2 - Middels"],
'3': [3, "3 - Store"],
'4': [4, "4 - Svært store"],
'5': [5, "5 - Ekstremt store"]
}
},
"prob": {
"ext_attr": ["aval_probability_id", "aval_probability_name"],
"values": {
'0': [0, "Ikke gitt"],
'2': [2, "Lite sannsynlig"],
'3': [3, "Mulig"],
'5': [5, "Sannsynlig"],
}
},
"trig": {
"ext_attr": ["aval_trigger_simple_id", "aval_trigger_simple_name"],
"values": {
'0': [0, "Ikke gitt"],
'10': [10, "Stor tilleggsbelastning"],
'21': [21, "Liten tilleggsbelastning"],
'22': [22, "Naturlig utløst"]
}
},
"dist": {
"ext_attr": ["aval_distribution_id", "aval_distribution_name"],
"values": {
'0': [0, "Ikke gitt"],
'1': [1, "Få bratte heng"],
'2': [2, "Noen bratte heng"],
'3': [3, "Mange bratte heng"],
'4': [4, "De fleste bratte heng"]
}
},
"lev_fill": {
"ext_attr": ["exposed_height_fill"],
"values": {
'0': [0],
'1': [1],
'2': [2],
'3': [3],
'4': [4],
}
}
}
LABEL_PROBLEM_MULTI = {
"aspect": {
"ext_attr": "valid_expositions",
}
}
LABEL_PROBLEM_REAL = {
"lev_max": {
"ext_attr": "exposed_height_1",
},
"lev_min": {
"ext_attr": "exposed_height_2",
}
}
LABEL_GLOBAL = {
"danger_level": {
"ext_attr": ["danger_level", "danger_level_name"],
"values": {
'1': [1, "1 liten"],
'2': [2, "2 Moderat"],
'3': [3, "3 Betydelig"],
'4': [4, "4 Stor"],
'5': [5, "5 Meget stor"]
}
},
"emergency_warning": {
"ext_attr": ["emergency_warning"],
"values": {
"Ikke gitt": ["Ikke gitt"],
"Naturlig utløste skred": ["Naturlig utløste skred"],
}
}
}
COMPETENCE = [0, 110, 115, 120, 130, 150]
class ForecastDataset:
def __init__(self, regobs_types, seasons=('2017-18', '2018-19', '2019-20'), max_file_age=23):
"""
Object contains aggregated data used to generate labeled datasets.
:param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
:param seasons: Tuple/list of string representations of avalanche seasons to fetch.
"""
self.seasons = sorted(list(set(seasons)))
self.date = None
self.regobs_types = regobs_types
self.weather = {}
self.regobs = {}
self.varsom = {}
self.labels = {}
self.use_label = True
for season in seasons:
varsom, labels = _get_varsom_obs(year=season, max_file_age=max_file_age)
self.varsom = merge(self.varsom, varsom)
self.labels = merge(self.labels, labels)
regobs = _get_regobs_obs(season, regobs_types, max_file_age=max_file_age)
self.regobs = merge(self.regobs, regobs)
weather = _get_weather_obs(season, max_file_age=max_file_age)
self.weather = merge(self.weather, weather)
@staticmethod
def date(regobs_types, date: dt.date, days, use_label=True):
"""
Create a dataset containing just a given day's data.
:param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
:param date: Date to fetch and create dataset for.
:param days: How many days to fetch before date. This will be max for .label()'s days parameter.
"""
self = ForecastDataset(regobs_types, [])
self.date = date
self.use_label = use_label
self.regobs = _get_regobs_obs(None, regobs_types, date=date, days=days)
self.varsom, labels = _get_varsom_obs(None, date=date, days=days-1 if days > 0 else 1)
self.weather = _get_weather_obs(None, date=date, days=days-2 if days > 2 else 1)
self.labels = {}
for label_keys, label in labels.items():
if label_keys not in self.labels:
self.labels[label_keys] = {}
for (label_date, label_region), label_data in label.items():
if label_date == date.isoformat():
subkey = (label_date, label_region)
self.labels[label_keys][subkey] = label_data
return self
def label(self, days, with_varsom=True):
"""Creates a LabeledData containing relevant label and features formatted either in a flat structure or as
a time series.
:param days: How far back in time values should data be included.
If 0, only weather data for the forecast day is evaluated.
If 1, day 0 is used for weather, 1 for Varsom.
If 2, day 0 is used for weather, 1 for Varsom, 2 for RegObs.
If 3, days 0-1 is used for weather, 1-2 for Varsom, 2-3 for RegObs.
If 5, days 0-3 is used for weather, 1-4 for Varsom, 2-5 for RegObs.
The reason for this is to make sure that each kind of data contain
the same number of data points, if we want to use some time series
frameworks that are picky about such things.
:param with_varsom: Whether to include previous avalanche bulletins into the indata.
:return: LabeledData
"""
table = {}
row_weight = {}
df = None
df_weight = None
df_label = pd.DataFrame(self.labels, dtype="U")
days_w = {0: 1, 1: 1, 2: 1}.get(days, days - 1)
days_v = {0: 1, 1: 2, 2: 2}.get(days, days)
days_r = days + 1
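        # e.g. days=5 gives days_w=4 (weather days 0-3), days_v=5 (varsom days 1-4)
        # and days_r=6 (regobs days 2-5), matching the docstring above.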
varsom_index = pd.DataFrame(self.varsom).index
weather_index = pd.DataFrame(self.weather).index
if len(df_label.index) == 0 and self.use_label:
raise NoBulletinWithinRangeError()
if self.date and not self.use_label:
season = gm.get_season_from_date(self.date)
regions = gm.get_forecast_regions(year=season, get_b_regions=True)
date_region = [(self.date.isoformat(), region) for region in regions]
else:
date_region = df_label.index
for monotonic_idx, entry_idx in enumerate(date_region):
date, region_id = dt.date.fromisoformat(entry_idx[0]), entry_idx[1]
def prev_key(day_dist):
return (date - dt.timedelta(days=day_dist)).isoformat(), region_id
# Just check that we can use this entry.
try:
if with_varsom:
for n in range(1, days_v):
if prev_key(n) not in varsom_index:
raise KeyError()
for n in range(0, days_w):
if prev_key(n) not in weather_index:
raise KeyError()
add_row = True
# We don't check for RegObs as it is more of the good to have type of data
except KeyError:
add_row = False
if add_row:
row = {}
for region in REGIONS:
row[(f"region_id_{region}", "0")] = float(region == region_id)
if with_varsom:
for column in self.varsom.keys():
for n in range(1, days_v):
# We try/except an extra time since single dates may run without a forecast.
row[(column, str(n))] = self.varsom[column][prev_key(n)]
for column in self.weather.keys():
for n in range(0, days_w):
try:
row[(column, str(n))] = self.weather[column][prev_key(n)]
except KeyError:
row[(column, str(n))] = 0
for column in self.regobs.keys():
for n in range(2, days_r):
try:
row[(column, str(n))] = self.regobs[column][prev_key(n)]
except KeyError:
row[(column, str(n))] = 0
try:
weight_sum = self.regobs['accuracy'][prev_key(0)]
if weight_sum < 0:
row_weight[entry_idx] = 1 / 2
elif weight_sum == 0:
row_weight[entry_idx] = 1
elif weight_sum > 0:
row_weight[entry_idx] = 2
except KeyError:
row_weight[entry_idx] = 1
# Some restructuring to make DataFrame parse the dict correctly
for key in row.keys():
if key not in table:
table[key] = {}
table[key][entry_idx] = row[key]
# Build DataFrame iteratively to preserve system memory (floats in dicts are apparently expensive).
if (monotonic_idx > 0 and monotonic_idx % 1000 == 0) or monotonic_idx == len(date_region) - 1:
df_new = pd.DataFrame(table, dtype=np.float32).fillna(0)
df_weight_new = pd.Series(row_weight)
df = df_new if df is None else pd.concat([df, df_new])
                df_weight = df_weight_new if df_weight is None else pd.concat([df_weight, df_weight_new])
table = {}
row_weight = {}
if df is None or len(df.index) == 0:
raise NoDataFoundError()
if self.use_label:
df_label = df_label.loc[df.index]
df_label.sort_index(axis=0, inplace=True)
df_label.sort_index(axis=1, inplace=True)
df.sort_index(axis=0, inplace=True)
df_weight.sort_index(axis=0, inplace=True)
else:
df_label = None
return LabeledData(df, df_label, df_weight, days, self.regobs_types, with_varsom, self.seasons)
class LabeledData:
is_normalized = False
with_regions = True
elevation_class = (False, False)
scaler = StandardScaler()
def __init__(self, data, label, row_weight, days, regobs_types, with_varsom, seasons=False):
"""Holds labels and features.
:param data: A DataFrame containing the features of the dataset.
:param label: DataFrame of labels.
:param row_weight: Series containing row weights
:param days: How far back in time values should data be included.
If 0, only weather data for the forecast day is evaluated.
If 1, day 0 is used for weather, 1 for Varsom.
If 2, day 0 is used for weather, 1 for Varsom, 2 for RegObs.
If 3, days 0-1 is used for weather, 1-2 for Varsom, 2-3 for RegObs.
If 5, days 0-3 is used for weather, 1-4 for Varsom, 2-5 for RegObs.
The reason for this is to make sure that each kind of data contain
the same number of data points, if we want to use some time series
frameworks that are picky about such things.
:param regobs_types: A tuple/list of strings of types of observations to fetch from RegObs.,
e.g., `("Faretegn")`.
:param with_varsom: Whether to include previous avalanche bulletins into the indata.
"""
self.data = data
self.row_weight = row_weight
if label is not None:
self.label = label
self.label = self.label.replace(_NONE, 0)
self.label = self.label.replace(np.nan, 0)
try: self.label['CLASS', _NONE] = self.label['CLASS', _NONE].replace(0, _NONE).values
except KeyError: pass
try: self.label['MULTI'] = self.label['MULTI'].replace(0, "0").values
except KeyError: pass
            try: self.label['REAL'] = self.label['REAL'].astype(float)  # builtin float; the np.float alias was removed in newer NumPy
except KeyError: pass
self.pred = label.copy()
for col in self.pred.columns:
self.pred[col].values[:] = 0
try: self.pred['CLASS', _NONE] = _NONE
except KeyError: pass
try: self.pred['MULTI'] = "0"
except KeyError: pass
else:
self.label = None
self.pred = None
self.days = days
self.with_varsom = with_varsom
self.regobs_types = regobs_types
if self.data is not None:
self.scaler.fit(self.data.values)
self.single = not seasons
self.seasons = sorted(list(set(seasons if seasons else [])))
def normalize(self, by=None):
"""Normalize the data feature-wise using MinMax.
:return: Normalized copy of LabeledData
"""
by = by if by is not None else self
if not self.is_normalized:
ld = self.copy()
data = by.scaler.transform(self.data.values)
ld.data = pd.DataFrame(data=data, index=self.data.index, columns=self.data.columns)
ld.is_normalized = by
return ld
elif self.is_normalized != by:
return self.denormalize().normalize(by=by)
else:
return self.copy()
def denormalize(self):
"""Denormalize the data feature-wise using MinMax.
:return: Denormalized copy of LabeledData
"""
if self.is_normalized:
ld = self.copy()
data = self.is_normalized.scaler.inverse_transform(self.data.values)
ld.data = pd.DataFrame(data=data, index=self.data.index, columns=self.data.columns)
ld.is_normalized = False
return ld
else:
return self.copy()
def drop_regions(self):
"""Remove regions from input data"""
if self.with_regions:
ld = self.copy()
region_columns = list(filter(lambda x: re.match(r'^region_id', x[0]), ld.data.columns))
ld.data.drop(region_columns, axis=1, inplace=True)
ld.with_regions = False
ld.scaler.fit(ld.data.values)
return ld
else:
return self.copy()
def stretch_temperatures(self):
"""Stretch out temperatures near zero"""
ld = self.copy()
if self.data is not None:
temp_cols = [bool(re.match(r"^temp_(max|min)$", title)) for title in ld.data.columns.get_level_values(0)]
ld.data.loc[:, temp_cols] = np.sign(ld.data.loc[:, temp_cols]) * np.sqrt(np.abs(ld.data.loc[:, temp_cols]))
ld.scaler.fit(ld.data.values)
return ld
def problem_graph(self):
label = pd.Series(self.label["CLASS", _NONE, "problem_1"], name="label")
pred1 = pd.Series(self.pred["CLASS", _NONE, "problem_1"], name="problem_1")
pred2 = pd.Series(self.pred["CLASS", _NONE, "problem_2"], name="problem_2")
groups = pd.concat([label, pred1, pred2], axis=1).groupby(["label", "problem_1"], dropna=False)
count = groups.count()["problem_2"].rename("count")
        p2 = groups["problem_2"].apply(lambda x: pd.Series.mode(x))
#!python
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import subprocess
import glob
import re
from helperFunctions.myFunctions_helper import *
import numpy as np
import pandas as pd
import fileinput
from itertools import product
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB import PDBList
from pdbfixer import PDBFixer
from simtk.openmm.app import PDBFile
# compute cross Q for every pdb pair in one folder
# parser = argparse.ArgumentParser(description="Compute cross q")
# parser.add_argument("-m", "--mode",
# type=int, default=1)
# args = parser.parse_args()
def getFromTerminal(CMD):
return subprocess.Popen(CMD,stdout=subprocess.PIPE,shell=True).communicate()[0].decode()
def expand_grid(dictionary):
return pd.DataFrame([row for row in product(*dictionary.values())],
columns=dictionary.keys())
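# Example: expand_grid({"a": [1, 2], "b": ["x", "y"]}) yields the 4-row cartesian
# product as a DataFrame with columns "a" and "b".
# duplicate_pdb copies a PDB file, shifting all ATOM coordinates by the given
# offsets and relabeling their chain ID (default "B"); non-ATOM records longer
# than 26 characters are dropped.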
def duplicate_pdb(From, To, offset_x=0, offset_y=0, offset_z=0, new_chain="B"):
with open(To, "w") as out:
with open(From, "r") as f:
for line in f:
tmp = list(line)
if len(tmp) < 26:
out.write(line)
continue
atom = line[0:4]
atomSerialNumber = line[6:11]
atomName = line[12:16]
atomResidueName = line[17:20]
chain = line[21]
residueNumber = line[22:26]
# change chain A to B
# new_chain = "B"
if atom == "ATOM":
x = float(line[30:38])
y = float(line[38:46])
z = float(line[46:54])
# add 40 to the x
new_x = x + offset_x
new_y = y + offset_y
new_z = z + offset_z
tmp[21] = new_chain
tmp[30:38] = "{:8.3f}".format(new_x)
tmp[38:46] = "{:8.3f}".format(new_y)
tmp[46:54] = "{:8.3f}".format(new_z)
a = "".join(tmp)
out.write(a)
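# Builds a 0/1 native-contact map from residue coordinates: pairs closer than
# DISTANCE_CUTOFF count as contacts, excluding pairs fewer than MAX_OFFSET
# residues apart along the chain.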
def compute_native_contacts(coords, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
n = len(dis)
remove_band = np.eye(n)
for i in range(1, MAX_OFFSET):
remove_band += np.eye(n, k=i)
remove_band += np.eye(n, k=-i)
dis[remove_band==1] = np.max(dis)
native_contacts = dis < DISTANCE_CUTOFF
return native_contacts.astype("int")
def compute_contacts(coords, native_contacts, DISTANCE_CUTOFF=9.5):
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
constacts = dis < DISTANCE_CUTOFF
constacts = constacts*native_contacts # remove non native contacts
return np.sum(constacts, axis=1).astype("float")
def compute_localQ_init(MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
from pathlib import Path
home = str(Path.home())
struct_id = '2xov'
filename = os.path.join(home, "opt/pulling/2xov.pdb")
p = PDBParser(PERMISSIVE=1)
s = p.get_structure(struct_id, filename)
chains = s[0].get_list()
# import pdb file
native_coords = []
for chain in chains:
dis = []
all_res = []
for res in chain:
is_regular_res = res.has_id('CA') and res.has_id('O')
res_id = res.get_id()[0]
if (res.get_resname()=='GLY'):
native_coords.append(res['CA'].get_coord())
elif (res_id==' ' or res_id=='H_MSE' or res_id=='H_M3L' or res_id=='H_CAS') and is_regular_res:
native_coords.append(res['CB'].get_coord())
else:
print('ERROR: irregular residue at %s!' % res)
exit()
native_contacts_table = compute_native_contacts(native_coords, MAX_OFFSET, DISTANCE_CUTOFF)
return native_contacts_table
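# Computes per-residue local Q for every frame of a LAMMPS dump: the fraction of
# each residue's native contacts preserved in the frame, written to
# localQ.<ii>.csv with one "ResN" column per residue.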
def compute_localQ(native_contacts_table, pre=".", ii=-1, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
native_contacts = np.sum(native_contacts_table, axis=1).astype("float")
dump = read_lammps(os.path.join(pre, f"dump.lammpstrj.{ii}"), ca=False)
localQ_list = []
for atom in dump:
contacts = compute_contacts(np.array(atom), native_contacts_table, DISTANCE_CUTOFF=DISTANCE_CUTOFF)
c = np.divide(contacts, native_contacts, out=np.zeros_like(contacts), where=native_contacts!=0)
localQ_list.append(c)
data = pd.DataFrame(localQ_list)
data.columns = ["Res" + str(i+1) for i in data.columns]
data.to_csv(os.path.join(pre, f"localQ.{ii}.csv"), index=False)
def readPMF_basic(pre):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys())
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(location)
name_list = ["f", "df", "e", "s"]
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
data = pd.read_table(location, skiprows=2, sep='\s+', names=names).assign(upOrDown=upOrDown, change=change, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def make_metadata_3(k=1000.0, temps_list=["450"], i=-1, biasLow=None, biasHigh=None):
print("make metadata")
cwd = os.getcwd()
files = glob.glob(f"../data_{i}/*")
kconstant = k
with open("metadatafile", "w") as out:
for oneFile in sorted(files):
tmp = oneFile.split("/")[-1].replace('.dat', '')
t = tmp.split("_")[1]
bias = tmp.split("_")[3]
if biasLow:
if float(bias) < biasLow:
continue
if biasHigh:
if float(bias) > biasHigh:
continue
# print(tmp)
# if int(float(dis)) > 150:
# continue
if t in temps_list:
target = "../{} {} {} {}\n".format(oneFile, t, kconstant, bias)
out.write(target)
def readPMF(pre, is2d=False, force_list=["0.0", "0.1", "0.2"]):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys()),
"force":force_list
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
force = row["force"]
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/force_{force}/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/force_{force}/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(pmf_list)
name_list = ["f", "df", "e", "s"]
if is2d:
names = ["x", "y"] + name_list
else:
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
                raise ValueError('Expected exactly one pmf-<temp> match in the file name')
else:
temp = temp[0]
            data = pd.read_table(location, skiprows=2, sep=r'\s+', names=names).assign(upOrDown=upOrDown, change=change, force=force, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def readPMF_2(pre, is2d=0, force_list=["0.0", "0.1", "0.2"]):
if is2d:
print("reading 2d pmfs")
else:
print("reading 1d dis, qw and z")
if is2d == 1:
mode_list = ["2d_qw_dis", "2d_z_dis", "2d_z_qw"]
elif is2d == 2:
mode_list = ["quick"]
else:
mode_list = ["1d_dis", "1d_qw", "1d_z"]
all_data_list =[]
for mode in mode_list:
tmp = readPMF(mode, is2d, force_list).assign(mode=mode)
all_data_list.append(tmp)
return pd.concat(all_data_list).dropna().reset_index()
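# Minimal usage sketch (assumes the 1d_dis/1d_qw/1d_z folders produced by the
# pulling analysis exist under the current directory): read all 1-D PMFs and
# report the minimum free energy per mode, force and temperature.
def _example_read_pmfs():
    pmf = readPMF_2(".", is2d=0, force_list=["0.0", "0.1", "0.2"])
    return pmf.groupby(["mode", "force", "temp"])["f"].min()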
def shrinkage(n=552, shrink_size=6, max_frame=2000, fileName="dump.lammpstrj"):
print("Shrinkage: size: {}, max_frame: {}".format(shrink_size, max_frame))
bashCommand = "wc " + fileName
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
line_number = int(output.decode("utf-8").split()[0])
    print(line_number)
    # each frame of the 2xov dump occupies n = 552 lines: 543 atom records plus the 9 dump header lines
    n = 552
    print(line_number / n)  # number of frames in the dump
count = 0
with open("small.lammpstrj", "w") as out:
with open(fileName, "r") as f:
for i, line in enumerate(f):
if (i // n) % shrink_size == 0:
if count >= max_frame*n:
break
count += 1
out.write(line)
def compute_theta_for_each_helix(output="angles.csv", dumpName="../dump.lammpstrj.0"):
print("This is for 2xov only")
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
atoms_all_frames = read_lammps(dumpName)
# print(atoms[0])
# print(len(atoms), len(atoms[0]))
# helices_angles_all_frames = []
with open(output, "w") as out:
out.write("Frame, Helix, Angle\n")
for ii, frame in enumerate(atoms_all_frames):
# helices_angles = []
for count, (i, j) in enumerate(helices_list):
# print(i, j)
i = i-91
j = j-91
# end - start
a = np.array(frame[j]) - np.array(frame[i])
                b = np.array([0, 0, 1])  # membrane normal (z axis)
                angle = a[2]/length(a)  # cos(theta) between the helix vector and z
# helices_angles.append(angle)
# print(angle)
out.write("{}, {}, {}\n".format(ii, count+1, angle))
# helices_angles_all_frames.append(helices_angles)
def check_and_correct_fragment_memory(fragFile="fragsLAMW.mem"):
with open("tmp.mem", "w") as out:
with open(fragFile, "r") as f:
for i in range(4):
line = next(f)
out.write(line)
for line in f:
gro, _, i, n, _ = line.split()
delete = False
# print(gro, i, n)
# name = gro.split("/")[-1]
with open(gro, "r") as one:
next(one)
next(one)
all_residues = []
for atom in one:
residue, resType, atomType, *_ = atom.split()
# print(residue, resType, atomType)
if atomType == "CA":
all_residues.append(int(residue))
all_residues = np.array(all_residues)
for test in range(int(i), int(i)+int(n)):
if (test == all_residues).sum() > 1:
                    # In rare cases, one residue id may map to two different possible residues.
                    # One example: pdb 3vpg, chain A, res id 220.
# ATOM 1467 N ARG A 220A 9.151 -20.984 46.737 1.00 31.30 N
# ATOM 1468 CA ARG A 220A 9.120 -19.710 46.027 1.00 31.52 C
# ATOM 1469 C ARG A 220A 9.768 -19.832 44.650 1.00 33.58 C
# ATOM 1470 O ARG A 220A 10.552 -18.973 44.240 1.00 28.91 O
# ATOM 1471 CB ARG A 220A 9.853 -18.641 46.847 1.00 31.58 C
# ATOM 1472 CG ARG A 220A 9.181 -18.295 48.168 1.00 33.55 C
# ATOM 1473 CD ARG A 220A 7.834 -17.651 47.916 1.00 34.70 C
# ATOM 1474 NE ARG A 220A 7.959 -16.526 46.994 1.00 43.05 N
# ATOM 1475 CZ ARG A 220A 6.931 -15.906 46.425 1.00 46.69 C
# ATOM 1476 NH1 ARG A 220A 5.691 -16.300 46.683 1.00 39.12 N
# ATOM 1477 NH2 ARG A 220A 7.144 -14.898 45.590 1.00 41.15 N
# ATOM 1478 N ALA A 220B 9.429 -20.901 43.936 1.00 33.78 N
# ATOM 1479 CA ALA A 220B 9.979 -21.153 42.608 1.00 32.13 C
# ATOM 1480 C ALA A 220B 9.944 -19.933 41.692 1.00 30.71 C
# ATOM 1481 O ALA A 220B 9.050 -19.088 41.787 1.00 28.56 O
# ATOM 1482 CB ALA A 220B 9.234 -22.310 41.951 1.00 35.20 C
print("ATTENTION", gro, i, n, "duplicate:",test)
delete = True
if test not in all_residues:
print("ATTENTION", gro, i, n, "missing:",test)
delete = True
if not delete:
out.write(line)
os.system(f"mv {fragFile} fragsLAMW_back")
os.system(f"mv tmp.mem {fragFile}")
def compute_average_z(dumpFile, outFile):
# input dump, output z.dat
z_list = []
with open(outFile, "w") as f:
a = read_lammps(dumpFile)
for atoms in a:
b = np.array(atoms)
z = b.mean(axis=0)[2]
z_list.append(z)
f.write(str(z)+"\n")
def compute_average_z_2(dumpFile, outFile):
# input dump, output z.dat
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
with open(outFile, "w") as f:
a = read_lammps(dumpFile)
f.write("z_average, abs_z_average, z_h1, z_h2, z_h3, z_h4, z_h5, z_h6\n")
for atoms in a:
b = np.array(atoms)
z = b.mean(axis=0)[2]
f.write(str(z)+ ", ")
z = np.abs(b).mean(axis=0)[2]
f.write(str(z)+ ", ")
for count, (i,j) in enumerate(helices_list):
i = i - 91
j = j - 91
z = np.mean(b[i:j], axis=0)[2]
if count == 5:
f.write(str(z))
else:
f.write(str(z)+ ", ")
f.write("\n")
def read_folder(location, match="", **kwargs):
runFolders = os.listdir(location+"/simulation")
if match == "qbias":
runFolders = [f for f in runFolders if re.match(r'qbias_[0-9]+', f)]
else:
runFolders = [f for f in runFolders if re.match(r'[0-9]+', f)]
print(runFolders)
data_list = []
for run in runFolders:
tmp = read_simulation_2(location+"/simulation/"+run+"/0/", **kwargs).assign(Run=run)
data_list.append(tmp)
return pd.concat(data_list).reset_index(drop=True)
def read_variable_folder(location, match="*_", **kwargs):
variables = glob.glob(os.path.join(location, match))
print(variables)
data_list = []
for variableFolder in variables:
tmp = variableFolder.split("/")[-1]
data_list.append(read_folder(variableFolder, **kwargs).assign(Folder=tmp))
data = | pd.concat(data_list) | pandas.concat |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/12/13 21:43
describe: sector sensor for stock clusters such as concepts, industries and indices
Three questions:
1) How to find the concepts, industries and indices that lead the overall market
2) Within a sector, how to rank stocks as strong / medium / weak relative to the sector trend
3) How to control the account's total position according to index strength
"""
import os
import traceback
import inspect
from datetime import timedelta, datetime
import pandas as pd
from tqdm import tqdm
from typing import Callable
from czsc import envs
from czsc.utils import WordWriter, io
from czsc.data.ts_cache import TsDataCache, Freq
from czsc.sensors.utils import get_index_beta, generate_signals, turn_over_rate, max_draw_down
class ThsConceptsSensor:
"""
    Input: the Tonghuashun (THS) concept list and the daily bars of THS concept indices
    Output: the strong THS concepts for each trading day
"""
def __init__(self,
results_path: str,
sdt: str,
edt: str,
dc: TsDataCache,
get_signals: Callable,
get_event: Callable,
ths_index_type='N'):
"""
        :param results_path: directory where results are saved
        :param sdt: start date
        :param edt: end date
        :param dc: data cache object
        :param get_signals: signal computation function
        :param get_event: event definition function
        :param ths_index_type: THS index type; N - concept index, I - industry index, S - THS special index
"""
self.name = self.__class__.__name__
self.dc = dc
self.get_signals = get_signals
self.get_event = get_event
self.event = get_event()
self.base_freq = Freq.D.value
self.freqs = [Freq.W.value, Freq.M.value]
self.dc = dc
self.ths_index_type = ths_index_type
self.verbose = envs.get_verbose()
self.cache = dict()
self.results_path = results_path
os.makedirs(self.results_path, exist_ok=True)
self.sdt = sdt
self.edt = edt
self.file_docx = os.path.join(results_path, f'{self.event.name}_{self.ths_index_type}_{sdt}_{edt}.docx')
writer = WordWriter(self.file_docx)
if not os.path.exists(self.file_docx):
            writer.add_title(f"THS index ({self.ths_index_type}) sensor report")
writer.add_page_break()
writer.add_heading(f"{datetime.now().strftime('%Y-%m-%d %H:%M')} {self.event.name}", level=1)
            writer.add_heading("Parameter configuration", level=2)
            writer.add_paragraph(f"Test method description: {self.event.name}")
            writer.add_paragraph(f"Test period: {sdt} ~ {edt}")
            writer.add_paragraph(f"Signal function:\n{inspect.getsource(self.get_signals)}")
            writer.add_paragraph(f"Event definition:\n{inspect.getsource(self.get_event)}")
writer.save()
self.writer = writer
self.file_ssd = os.path.join(results_path, f'ths_all_strong_days_{self.ths_index_type}.pkl')
if os.path.exists(self.file_ssd):
self.ssd, self.cache = io.read_pkl(self.file_ssd)
else:
self.ssd = self.get_all_strong_days()
io.save_pkl([self.ssd, self.cache], self.file_ssd)
self.betas = get_index_beta(dc, sdt, edt, freq='D',
indices=['000001.SH', '000016.SH', '000905.SH',
'000300.SH', '399001.SZ', '399006.SZ'])
def get_strong_days(self, ts_code, name):
"""获取单个概念的强势日期
:param ts_code: 同花顺概念代码
:param name: 同花顺概念名称
:return:
"""
dc = self.dc
event = self.event
sdt = self.sdt
edt = self.edt
start_date = | pd.to_datetime(sdt) | pandas.to_datetime |
# coding: utf-8 -*-
'''
GSI.py contains utility functions for GSI
'''
__all__ = ['GSIstat']
import numpy as _np
import pandas as _pd
import re as _re
class GSIstat(object):
'''
Object containing the GSI statistics
'''
def __init__(self,filename,adate):
'''
Initialize the GSIstat object
INPUT:
filename = filename of the gsistat file
adate = analysis date
OUTPUT:
GSIstat: object containing the contents of the filename
'''
self.filename = filename
self.analysis_date = adate
        fh = open(self.filename, 'r')  # text mode so the str regexes used for parsing match
self._lines = fh.readlines() # Keep lines private
fh.close()
# Initialize cache for fast parsing
self._cache = {}
return
def extract(self,name):
'''
From the gsistat file, extract information:
INPUT:
name = information seeked
Valid options are:
ps, oz, uv, t, q, gps, rad, cost
OUTPUT:
df = dataframe containing information
'''
# If name has already been parsed,
# just return it from cache
if name in self._cache:
df = self._cache[name]
return df
if name in ['ps']:
df = self._get_ps()
elif name in ['oz']:
df = self._get_ozone()
elif name in ['uv','t','q','gps']:
df = self._get_conv(name)
elif name in ['rad']:
df = self._get_radiance()
elif name in ['cost']:
df = self._get_cost()
else:
raise IOError('option %s is not defined' % name)
        # Drop the o-g from the indices list
if 'o-g' in list(df.index.names):
df.reset_index(level='o-g',drop=True,inplace=True)
# Add datetime index
df = self._add_datetime_index(df)
# Cache it for faster access
self._cache[name] = df
return df
def _add_datetime_index(self,df):
'''
Add the datetime as the first index
INPUT:
df = dataframe without datetime index
OUTPUT:
df = dataframe with datetime as the 1st index
'''
# If date is already present, return
if 'date' in list(df.index.names):
return df
indices = ['date'] + list(df.index.names)
df['date'] = self.analysis_date
df.set_index('date', append=True, inplace=True)
df = df.reorder_levels(indices)
return df
def extract_instrument(self,obtype,instrument):
'''
From the gsistat file, extract detailed information on an instrument:
INPUT:
obtype = observation type to extract (rad or oz)
instrument = instrument name [must be in the observation type]
E.g.:
amsua, mhs, iasi, hirs, etc
OUTPUT:
df = dataframe containing information
'''
# If instrument has already been parsed,
# just return it from cache
if instrument in self._cache:
df = self._cache[instrument]
return df
# Ensure obtype is already called,
# if not call it and cache it
if obtype in list(self._cache.keys()):
otype = self._cache[obtype]
else:
otype = self.extract(obtype)
self._cache[obtype] = otype
instruments = sorted(otype.index.get_level_values('instrument').unique())
satellites = sorted(otype.index.get_level_values('satellite' ).unique())
if instrument not in instruments:
print('Instrument %s not found!' % instrument)
print('%s contains ...' % self.filename)
print(', '.join(str(x) for x in instruments))
return None
# Handle special instruments
if instrument in ['iasi','iasi616']:
inst = 'iasi616'
elif instrument in ['airs','airs281SUBSET']:
inst = 'airs281SUBSET'
else:
inst = instrument
tmp = []
        pattern = r'\s+\d+\s+\d+\s+%s_\S+\s+\d+\s+\d+\s+' % (inst)
for line in self._lines:
if _re.match(pattern,line):
tst = line.strip().split()
tst = tst[:2] + tst[2].split('_') + tst[3:]
tmp.append(tst)
columns = ['it','channel','instrument','satellite','nassim','nrej','oberr','OmF_bc','OmF_wobc','col1','col2','col3']
df = | _pd.DataFrame(data=tmp,columns=columns) | pandas.DataFrame |
import itertools
import json
from copy import deepcopy
import networkx as nx
import numpy as np
import pandas as pd
import syspy.assignment.raw as assignment_raw
from quetzal.engine import engine
from quetzal.engine.subprocesses import filepaths
from quetzal.os import parallel_call
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import dijkstra
from syspy.routing.frequency import graph as frequency_graph
from syspy.skims import skims
from syspy.spatial import spatial
from tqdm import tqdm
def get_path(predecessors, i, j):
path = [j]
k = j
p = 0
while p != -9999:
k = p = predecessors[i, k]
path.append(p)
return path[::-1][1:]
def get_reversed_path(predecessors, i, j):
path = [j]
k = j
p = 0
while p != -9999:
k = p = predecessors[i, k]
path.append(p)
return path[:-1]
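# Minimal sketch (not part of the original module): reconstruct a path on a toy
# 3-node graph with scipy's dijkstra; it relies on the same -9999 sentinel for
# "no predecessor" that get_path/get_reversed_path above assume.
def _example_get_path():
    toy = csr_matrix(([1.0, 1.0], ([0, 1], [1, 2])), shape=(3, 3))
    _, predecessors = dijkstra(csgraph=toy, directed=True, indices=[0],
                               return_predecessors=True)
    return get_path(predecessors, 0, 2)  # -> [0, 1, 2]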
def path_and_duration_from_graph(
nx_graph,
pole_set,
od_set=None,
sources=None,
reversed_nx_graph=None,
reverse=False,
ntlegs_penalty=1e9,
cutoff=np.inf,
**kwargs
):
sources = pole_set if sources is None else sources
source_los = sparse_los_from_nx_graph(
nx_graph, pole_set, sources=sources,
cutoff=cutoff + ntlegs_penalty, od_set=od_set, **kwargs
)
source_los['reversed'] = False
reverse = reverse or reversed_nx_graph is not None
if reverse:
if reversed_nx_graph is None:
reversed_nx_graph = nx_graph.reverse()
try:
reversed_od_set = {(d, o) for o, d in od_set}
except TypeError:
reversed_od_set = None
target_los = sparse_los_from_nx_graph(
reversed_nx_graph, pole_set, sources=sources,
cutoff=cutoff + ntlegs_penalty, od_set=reversed_od_set, **kwargs)
target_los['reversed'] = True
target_los['path'] = target_los['path'].apply(lambda x: list(reversed(x)))
target_los[['origin', 'destination']] = target_los[['destination', 'origin']]
los = pd.concat([source_los, target_los]) if reverse else source_los
los.loc[los['origin'] != los['destination'], 'gtime'] -= ntlegs_penalty
    if od_set is not None:
        tuples = [tuple(l) for l in los[['origin', 'destination']].values.tolist()]
        los = los.loc[[t in od_set for t in tuples]]
return los
def sparse_los_from_nx_graph(
nx_graph,
pole_set,
sources=None,
cutoff=np.inf,
od_set=None,
):
sources = pole_set if sources is None else sources
if od_set is not None:
sources = {o for o, d in od_set if o in sources}
# INDEX
pole_list = sorted(list(pole_set)) # fix order
source_list = [zone for zone in pole_list if zone in sources]
nodes = list(nx_graph.nodes)
node_index = dict(zip(nodes, range(len(nodes))))
zones = [node_index[zone] for zone in source_list]
source_index = dict(zip(source_list, range(len(source_list))))
zone_index = dict(zip(pole_list, range(len(pole_list))))
# SPARSE GRAPH
sparse = nx.to_scipy_sparse_matrix(nx_graph)
graph = csr_matrix(sparse)
dist_matrix, predecessors = dijkstra(
csgraph=graph,
directed=True,
indices=zones,
return_predecessors=True,
limit=cutoff
)
# LOS LAYOUT
df = pd.DataFrame(dist_matrix)
df.index = [zone for zone in pole_list if zone in sources]
df.columns = list(nx_graph.nodes)
df.columns.name = 'destination'
df.index.name = 'origin'
stack = df[pole_list].stack()
stack.name = 'gtime'
los = stack.reset_index()
# QUETZAL FORMAT
los = los.loc[los['gtime'] < np.inf]
if od_set is not None:
tuples = [tuple(l) for l in los[['origin', 'destination']].values.tolist()]
los = los.loc[[t in od_set for t in tuples]]
# BUILD PATH FROM PREDECESSORS
od_list = los[['origin', 'destination']].values.tolist()
paths = [
[nodes[i] for i in get_path(predecessors, source_index[o], node_index[d])]
for o, d in od_list
]
los['path'] = paths
return los
def sparse_matrix(edges):
nodelist = {e[0] for e in edges}.union({e[1] for e in edges})
nlen = len(nodelist)
index = dict(zip(nodelist, range(nlen)))
coefficients = zip(*((index[u], index[v], w) for u, v, w in edges))
row, col, data = coefficients
return csr_matrix((data, (row, col)), shape=(nlen, nlen)), index
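# Illustrative check: three nodes and two weighted edges (weights standing for
# travel times). sparse_matrix returns the csr adjacency matrix together with
# the node -> row/column index mapping used everywhere below.
def _example_sparse_matrix():
    edges = [("a", "b", 2.0), ("b", "c", 3.0)]
    matrix, index = sparse_matrix(edges)
    return matrix.shape, index  # (3, 3) and e.g. {"a": 0, "b": 1, "c": 2}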
def link_edges(links, boarding_time=None, alighting_time=None):
assert not (boarding_time is not None and 'boarding_time' in links.columns)
boarding_time = 0 if boarding_time is None else boarding_time
assert not (alighting_time is not None and 'alighting_time' in links.columns)
alighting_time = 0 if alighting_time is None else alighting_time
l = links.copy()
l['index'] = l.index
l['next'] = l['link_sequence'] + 1
if 'cost' not in l.columns:
l['cost'] = l['time'] + l['headway'] / 2
if 'boarding_time' not in l.columns:
l['boarding_time'] = boarding_time
if 'alighting_time' not in l.columns:
l['alighting_time'] = alighting_time
l['total_time'] = l['boarding_time'] + l['cost']
boarding_edges = l[['a', 'index', 'total_time']].values.tolist()
alighting_edges = l[['index', 'b', 'alighting_time']].values.tolist()
transit = pd.merge(
l[['index', 'next', 'trip_id']],
l[['index', 'link_sequence', 'trip_id', 'time']],
left_on=['trip_id', 'next'],
right_on=['trip_id', 'link_sequence'],
)
transit_edges = transit[['index_x', 'index_y', 'time']].values.tolist()
return boarding_edges + transit_edges + alighting_edges
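# Toy example (illustrative only): two consecutive links of a single trip. The
# columns follow the convention assumed by link_edges: a/b stop nodes, trip_id,
# link_sequence, and time/headway in seconds.
def _example_link_edges():
    links = pd.DataFrame({
        'a': ['s1', 's2'], 'b': ['s2', 's3'],
        'trip_id': ['t1', 't1'], 'link_sequence': [1, 2],
        'time': [60, 90], 'headway': [600, 600],
    })
    # returns boarding, transit and alighting edges as (tail, head, cost) triplets
    return link_edges(links, boarding_time=30, alighting_time=0)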
def adjacency_matrix(
links,
ntlegs,
footpaths,
ntlegs_penalty=1e9,
boarding_time=None,
alighting_time=None,
**kwargs
):
ntlegs = ntlegs.copy()
# ntlegs and footpaths
ntlegs.loc[ntlegs['direction'] == 'access', 'time'] += ntlegs_penalty
ntleg_edges = ntlegs[['a', 'b', 'time']].values.tolist()
footpaths_edges = footpaths[['a', 'b', 'time']].values.tolist()
edges = link_edges(links, boarding_time, alighting_time)
edges += footpaths_edges + ntleg_edges
return sparse_matrix(edges)
def los_from_graph(
csgraph, # graph is assumed to be a scipy csr_matrix
node_index=None,
pole_set=None,
sources=None,
cutoff=np.inf,
od_set=None,
ntlegs_penalty=1e9
):
sources = pole_set if sources is None else sources
if od_set is not None:
sources = {o for o, d in od_set if o in sources}
# INDEX
pole_list = sorted(list(pole_set)) # fix order
source_list = [zone for zone in pole_list if zone in sources]
zones = [node_index[zone] for zone in source_list]
source_index = dict(zip(source_list, range(len(source_list))))
zone_index = dict(zip(pole_list, range(len(pole_list))))
# SPARSE GRAPH
dist_matrix, predecessors = dijkstra(
csgraph=csgraph,
directed=True,
indices=zones,
return_predecessors=True,
limit=cutoff + ntlegs_penalty
)
# LOS LAYOUT
df = pd.DataFrame(dist_matrix)
indexed_nodes = {v: k for k, v in node_index.items()}
df.rename(columns=indexed_nodes, inplace=True)
df.index = [zone for zone in pole_list if zone in sources]
df.columns.name = 'destination'
df.index.name = 'origin'
stack = df[pole_list].stack()
stack.name = 'gtime'
los = stack.reset_index()
# QUETZAL FORMAT
los = los.loc[los['gtime'] < np.inf]
los.loc[los['origin'] != los['destination'], 'gtime'] -= ntlegs_penalty
if od_set is not None:
tuples = [tuple(l) for l in los[['origin', 'destination']].values.tolist()]
los = los.loc[[t in od_set for t in tuples]]
# BUILD PATH FROM PREDECESSORS
od_list = los[['origin', 'destination']].values.tolist()
paths = [
[indexed_nodes[i] for i in get_path(predecessors, source_index[o], node_index[d])]
for o, d in od_list
]
los['path'] = paths
return los
def paths_from_graph(
csgraph,
node_index,
sources,
targets,
od_set=None,
cutoff=np.inf
):
reverse = False
if od_set:
o_set = {o for o, d in od_set}
d_set = {d for o, d in od_set}
sources = [s for s in sources if s in o_set]
targets = [t for t in targets if t in d_set]
if len(sources) > len(targets):
reverse = True
sources, targets, csgraph = targets, sources, csgraph.T
# INDEX
source_indices = [node_index[s] for s in sources]
target_indices = [node_index[t] for t in targets]
source_index = dict(zip(sources, range(len(sources))))
index_node = {v: k for k, v in node_index.items()}
# DIKSTRA
dist_matrix, predecessors = dijkstra(
csgraph=csgraph,
directed=True,
indices=source_indices,
return_predecessors=True,
limit=cutoff
)
dist_matrix = dist_matrix.T[target_indices].T
df = pd.DataFrame(dist_matrix, index=sources, columns=targets)
df.columns.name = 'destination'
df.index.name = 'origin'
if od_set is not None:
mask_series = pd.Series(0, index=pd.MultiIndex.from_tuples(list(od_set)))
mask = mask_series.unstack().loc[sources, targets]
df += mask
stack = df.stack()
stack.name = 'length'
odl = stack.reset_index()
od_list = odl[['origin', 'destination']].values
path = get_reversed_path if reverse else get_path
paths = [
[
index_node[i] for i in
path(predecessors, source_index[o], node_index[d])
]
for o, d in od_list
]
odl['path'] = paths
if reverse:
odl[['origin', 'destination']] = odl[['destination', 'origin']]
return odl
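# Small end-to-end sketch: build a two-edge graph with sparse_matrix, then ask
# paths_from_graph for the o -> d shortest path. All node names are made up.
def _example_paths_from_graph():
    edges = [("o", "n1", 1.0), ("n1", "d", 1.0)]
    csgraph, node_index = sparse_matrix(edges)
    odl = paths_from_graph(csgraph, node_index, sources=["o"], targets=["d"])
    return odl[["origin", "destination", "length", "path"]]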
class PublicPathFinder:
def __init__(self, model, walk_on_road=False):
self.zones = model.zones.copy()
self.links = engine.graph_links(model.links.copy())
if walk_on_road:
road_links = model.road_links.copy()
road_links['time'] = road_links['walk_time']
self.footpaths = pd.concat([model.footpaths, road_links, model.road_to_transit])
self.ntlegs = pd.concat(
[model.zone_to_road, model.zone_to_transit]
)
else:
self.footpaths = model.footpaths.copy()
self.ntlegs = model.zone_to_transit.copy()
try:
self.centroids = model.centroids.copy()
except AttributeError:
self.centroids = self.zones.copy()
self.centroids['geometry'] = self.centroids['geometry'].apply(
lambda g: g.centroid
)
def first_link(self, path):
for n in path:
if n in self.links.index:
return n
def last_link(self, path):
for n in reversed(path):
if n in self.links.index:
return n
def build_route_zones(self, route_column):
"""
        find the origin zones that are likely to be affected by the removal
        of each one of the routes
"""
los = self.best_paths.copy()
los['first_link'] = los['path'].apply(self.first_link)
los['last_link'] = los['path'].apply(self.last_link)
right = self.links[[route_column]]
merged = pd.merge(los, right, left_on='first_link', right_index=True)
merged = pd.merge(merged, right, left_on='last_link', right_index=True, suffixes=['_first', '_last'])
first = merged[['origin', route_column + '_first']]
first.columns = ['zone', 'route']
last = merged[['destination', route_column + '_last']]
last.columns = ['zone', 'route']
zone_route = pd.concat([first, last]).drop_duplicates(subset=['zone', 'route'])
route_zone_sets = zone_route.groupby('route')['zone'].apply(set)
self.route_zones = route_zone_sets.to_dict()
def build_route_breaker(self, route_column='route_id'):
self.build_route_zones(route_column=route_column)
def build_mode_breaker(self, mode_column='route_type'):
self.build_mode_combinations(mode_column='route_type')
def build_mode_combinations(self, mode_column='route_type'):
mode_list = sorted(list(set(self.links[mode_column])))
boolean_array = list(itertools.product((True, False), repeat=len(mode_list)))
mode_combinations = []
for booleans in boolean_array:
combination = {
mode_list[i]
for i in range(len(mode_list))
if booleans[i]
}
mode_combinations.append(combination)
self.mode_combinations = mode_combinations
links = self.links.copy()
links.drop('index', axis=1, inplace=True, errors='ignore')
links.index.name = 'index'
self.mode_links = links.reset_index().groupby(
[mode_column]
)['index'].apply(lambda s: set(s)).to_dict()
def find_best_path(
self,
od_set=None,
cutoff=np.inf,
ntlegs_penalty=1e9,
boarding_time=None,
**kwargs
):
pole_set = set(self.zones.index)
matrix, node_index = adjacency_matrix(
links=self.links,
ntlegs=self.ntlegs,
footpaths=self.footpaths,
ntlegs_penalty=ntlegs_penalty,
boarding_time=boarding_time,
**kwargs
)
los = los_from_graph(
csgraph=matrix,
node_index=node_index,
pole_set=pole_set,
od_set=od_set,
cutoff=cutoff,
ntlegs_penalty=ntlegs_penalty
)
los['pathfinder_session'] = 'best_path'
los['reversed'] = False
self.best_paths = los
def find_broken_route_paths(
self,
od_set=None,
cutoff=np.inf,
route_column='route_id',
ntlegs_penalty=1e9,
boarding_time=None,
speedup=True,
**kwargs
):
pole_set = set(self.zones.index)
do_set = {(d, o) for o, d in od_set} if od_set is not None else None
to_concat = []
iterator = tqdm(self.route_zones.items())
for route_id, zones in iterator:
if not speedup:
zones = set(self.zones.index).intersection(set(self.ntlegs['a']))
iterator.desc = 'breaking route: ' + str(route_id) + ' '
matrix, node_index = adjacency_matrix(
links=self.links.loc[self.links[route_column] != route_id],
ntlegs=self.ntlegs,
footpaths=self.footpaths,
ntlegs_penalty=ntlegs_penalty,
boarding_time=boarding_time,
**kwargs
)
los = los_from_graph(
csgraph=matrix,
node_index=node_index,
pole_set=pole_set,
od_set=od_set,
sources=zones,
cutoff=cutoff,
ntlegs_penalty=ntlegs_penalty
)
los['reversed'] = False
los['broken_route'] = route_id
los['pathfinder_session'] = 'route_breaker'
to_concat.append(los)
los = los_from_graph(
csgraph=matrix.transpose(),
node_index=node_index,
pole_set=pole_set,
od_set=do_set,
sources=zones,
cutoff=cutoff,
ntlegs_penalty=ntlegs_penalty
)
los[['origin', 'destination']] = los[['destination', 'origin']]
los['path'] = los['path'].apply(lambda p: list(reversed(p)))
los['reversed'] = True
los['broken_route'] = route_id
los['pathfinder_session'] = 'route_breaker'
to_concat.append(los)
self.broken_route_paths = pd.concat(to_concat)
def find_broken_mode_paths(
self,
od_set=None,
cutoff=np.inf,
mode_column='mode_type',
ntlegs_penalty=1e9,
boarding_time=None,
**kwargs
):
pole_set = set(self.zones.index)
to_concat = []
iterator = tqdm(self.mode_combinations)
for combination in iterator:
iterator.desc = 'breaking modes: ' + str(combination) + ' '
matrix, node_index = adjacency_matrix(
links=self.links.loc[~self.links[mode_column].isin(combination)],
ntlegs=self.ntlegs,
footpaths=self.footpaths,
ntlegs_penalty=ntlegs_penalty,
boarding_time=boarding_time,
**kwargs
)
los = los_from_graph(
csgraph=matrix,
node_index=node_index,
pole_set=pole_set,
od_set=od_set,
cutoff=cutoff,
ntlegs_penalty=ntlegs_penalty
)
los['reversed'] = False
los['pathfinder_session'] = 'mode_breaker'
los['broken_modes'] = [combination for i in range(len(los))]
to_concat.append(los)
self.broken_mode_paths = | pd.concat(to_concat) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@contact: <EMAIL>
@software: PyCharm
@file: conf_matrix_bases_metrics.py
@time: 2020/2/26 23:16
@desc:
"""
import numpy as np
import pandas as pd
import seaborn as sns
from alchemy_cat.acplot import pretty_plot_confusion_matrix
from alchemy_cat import quick_init
from matplotlib import pyplot as plt
import os
from typing import Optional, Iterable
from torchnet.meter import MovingAverageValueMeter
from alchemy_cat.py_tools import Statistic, Tracker
__all__ = ['ClassificationMetric', 'SegmentationMetric']
class ClassificationMetric(Tracker):
"""Multi classification metric
See statistics at https://en.wikipedia.org/wiki/Confusion_matrix
"""
def __init__(self, class_num: Optional[int] = None, class_names: Optional[Iterable[str]]=None):
# * Init attributes and save init_dict
super(ClassificationMetric, self).__init__(quick_init(self, locals()))
# * Check Input
if class_names is None:
if class_num is None:
raise ValueError("class_num and class_names can not be None at the same time")
else:
class_names = [str(i) for i in range(class_num)]
else:
class_names = [str(name) for name in class_names]
if class_num is not None and len(class_names) != class_num:
raise ValueError(f"class_num = {class_num} should be equal to len(class_names) = {len(class_names)}")
else:
class_num = len(class_names)
self.class_num, self.class_names = class_num, class_names
self.conf_matrix = np.zeros((class_num, class_num), dtype=np.int32)
self._last_conf_matrix = self.conf_matrix.copy()
@staticmethod
def get_conf_matrix(pred: np.ndarray, gt: np.ndarray, class_num: int) -> np.ndarray:
"""Get confusion matrix accroding to input pred and gt
Function get pred and gt array from input, which mean predicting label and ground truth label respectively.
The label can be class of the input image, or class of each pixel of the input image. As long as pred and gt
have the same shape.
Args:
pred: Numpy array of pred label
gt: Numpy array of ground truth label.
class_num: number of classes
Returns:
confusion matrix calculated according to input pred and gt
"""
if pred.shape != gt.shape:
raise ValueError(f"pred's shape {pred.shape} should be equal to gt's shape {gt.shape}")
mask = (gt >= 0) & (gt < class_num)
conf_matrix = np.bincount(
            class_num * gt[mask].astype(np.int64) + pred[mask].astype(np.int64),
minlength=class_num ** 2,
).reshape(class_num, class_num)
return conf_matrix
def update(self, pred: np.ndarray, gt: np.ndarray):
"""Update confusion matrix accroding to input pred and gt
Function get pred and gt array from input, which mean predicting label and ground truth label respectively.
The label can be class of the input image, or class of each pixel of the input image. As long as pred and gt
have the same shape.
Function will use get_conf_matrix to calculate the last_conf_matrix of current input then add it to
self.conf_matrix
Args:
pred: Numpy array of pred label
gt: Numpy array of ground truth label.
"""
super(ClassificationMetric, self).update(pred, gt)
self._last_conf_matrix = self.get_conf_matrix(pred, gt, self.class_num)
self.conf_matrix += self._last_conf_matrix
def _plot_conf_matrix(self, **kwargs) -> plt.Figure:
df = | pd.DataFrame(self.conf_matrix, index=self.class_names, columns=self.class_names) | pandas.DataFrame |
import os
import keras.backend as K
import numpy as np
import pandas as pd
import pylab as pl
import matplotlib.cm as cm
import math
from scipy import misc
import util
BATCH_SIZE = 20
def img_data_generator(file_paths, batch_size):
"""
Data generator for the model.
:param file_paths: List of paths for the images
:param batch_size: Batch size to be used for prediction
"""
while True:
x_train = []
for file_path in file_paths:
img = misc.imread(file_path)
x_train.append(img)
if len(x_train) == batch_size:
x_to_yield = np.array(x_train, dtype=np.float32)
if K.image_dim_ordering() == "th":
x_to_yield = x_to_yield.transpose((0, 3, 1, 2))
yield x_to_yield
x_train = []
if len(x_train) > 0:
x_to_yield = np.array(x_train, dtype=np.float32)
if K.image_dim_ordering() == "th":
x_to_yield = x_to_yield.transpose((0, 3, 1, 2))
yield x_to_yield
def generate_predictions(model, img_dir, out_filepath, batch_size=BATCH_SIZE):
"""
Generate predictions for the model and save them to the specified path.
:param model: The pretrained model object
:param img_dir: The directory containing images that are to be fed to the model
:param out_filepath: File path to write the predictions
:param batch_size: Batch size to be used for generating predictions
"""
file_paths = [os.path.join(img_dir, f) for f in os.listdir(img_dir) if os.path.isfile(os.path.join(img_dir, f))]
steps = int(len(file_paths) / batch_size)
if len(file_paths) % batch_size > 0:
steps += 1
data_generator_obj = img_data_generator(file_paths, batch_size)
print("Generating predictions...")
predictions = model.predict_generator(data_generator_obj,
val_samples=steps * batch_size,
pickle_safe=True)
pd_dict = dict()
order = ['village_code']
pd_dict['village_code'] = [os.path.split(f)[1].split('.')[0] for f in file_paths]
for ii in range(len(predictions)):
predictions[ii] = np.array(predictions[ii], dtype=np.float32)
for idx in range(predictions[ii].shape[-1]):
pd_dict[str(ii) + "_" + str(idx)] = np.transpose(predictions[ii])[idx]
order.append(str(ii) + "_" + str(idx))
compare = | pd.DataFrame(data=pd_dict) | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on 2021/10/19
@author: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"""
import os
import pickle
from collections import namedtuple, Counter
from multiprocessing.pool import Pool
from typing import Tuple, Optional, Union, Dict
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
Dataset = namedtuple('Dataset', ('data', 'label', 'protect'))
# Generating random seed sequence
np.random.seed(1)
SEEDS = list(np.random.randint(1, 1000, 50000))
def get_seed() -> int: return SEEDS.pop(0)
def randomize(dataset: Dataset) -> Dataset:
""" Randomly shuffle the dataset.
:param dataset: Dataset Object.
:return: A Dataset Object after randomize.
"""
_dataset = np.hstack((dataset.data, dataset.label.reshape((-1, 1)), dataset.protect.reshape((-1, 1))))
np.random.seed(get_seed())
np.random.shuffle(_dataset)
return Dataset(_dataset[:, :-2], _dataset[:, -2].ravel(), _dataset[:, -1].ravel())
def mode(x: np.ndarray) -> np.ndarray:
""" Mode by line.
:param x: np.ndarray Object.
:return: A 1-d np.ndarray.
"""
_res = []
for idx in range(x.shape[0]):
count_dict = Counter(x[idx, :])
_label = 0 if count_dict[0] > count_dict[1] else 1
_res.append(_label)
return np.array(_res)
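# Tiny sanity check (illustrative): column-wise votes from three classifiers are
# reduced to one label per row by majority vote.
def _example_mode():
    votes = np.array([[0, 1, 1], [0, 0, 1]])
    return mode(votes)  # -> array([1, 0])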
def data_utils(names_file_path: str, data_file_path: str, positive_label: Union[int, str], protect_feature_name: str,
split_rate: float, *, protect_feature_values: Optional[Tuple[str, ...]] = None,
protect_feature_condition: Optional[Tuple[str, Union[int, float]]] = None,
data_sep: str = ',', header: bool = False, use_filter: bool = True, use_only_numeric_label: bool = False,
filter_ignore_tup: Tuple[str, str] = ('continuous', 'numeric')) -> Dict[str, Dataset]:
""" Load and pre-treat the dataset with custom requirements.
Basic Attribute
:param names_file_path: The path of the file includes features name and set of alternative labels.
:param data_file_path: The path of the data file.
:param positive_label: Positive sample label which corresponding to 1 in numeric label.
:param protect_feature_name: The name of the protected feature.
:param split_rate: Train and test dataset split rate.
Optional Attribute
:param protect_feature_values: Protected sample tag value list.
:param protect_feature_condition: Protected sample condition. A tuple of condition + base value. eg: (>, 65)
:param data_sep: The separation character of the data if not comma.
:param header: Whether the data file includes header.
:param use_filter: Whether to use the value filter.
:param use_only_numeric_label: Use only numeric features and delete label features.
:param filter_ignore_tup: The ignore tuple for the filter.
:returns: Datasets include train, test and unlabeled in a dictionary.
"""
# Assert check
assert protect_feature_values is not None or protect_feature_condition is not None, \
'Either protect_attr_value or protect_attr_condition should be specified.'
if protect_feature_condition is not None:
assert protect_feature_condition[0] in ('<', '>', '<=', '>='), \
'Protect_attr_condition only supports operation of (<, >, <=, >=).'
# Pre-define the container variable
info_dict = {}
names_ls = []
# Load names file
with open(names_file_path) as f:
names = f.readlines()
# Extract names
for item in names:
item = item.rstrip().replace(' ', '')
name, labels = item.split(':')
names_ls.append(name)
info_dict[name] = labels.split(',')
if names_ls[-1] != 'label':
names_ls.append('label')
# Load data file
names_ls = None if header else names_ls
_data = pd.read_csv(data_file_path, sep=data_sep, names=names_ls, index_col=False)
# Data filter
if use_filter:
for name in _data.columns:
try:
if info_dict[name][0] not in filter_ignore_tup:
_data = _data[_data[name].isin(info_dict[name])]
except KeyError:
del _data[name]
# Get labels
_label = np.zeros((_data.shape[0], 1), np.uint8)
_label[_data['label'] == positive_label] = 1
_label = _label.ravel()
del _data['label']
# Classify the protection group
_protect = np.zeros((_data.shape[0], 1), np.uint8)
if protect_feature_values is not None:
_protect[_data[protect_feature_name].isin(protect_feature_values)] = 1
else:
try:
base_value = float(protect_feature_condition[1])
except ValueError:
raise ValueError('The second value of protect_attr_condition should be in type int or float.')
exec('_protect[_data[protect_feature_name]' + protect_feature_condition[0] + 'base_value] = 1')
del _data[protect_feature_name]
_protect = _protect.ravel()
# Encode data with OneHot
if use_only_numeric_label:
for name in _data.columns:
if info_dict[name][0] not in filter_ignore_tup:
del _data[name]
else:
_data = | pd.get_dummies(_data) | pandas.get_dummies |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#import os
import numpy as np
import pandas as pd
from unittest import TestCase
from exatomic import gaussian
from exatomic.base import resource
from exatomic.gaussian import Output, Fchk
class TestFchk(TestCase):
def setUp(self):
self.mam1 = Fchk(resource('g09-ch3nh2-631g.fchk'))
self.mam2 = Fchk(resource('g09-ch3nh2-augccpvdz.fchk'))
self.mam3 = Fchk(resource('g16-methyloxirane-def2tzvp-freq.fchk'))
self.mam4 = Fchk(resource('g16-h2o2-def2tzvp-freq.fchk'))
self.nitro_nmr = Fchk(resource('g16-nitromalonamide-6-31++g-nmr.fchk'))
def test_parse_atom(self):
self.mam1.parse_atom()
self.assertEqual(self.mam1.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.atom))))
self.mam2.parse_atom()
self.assertEqual(self.mam2.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.atom))))
def test_parse_basis_set(self):
self.mam1.parse_basis_set()
self.assertEqual(self.mam1.basis_set.shape[0], 32)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.basis_set))))
self.mam2.parse_basis_set()
self.assertEqual(self.mam2.basis_set.shape[0], 53)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.basis_set))))
def test_parse_orbital(self):
self.mam1.parse_orbital()
self.assertEqual(self.mam1.orbital.shape[0], 28)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.orbital))))
self.mam2.parse_orbital()
self.assertEqual(self.mam2.orbital.shape[0], 91)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.orbital))))
def test_parse_momatrix(self):
self.mam1.parse_momatrix()
self.assertEqual(self.mam1.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.momatrix))))
self.mam2.parse_momatrix()
self.assertEqual(self.mam2.momatrix.shape[0], 8281)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.momatrix))))
def test_parse_basis_set_order(self):
self.mam1.parse_basis_set_order()
self.assertEqual(self.mam1.basis_set_order.shape[0], 28)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.basis_set_order))))
self.mam2.parse_basis_set_order()
self.assertEqual(self.mam2.basis_set_order.shape[0], 91)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.basis_set_order))))
def test_parse_frame(self):
self.mam1.parse_frame()
self.assertEqual(self.mam1.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.frame))))
self.mam2.parse_frame()
self.assertEqual(self.mam2.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.frame))))
def test_parse_frequency(self):
self.mam3.parse_frequency()
self.assertEqual(self.mam3.frequency.shape[0], 240)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.frequency))))
self.mam4.parse_frequency()
self.assertEqual(self.mam4.frequency.shape[0], 24)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.frequency))))
def test_parse_frequency_ext(self):
self.mam3.parse_frequency_ext()
self.assertEqual(self.mam3.frequency_ext.shape[0], 24)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.frequency_ext))))
self.mam4.parse_frequency_ext()
self.assertEqual(self.mam4.frequency_ext.shape[0], 6)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.frequency_ext))))
def test_parse_gradient(self):
self.mam3.parse_gradient()
self.assertEqual(self.mam3.gradient.shape[0], 10)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.gradient))))
self.mam4.parse_gradient()
self.assertEqual(self.mam4.gradient.shape[0], 4)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.gradient))))
def test_shielding_tensor(self):
self.nitro_nmr.parse_nmr_shielding()
self.assertEqual(self.nitro_nmr.nmr_shielding.shape[0], 15)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.nitro_nmr.nmr_shielding))))
def test_to_universe(self):
"""Test the to_universe method."""
mam1 = self.mam1.to_universe(ignore=True)
mam2 = self.mam2.to_universe(ignore=True)
for uni in [mam1, mam2]:
# cannot add frequency and frequency_ext attributes as they require
# very specific inputs
for attr in ['atom', 'basis_set', 'basis_set_order',
'momatrix', 'orbital', 'frame']:
self.assertTrue(hasattr(uni, attr))
class TestOutput(TestCase):
"""
This test ensures that the parsing functionality works on
a smattering of output files that were generated with the
Gaussian software package. Target syntax is for Gaussian
09.
"""
def setUp(self):
# TODO : add some cartesian basis set files
self.uo2 = Output(resource('g09-uo2.out'))
self.mam3 = Output(resource('g09-ch3nh2-631g.out'))
self.mam4 = Output(resource('g09-ch3nh2-augccpvdz.out'))
# need two because of the current limitations in the parse_frequency code
self.meth_opt = Output(resource('g16-methyloxirane-def2tzvp-opt.out'))
self.meth_freq = Output(resource('g16-methyloxirane-def2tzvp-freq.out'))
self.nap_tddft = Output(resource('g16-naproxen-def2tzvp-tddft.out'))
self.h2o2_tddft = Output(resource('g16-h2o2-def2tzvp-tddft.out'))
self.nap_opt = Output(resource('g16-naproxen-def2tzvp-opt.out'))
self.nitro_nmr = Output(resource('g16-nitromalonamide-6-31++g-nmr.out'))
# to test having both a geometry optimization and frequencies calculation
self.meth_opt_freq_hp = Output(resource('g16-methyloxirane-def2tzvp-opt-freq.out'))
def test_parse_atom(self):
self.uo2.parse_atom()
self.assertEqual(self.uo2.atom.shape[0], 3)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2.atom))))
self.mam3.parse_atom()
self.assertEqual(self.mam3.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.atom))))
self.mam4.parse_atom()
self.assertEqual(self.mam4.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.atom))))
self.meth_opt.parse_atom()
self.assertEqual(self.meth_opt.atom.shape[0], 120)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.meth_opt.atom))))
self.nap_opt.parse_atom()
self.assertEqual(self.nap_opt.atom.shape[0], 806)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.nap_opt.atom))))
self.meth_opt_freq_hp.parse_atom()
self.assertEqual(self.meth_opt_freq_hp.atom.shape[0], 130)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.meth_opt_freq_hp.atom))))
def test_parse_basis_set(self):
self.uo2.parse_basis_set()
self.assertEqual(self.uo2.basis_set.shape[0], 49)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2.basis_set))))
self.mam3.parse_basis_set()
self.assertEqual(self.mam3.basis_set.shape[0], 32)
cols = list(set(self.mam3.basis_set._columns))
test = pd.DataFrame(self.mam3.basis_set[cols])
self.assertTrue(np.all(pd.notnull(test)))
self.mam4.parse_basis_set()
self.assertEqual(self.mam4.basis_set.shape[0], 53)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.basis_set))))
def test_parse_orbital(self):
self.uo2.parse_orbital()
self.assertEqual(self.uo2.orbital.shape[0], 141)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2.orbital))))
self.mam3.parse_orbital()
self.assertEqual(self.mam3.orbital.shape[0], 28)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.orbital))))
self.mam4.parse_orbital()
self.assertEqual(self.mam4.orbital.shape[0], 91)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.orbital))))
self.meth_opt.parse_orbital()
self.assertEqual(self.meth_opt.orbital.shape[0], 160)
self.assertTrue(np.all(pd.notnull( | pd.DataFrame(self.meth_opt.orbital) | pandas.DataFrame |
import os
import logging
import progressbar
import pandas as pd
from aikit.tools.helper_functions import load_pkl, save_pkl
from mano.data.utils import load_data_from_dirty_json_file, chunks
class DataManager:
JSON_FIELDS_TO_MAP = [
{
'keys': ['delivery_offers', 'min_fee', 'as_float'],
'key': 'delivery_offers_min_fee'
},
{
'keys': ['prices', 'per_item', 'unit'],
'key': 'prices_per_item_unit'
}
]
    ## Fields tested and removed because of many NaNs or high correlation with other fields
JSON_FIELDS_TO_MAP_REMOVED = [
{
'keys': ['delivery_offers', 'min_time_fee', 'as_float'],
'key': 'delivery_offers_min_time_fee'
},
{
'keys': ['prices', 'main_price'],
'key': 'prices_main_price'
},
{
'keys': ['prices', 'secondary_price'],
'key': 'prices_secondary_price'
},
{
'keys': ['prices', 'per_item', 'actual_price', 'with_vat', 'as_float'],
'key': 'prices_per_item_actual_price_with_vat'
},
{
'keys': ['prices', 'per_item', 'actual_price', 'without_vat', 'as_float'],
'key': 'prices_per_item_actual_price_without_vat'
},
{
'keys': ['prices', 'per_item', 'retail_price', 'with_vat', 'as_float'],
'key': 'prices_per_item_retail_price_with_vat'
},
{
'keys': ['prices', 'per_item', 'retail_price', 'without_vat', 'as_float'],
'key': 'prices_per_item_retail_price_without_vat'
}
]
JSON_FIELDS_TO_DELETE = [
'detail_price', 'is_seller_b2b', 'is_mmf', 'has_3x_payment', 'market', 'model_markets', 'is_sample', 'has_sample',
'image_fullpath', 'brand_image_fullpath', 'url', 'default_title', 'legacy_unit',
'attribute_facet', 'top_attributes', 'catalog_attribute', 'reranking_positions', 'reranking_positions.alternate',
'categories.l3', 'categories.l3.id', 'categories.last_id', 'category_slug', 'banner.alternate',
'experiences', 'score', 'me_id', 'energy_efficiency',
'seller_id', 'brand_id', 'categories.l0.id', 'categories.l1.id', 'categories.l2.id', 'categories.last.id'
]
COLUMNS_TO_DELETE = [
'image_path', 'brand_image_path', 'thumbnails', 'catalog_attribute_facet', 'banner.categories',
'banner.default'
]
COLUMNS = [
'objectID', 'model_id', 'article_id', 'title',
'price', 'vat_rate', 'ecopart', 'discount', 'delivery_offers_min_fee', 'ranking_score_v1',
'seller_name', 'seller_country_id', 'brand_name', 'rating', 'rating_count',
'unit_type', 'unit_price', 'min_quantity', 'models_count',
'categories.l0', 'categories.l1', 'categories.l2', 'categories.last',
'n_categories.l0', 'n_categories.l1', 'n_categories.l2', 'n_thumbnails', 'n_attributes', 'n_topsales',
'has_brand_image','has_free_delivery', 'has_relay_delivery', 'has_1day_delivery', 'on_sale', 'indexable'
]
def __init__(self, path):
self.path = path
self.data_path = os.path.join(path, 'data')
self.processed_path = os.path.join(path, 'processed')
self.cache_file = os.path.join(path, 'cache.pkl')
self.files = sorted(os.listdir(self.data_path))
self.mapping = {}
if not os.path.exists(self.processed_path):
os.mkdir(self.processed_path)
def load(self):
""" Process and load the scraped data to a pandas dataframe """
if os.path.exists(self.cache_file):
return load_pkl(self.cache_file)
self._process_files_chunks()
results = self._concat_chunks()
save_pkl(results, self.cache_file)
return results
def _process_files_chunks(self):
""" Process files by chunks of ~1000 to allow fast recovery """
files_chunks = chunks(self.files, 1000)
for i, files in enumerate(files_chunks):
logging.info('---- Chunk {}'.format(i))
processed_file = os.path.join(self.processed_path, 'chunk_{}.pkl'.format(i))
if os.path.exists(processed_file):
continue
results = []
for file in progressbar.progressbar(files):
r = self._process_file(file)
if r is not None:
results.append(r)
results = pd.concat(results, sort=False)
            # Drop duplicate objects
results = results.drop_duplicates('objectID')
save_pkl(results, processed_file)
def _concat_chunks(self):
""" Concatenate the processed chunks into one dataset"""
results = [load_pkl(os.path.join(self.processed_path, chunk)) for chunk in os.listdir(self.processed_path)]
results = pd.concat(results, sort=False)
results = results[self.COLUMNS]
results = results.drop_duplicates('objectID')
results = results.utils.to_categoricals()
return results
def _process_file(self, file):
""" Process a single file """
file_path = os.path.join(self.data_path, file)
with open(file_path, 'r', encoding='utf-8') as file_stream:
data = file_stream.read()
data = load_data_from_dirty_json_file(data)
if data is None:
return None
data = data[0]['hits']
data = self._preprocess_json(data)
data = | pd.json_normalize(data) | pandas.json_normalize |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the integer keys to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
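# Illustrative helper (not a test): on a 2-D frame, _axify expands a key into a
# full-dimensional indexer, e.g. (slice(None), [0, 1]) for axis=1.
def _example_axify():
    df = DataFrame(np.zeros((2, 2)))
    return _axify(df, [0, 1], 1)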
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
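# Note: the coercion cases above depend on scalar access boxing the raw numpy
# values -- .iat/.iloc on a datetime64[ns] Series hand back a Timestamp and on
# a timedelta64[ns] Series a Timedelta (GH 7729).  A minimal sketch:
#
#     s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
#     isinstance(s.iat[1], Timestamp)   # True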
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
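# Note: a compact summary of the bounds rules exercised above -- positional
# slices that run past the end clip silently, exactly like Python list
# slicing, while a list/array of positions or a single integer raises
# IndexError as soon as any position is out of bounds.  Illustrative sketch
# (dfl as in the doc example above):
#
#     dfl.iloc[4:6]        # ok: clipped to the existing rows
#     dfl.iloc[[4, 5, 6]]  # IndexError (out-of-bounds list indexer)
#     dfl.iloc[:, 4]       # IndexError (single out-of-bounds position)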
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
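# Note: negative positions count back from the end, so .iloc[-len(obj)] lands
# on position 0 (GH 10547 / GH 10779).  Sketch for the 3-row frame above:
#
#     df.iloc[-3].equals(df.iloc[0])   # True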
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
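# Note: the slices above work because .iloc is purely positional -- the
# duplicate column labels produced by concat have no influence on which
# columns a positional slice selects.  Sketch (df2 has columns A/C, df1 has
# A/A/B/B):
#
#     df = concat([df2, df1], axis=1)
#     df.iloc[:, :2]   # always the two df2 columns, picked by position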
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# these now work rather than raising a KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
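# Note: the slice cases above show that label slicing through .loc includes
# both endpoints and, on a monotonic index, tolerates slice bounds that are
# not actually present (they are simply clipped), matching .ix here.  Sketch:
#
#     s = Series(range(5), index=[-2, -1, 1, 2, 3])
#     s.loc[-10:3]   # all five values: bounds clipped, right endpoint included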
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
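# Note: the errors asserted above come from .at/.iat never falling back to a
# different indexing mode -- .at wants an existing label of the index's own
# kind and .iat wants an integer position, and in this version a mismatch
# raises ValueError.  Sketch:
#
#     s = Series([1, 2, 3], index=list('abc'))
#     s.at['a']   # 1
#     s.at[0]     # ValueError: no positional fallback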
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
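# Note: taken together, the checks above pin down .loc slice semantics per
# index flavor -- label slices include both endpoints, date-like strings only
# slice the axis that actually holds a DatetimeIndex, and integer slices on a
# label/mixed axis fail with the exceptions asserted.  Sketch:
#
#     df = DataFrame(np.random.randn(4, 4), index=list('abcd'))
#     df.loc['a':'c']   # rows a, b and c -- the right endpoint is included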
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially no-ops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
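# Note: the GH 10408 case above writes whole columns of a MultiIndex-columned
# frame through .loc[:, (level_0, level_1)]; subtracting the two parsed
# datetime columns and casting to 'timedelta64[s]' yields the durations in
# whole seconds that the expected Series encodes.  Rough sketch of the same
# computation done in one step:
#
#     df.loc[:, ('Respondent', 'Duration')] = (
#         df.loc[:, ('Respondent', 'EndDate')]
#         - df.loc[:, ('Respondent', 'StartDate')]).astype('timedelta64[s]')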
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
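# Note: in the multiple-setting cases above the right-hand side frame is
# deliberately re-labelled before assignment -- .loc aligns a frame rhs on its
# index, so rhs.index is rewritten to the target labels first to make the
# rows line up with the destination.  Sketch:
#
#     rhs = df.loc[1:2]
#     rhs.index = df.index[0:2]   # align with the rows being set
#     df.loc[0:1] = rhs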
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
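# Note: GH 5508 above is about length checking -- assigning a 1-d ndarray
# through .ix/.loc requires its length to match the number of selected rows,
# otherwise a ValueError is raised.  Sketch:
#
#     df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])  # 4 rows, 4 values
#     df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])  # 3 rows, ValueError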
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
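# Note: the augmented assignment above mutates the frame in place -- selecting
# a block with df.iloc[[rows], [cols]] and applying += writes the updated
# values back into the original frame, leaving every other cell untouched.
# Sketch:
#
#     df.iloc[[0, 1], [1, 2]] += 100   # only those four cells change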
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# TODO: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
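# Note: the cases above document this version's behaviour for label lists
# against duplicated indexes -- selection follows the order of the requested
# labels, a duplicated label expands to all of its matching rows, and labels
# that are missing come back as all-NaN rows rather than raising.  Sketch:
#
#     df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
#     df.loc[['C', 'B']]       # rows in the requested order
#     df.ix[['A', 'A', 'E']]   # 'A' expands to both rows, 'E' becomes NaN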
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, i.e. column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0, 2] equals '_'. If instead I type
# df.ix[idx, 'test'], I get '-----', and finally typing df.iloc[0, 2]
# gives '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
| tm.assert_frame_equal(df, expected) | pandas.util.testing.assert_frame_equal |
# check agains values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated gettitems on a dup index returing a ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexErrror!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
        # slices exceeding the bounds are ok (they are truncated)
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
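    def test_iloc_out_of_bounds_sketch(self):
        # illustrative sketch added for clarity (not part of the original
        # suite; the test name is ours): out-of-bounds slices are silently
        # truncated, while out-of-bounds scalar or list indexers raise
        s = Series([1, 2, 3])
        tm.assert_series_equal(s.iloc[1:100], s.iloc[1:])
        self.assertRaises(IndexError, lambda: s.iloc[100])
        self.assertRaises(IndexError, lambda: s.iloc[[0, 100]])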
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
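    def test_iloc_negative_positions_sketch(self):
        # illustrative sketch added for clarity (not part of the original
        # suite; the test name is ours): negative positions count from the
        # end, so -len(s) reaches the first element
        s = Series([5, 6, 7])
        self.assertEqual(s.iloc[-1], 7)
        self.assertEqual(s.iloc[-len(s)], s.iloc[0])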
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
        # some edge cases where ix/loc should return the same thing
        # this is not an exhaustive list of cases
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
        # these now work rather than raising a KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
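    def test_loc_label_slice_inclusive_sketch(self):
        # illustrative sketch added for clarity (not part of the original
        # suite; the test name is ours): .loc slices are label-based and
        # include both endpoints, whereas .iloc slices are positional and
        # half-open
        s = Series([10, 20, 30, 40], index=[2, 4, 6, 8])
        expected = Series([20, 30], index=[4, 6])
        tm.assert_series_equal(s.loc[4:6], expected)
        tm.assert_series_equal(s.iloc[1:3], expected)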
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
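    def test_loc_missing_label_sketch(self):
        # illustrative sketch added for clarity (not part of the original
        # suite; the test name is ours): .loc is strictly label-based, so a
        # missing label raises KeyError rather than falling back to
        # positional access
        s = Series([1, 2, 3], index=list('abc'))
        self.assertEqual(s.loc['b'], 2)
        self.assertRaises(KeyError, lambda: s.loc['z'])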
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
        # empty (essentially no-ops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
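    def test_setitem_upcast_sketch(self):
        # illustrative sketch added for clarity (not part of the original
        # suite; the test name is ours): assigning a float into an int64
        # column upcasts only that column to float64
        df = DataFrame({'A': [1, 2], 'B': [3, 4]}, dtype='int64')
        df.loc[0, 'A'] = 1.5
        self.assertTrue(is_float_dtype(df['A']))
        self.assertTrue(is_integer_dtype(df['B']))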
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
        # TODO: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
        # non-unique index with a non-unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
        # this does not work, i.e. column 'test' is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
        # from the original bug report: inspecting df showed element [0, 2]
        # as '_', while df.ix[idx, 'test'] returned '-----' and
        # df.iloc[0, 2] returned '_'; the assertion above checks the fixed,
        # consistent behaviour
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
        # GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
        # GH 4508, making sure assignments are consistent
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
        # ok, but chained assignments are dangerous
        # if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
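    def test_iloc_boolean_ndarray_sketch(self):
        # illustrative sketch added for clarity (not part of the original
        # suite; the test name is ours): a plain boolean ndarray is a valid
        # .iloc indexer and matches ordinary boolean selection
        df = DataFrame({'a': [1, 2, 3, 4]})
        mask = (df['a'] > 2).values
        tm.assert_frame_equal(df.iloc[mask], df[df['a'] > 2])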
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
        # these are going to raise because we are non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
        # monotonic indexes are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
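    def test_non_unique_loc_slice_sketch(self):
        # illustrative sketch added for clarity (not part of the original
        # suite; the test name is ours): label slicing a non-unique index
        # requires monotonicity; once monotonic, the slice returns every row
        # carrying the endpoint labels
        s = Series([1, 2, 3, 4], index=[0, 1, 0, 1])
        self.assertRaises(KeyError, lambda: s.loc[0:1])
        s2 = Series([1, 2, 3, 4], index=[0, 0, 1, 1])
        tm.assert_series_equal(s2.loc[0:1], s2)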
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
        # dtype should properly raise KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
        # selecting a non-unique key from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
        # panel setting via item
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
# these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
orig = tm.makeTimeDataFrame()
df = orig.copy()
# don't allow non-string label inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.loc[100, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100, :] = df.ix[0]
self.assertRaises(ValueError, f)
# allow object conversion here
df = orig.copy()
df.loc['a', :] = df.ix[0]
exp = orig.append(pd.Series(df.ix[0], name='a'))
tm.assert_frame_equal(df, exp)
tm.assert_index_equal(df.index,
pd.Index(orig.index.tolist() + ['a']))
self.assertEqual(df.index.dtype, 'object')
def test_partial_set_empty_series(self):
# GH5226
# partially set with an empty object series
s = Series()
s.loc[1] = 1
tm.assert_series_equal(s, Series([1], index=[1]))
s.loc[3] = 3
tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
s = Series()
s.loc[1] = 1.
tm.assert_series_equal(s, Series([1.], index=[1]))
s.loc[3] = 3.
tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
s = Series()
s.loc['foo'] = 1
tm.assert_series_equal(s, Series([1], index=['foo']))
s.loc['bar'] = 3
tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
self.assertRaises(ValueError, f)
def f():
df.loc[1] = Series([1], index=['foo'])
self.assertRaises(ValueError, f)
def f():
df.loc[:, 1] = 1
self.assertRaises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index(
[], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
tm.assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = DataFrame()
df['foo'] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(range(len(df)))
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
df['foo'] = range(len(df))
return df
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
tm.assert_frame_equal(f(), expected)
df = DataFrame()
tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
df2 = DataFrame()
df2[1] = Series([1], index=['foo'])
df.loc[:, 1] = Series([1], index=['foo'])
tm.assert_frame_equal(df, DataFrame([[1]], index=['foo'], columns=[1]))
tm.assert_frame_equal(df, df2)
# no index to start
expected = DataFrame({0: Series(1, index=range(4))},
columns=['A', 'B', 0])
df = DataFrame(columns=['A', 'B'])
df[0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['A', 'B'])
df.loc[:, 0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_row(self):
# GH5720, GH5744
# don't create rows when empty
expected = DataFrame(columns=['A', 'B', 'New'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['New'] = expected['New'].astype('float64')
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
y['New'] = np.nan
tm.assert_frame_equal(y, expected)
# tm.assert_frame_equal(y,expected)
expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
expected['d'] = expected['d'].astype('int64')
df = DataFrame(columns=['a', 'b', 'c c'])
df['d'] = 3
tm.assert_frame_equal(df, expected)
tm.assert_series_equal(df['c c'], Series(name='c c', dtype=object))
# reindex columns is ok
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
result = y.reindex(columns=['A', 'B', 'C'])
expected = DataFrame(columns=['A', 'B', 'C'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['C'] = expected['C'].astype('float64')
tm.assert_frame_equal(result, expected)
def test_partial_set_empty_frame_set_series(self):
# GH 5756
# setting with empty Series
df = DataFrame(Series())
tm.assert_frame_equal(df, DataFrame({0: Series()}))
df = DataFrame(Series(name='foo'))
tm.assert_frame_equal(df, DataFrame({'foo': Series()}))
def test_partial_set_empty_frame_empty_copy_assignment(self):
# GH 5932
# copy on empty with assignment fails
df = DataFrame(index=[0])
df = df.copy()
df['a'] = 0
expected = DataFrame(0, index=[0], columns=['a'])
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_empty_consistencies(self):
# GH 6171
# consistency on empty frames
df = DataFrame(columns=['x', 'y'])
df['x'] = [1, 2]
expected = DataFrame(dict(x=[1, 2], y=[np.nan, np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
df = DataFrame(columns=['x', 'y'])
df['x'] = ['1', '2']
expected = DataFrame(
dict(x=['1', '2'], y=[np.nan, np.nan]), dtype=object)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df.loc[0, 'x'] = 1
expected = DataFrame(dict(x=[1], y=[np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
df = tm.makeDataFrame()
df['A'] # cache series
df.ix["Hello Friend"] = df.ix[0]
self.assertIn("Hello Friend", df['A'].index)
self.assertIn("Hello Friend", df['B'].index)
panel = tm.makePanel()
panel.ix[0] # get first item into cache
panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
self.assertIn("A+1", panel.ix[0].columns)
self.assertIn("A+1", panel.ix[1].columns)
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=['x', 'y', 'z'])
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
# setting via chained assignment
# but actually works, since everything is a view
df.loc[0]['z'].iloc[0] = 1.
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 1)
# correct setting
df.loc[(0, 0), 'z'] = 2
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 2)
# 10264
df = DataFrame(np.zeros((5, 5), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e'], index=range(5))
df['f'] = 0
df.f.values[3] = 1
# TODO(wesm): unused?
# y = df.iloc[np.arange(2, len(df))]
df.f.values[3] = 2
expected = DataFrame(np.zeros((5, 6), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e', 'f'], index=range(5))
expected.at[3, 'f'] = 2
tm.assert_frame_equal(df, expected)
expected = Series([0, 0, 0, 2, 0], name='f')
tm.assert_series_equal(df.f, expected)
def test_set_ix_out_of_bounds_axis_0(self):
df = pd.DataFrame(
randn(2, 5), index=["row%s" % i for i in range(2)],
columns=["col%s" % i for i in range(5)])
self.assertRaises(ValueError, df.ix.__setitem__, (2, 0), 100)
def test_set_ix_out_of_bounds_axis_1(self):
df = pd.DataFrame(
randn(5, 2), index=["row%s" % i for i in range(5)],
columns=["col%s" % i for i in range(2)])
self.assertRaises(ValueError, df.ix.__setitem__, (0, 2), 100)
def test_iloc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_loc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.loc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_ix_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_index_type_coercion(self):
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but actually is not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)),
Series(range(5), index=range(1, 6))]:
self.assertTrue(s.index.is_integer())
for indexer in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
indexer(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(indexer(s2)[0.1] == 0)
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
for s in [Series(range(5), index=np.arange(5.))]:
self.assertTrue(s.index.is_floating())
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
idxr(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(idxr(s2)[0.1] == 0)
s2 = s.copy()
idxr(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
idxr(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df['a'] = 10
tm.assert_frame_equal(DataFrame({0.0: df[0.0],
1.0: df[1.0],
'a': [10] * 10}),
df)
def test_duplicate_ix_returns_series(self):
df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
columns=list('abc'))
r = df.ix[0.2, 'a']
e = df.loc[0.2, 'a']
tm.assert_series_equal(r, e)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df.loc[df.index[:2]] = 1
expected = DataFrame({'a': [1, 1, 3], 'b': [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_float_index_at_iat(self):
s = pd.Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in s.iteritems():
self.assertEqual(s.at[el], item)
for i in range(len(s)):
self.assertEqual(s.iat[i], i + 1)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right):
# label, index, slice
r, i, s = list('bcd'), [1, 2, 3], slice(1, 4)
c, j, l = ['joe', 'jolie'], [1, 2], slice(1, 3)
left = df.copy()
left.loc[r, c] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.iloc[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[s, l] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[r, c] = rhs
tm.assert_frame_equal(left, right)
xs = np.arange(20).reshape(5, 4)
cols = ['jim', 'joe', 'jolie', 'joline']
df = pd.DataFrame(xs, columns=cols, index=list('abcde'))
# right hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right = df.copy()
right.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right)
# make frames multi-type & re-run tests
for frame in [df, rhs, right]:
frame['joe'] = frame['joe'].astype('float64')
frame['jolie'] = frame['jolie'].map('@{0}'.format)
run_tests(df, rhs, right)
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
if not idx.is_integer():
# For integer indices, ix and plain getitem are position-based.
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
for idx in [_mklbl('A', 20), np.arange(20) + 100,
np.linspace(100, 150, 20)]:
idx = Index(idx)
s = Series(np.arange(20), index=idx)
assert_slices_equivalent(SLC[idx[9]::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:idx[9]:-1], SLC[:8:-1])
assert_slices_equivalent(SLC[idx[13]:idx[9]:-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[idx[9]:idx[13]:-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
s = Series(np.arange(20), index=_mklbl('A', 20))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.ix[::0])
def test_indexing_assignment_dict_already_exists(self):
df = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]}).set_index('z')
expected = df.copy()
rhs = dict(x=9, y=99)
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
def test_indexing_dtypes_on_empty(self):
# Check that .iloc and .ix return correct dtypes GH9983
df = DataFrame({'a': [1, 2, 3], 'b': ['b', 'b2', 'b3']})
df2 = df.ix[[], :]
self.assertEqual(df2.loc[:, 'a'].dtype, np.int64)
tm.assert_series_equal(df2.loc[:, 'a'], df2.iloc[:, 0])
tm.assert_series_equal(df2.loc[:, 'a'], df2.ix[:, 0])
def test_range_in_series_indexing(self):
# range can cause an indexing error
# GH 11652
for x in [5, 999999, 1000000]:
s = pd.Series(index=range(x))
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
def test_non_reducing_slice(self):
df = pd.DataFrame([[0, 1], [2, 3]])
slices = [
# pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
pd.IndexSlice[[1], [1]],
pd.IndexSlice[1, [1]],
pd.IndexSlice[[1], 1],
pd.IndexSlice[1],
pd.IndexSlice[1, 1],
slice(None, None, None),
[0, 1],
np.array([0, 1]),
pd.Series([0, 1])
]
for slice_ in slices:
tslice_ = _non_reducing_slice(slice_)
self.assertTrue(isinstance(df.loc[tslice_], DataFrame))
def test_list_slice(self):
# like dataframe getitem
slices = [['A'], pd.Series(['A']), np.array(['A'])]
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['A', 'B'])
expected = pd.IndexSlice[:, ['A']]
for subset in slices:
result = _non_reducing_slice(subset)
tm.assert_frame_equal(df.loc[result], df.loc[expected])
def test_maybe_numeric_slice(self):
df = pd.DataFrame({'A': [1, 2], 'B': ['c', 'd'], 'C': [True, False]})
result = _maybe_numeric_slice(df, slice_=None)
expected = pd.IndexSlice[:, ['A']]
self.assertEqual(result, expected)
result = _maybe_numeric_slice(df, None, include_bool=True)
import os
from pathlib import Path
import pandas as pd
def load_tables():
##### LOAD THE RAW DATA #####
df = pd.DataFrame()
parent = Path(__file__).parent.parent
DATA_DIR = os.path.join(parent, 'data')
WEEKDAYS = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
for f in os.listdir(DATA_DIR):
if f.endswith('.csv') and '20210201' not in f:
in_file = os.path.join(DATA_DIR, f)
df = df.append(pd.read_csv(in_file, parse_dates=['Date Time (UTC)']))
# %% imports
from datetime import datetime
import numpy as np
import pandas as pd
import config as cfg
from src.utils.data_processing import download_file, medea_path, download_energy_balance, process_energy_balance
idx = pd.IndexSlice
eta_hydro_storage = 0.9
# ======================================================================================================================
# %% download and process opsd time series
url_opsd = 'https://data.open-power-system-data.org/time_series/latest/time_series_60min_singleindex.csv'
opsd_file = medea_path('data', 'raw', 'opsd_time_series_60min.csv')
download_file(url_opsd, opsd_file)
ts_opsd = pd.read_csv(opsd_file)
# create medea time series dataframe
ts_medea = ts_opsd[
['utc_timestamp', 'cet_cest_timestamp', 'AT_load_actual_entsoe_transparency', 'AT_solar_generation_actual',
'AT_wind_onshore_generation_actual', 'DE_load_actual_entsoe_transparency',
'DE_solar_generation_actual', 'DE_solar_capacity', 'DE_wind_onshore_generation_actual', 'DE_wind_onshore_capacity',
'DE_wind_offshore_generation_actual', 'DE_wind_offshore_capacity', 'DE_LU_price_day_ahead']]
del ts_opsd
ts_medea = ts_medea.copy()
ts_medea.set_index(pd.DatetimeIndex(ts_medea['utc_timestamp']))
#!/usr/bin/env python
import pandas as pd
import os
from pathlib import Path
wd = 'c:\\Users\\dwagn\\Desktop\\'
pic_folder = wd + 'poke_pics'
os.chdir(wd)
poke_data = pd.read_csv('pokemon.csv').iloc[:,0:3]
poke_data['Name'] = poke_data['Name'].str.lower()
poke_data = poke_data.rename({'Number': 'Pokemon_ID'}, axis=1)
pic_path = Path(pic_folder)
files = list(pic_path.glob('*.png'))
nms = [x.stem.strip('0123456789') for x in files]  # file name without leading digits or extension
pics_data = pd.DataFrame({'File' : files, 'Name' : nms})
full_data = pd.merge(poke_data, pics_data, on='Name')
"""
keras data generator object for image data
"""
import pandas as pd
import numpy as np
import random
from tensorflow.keras.utils import Sequence
from tensorflow.keras.applications.xception import preprocess_input
from data_tools.utils import append_image_data_chunk
class SequentialGenerator(Sequence):
"""
this generator assumes the data has following format:
* each data chunk has two files, a label.csv and a dataset.parquet
* label columns = image_id, animal_id
"""
def __init__(self, base_path, chunk_nums, image_shape=(320, 320, 3), batch_size=1024):
self.image_shape = image_shape
self.batch_size = batch_size
# read each image_data chunk into memory,
# note each image was stored as a column in parquet file,
# here we flip the axis so that each image is a row and the index of each image is its image_id
self._data = pd.DataFrame()
for each_num in chunk_nums:
self._data = append_image_data_chunk(self._data, f"{base_path}/dataset_{each_num}.parquet")
# load labels
# TODO decide whether to rely on image data and labels to have the same order
# so far i say yes, but it's probably a bad idea
self._labels = pd.concat([pd.read_csv(f"{base_path}/dataset_labels_{each_num}.csv", index_col=0) for each_num in chunk_nums])
def __len__(self):
if self._labels.shape[0] % self.batch_size != 0:
return self._labels.shape[0] // self.batch_size + 1
else:
return self._labels.shape[0] // self.batch_size
def __getitem__(self, index):
"""
:param index:
:return: image(batch_size, x, y, channel), label (batch_size, )
"""
x, y = self._get_block(index, self.batch_size)
return self._apply_preprocessing(x), y
def _apply_preprocessing(self,data):
return preprocess_input(data)
def _get_block(self, block_index, block_size):
if (block_index + 1) * block_size < self._labels.shape[0]:
block_image = self._data.values[block_index * block_size: (block_index + 1) * block_size, :]
block_label = self._labels["animal_id"].values[block_index * block_size: (block_index + 1) * block_size]
else:
block_image = self._data.values[block_index * block_size:, :]
block_label = self._labels["animal_id"].values[block_index * block_size:]
# reshape batch image before returning
return block_image.reshape((-1, *self.image_shape)), block_label
@staticmethod
def _append_image_data_chunk(image_frame: pd.DataFrame, file_to_append: str):
# this function loads a parquet data chunk and 'adds' it to image_frame
# note the data is flipped compared to the parquet file
to_add = pd.read_parquet(file_to_append, engine="pyarrow")
return pd.concat([image_frame, to_add.T], axis=0)
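# A minimal usage sketch (not from the original project): the base path, chunk
# numbers and batch size below are assumptions. The returned generator can be
# passed straight to keras Model.fit(...).
def _example_sequential_generator(base_path="data/chunks", chunk_nums=(0, 1)):
    return SequentialGenerator(base_path, list(chunk_nums),
                               image_shape=(320, 320, 3), batch_size=256)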
class RandomBlockGenerator(SequentialGenerator):
"""
Pretty much the same as SequentialGenerator,
instead of feeding all data row in order,
the orders are shuffled somewhat,
but each epoch will still only go through each row once,
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# the arrangement of each block is generated now
# this helps multiprocessing and ensuring each data row is used once per epoch
self.block_order = [i for i in range(len(self) * 2)]
random.shuffle(self.block_order)
def __getitem__(self, index):
# each batch is two randomly selected blocks
a_x, a_y = self._get_block(self.block_order[index], self.batch_size // 2)
b_x, b_y = self._get_block(self.block_order[index + len(self)], self.batch_size // 2)
# combine and preprocess batch
total_x = np.concatenate((a_x, b_x), axis=0)
del a_x, b_x
total_x = self._apply_preprocessing(total_x)
total_y = np.concatenate((a_y, b_y), axis=0)
return total_x, total_y
class PairedByLabelMixin:
# do i actually need these useless class attributes?
_data = None
_labels = None
batch_size = 0
def __len__(self):
return self._labels.shape[0] // (self.batch_size // 2)
def __getitem__(self, index):
"""
returns two embedding batches and a label on animal_id match
this method is expected to return roughly 50/50 same/different animal_id match
:param index:
:return: (left embeddings, right embeddings) , animal_id match
"""
# first let's get our anchor data, this will be the left half
block_size = self.batch_size // 2
if (index + 1) * block_size < self._labels.shape[0]:
anchor = self._labels[index * block_size: (index + 1) * block_size]
else:
anchor = self._labels[index * block_size:]
# right half - same id as anchor
same = self.random_but_same_id(anchor, key="animal_id")
# right half - randomly sampled from entire data set, likely to be different
different = self._labels.sample(n=block_size)
same_ness = np.equal(anchor["animal_id"].values, different["animal_id"].values).astype("float32").reshape((-1,1))
# get embedding data based on labels
left_data = self._data.loc[anchor["image_id"]]
right_same = self._data.loc[same["image_id"]]
right_diff = self._data.loc[different["image_id"]]
# get sameness label
batch_label = np.concatenate([
np.full((block_size,1), fill_value=1.0, dtype="float32"),
same_ness
], axis=0)
return (
np.concatenate([left_data.values, left_data.values], axis=0),
np.concatenate([right_same.values, right_diff.values])
), batch_label
@staticmethod
def random_but_same_id(label_frame: pd.DataFrame, key="animal_id") -> pd.DataFrame:
"""
returns a dataframe with same shape as input,
the order of each image is scrambled but the order of selected key column should be the same
"""
unique_keys = label_frame[key].unique()
outputs = []
for each_key in unique_keys:
outputs.append(label_frame[label_frame[key] == each_key].sample(frac=1))
return pd.concat(outputs, ignore_index=True, axis=0)
class PairedEmbeddingGenerator(PairedByLabelMixin, Sequence):
"""
this generator expects data with each row being embedding vector for each image, indexed by image_id
to select image by image_id
d.T.loc[l[:50]["image_id"]]
"""
def __init__(self, base_path, data_nums, batch_size=256, label_nums=(0,)):
self.batch_size = batch_size
# read each embedding_data chunk into memory,
# note each embedding was stored as a column in parquet file,
# here we flip the axis so that each image is a row and the index of each image is its image_id
self._data = pd.DataFrame()
import numpy as np
import pandas as pd
import os
import datetime
import matplotlib.pyplot as plt
def loading_data():
print("\n")
print("loading data function")
path = "D:/Notas de Estudio/Proyectos/production_analysis__project/production_analysis/data/raw/"
file = []
for (dirpath, dirnames, filenames) in os.walk("D:/Notas de Estudio/Proyectos/production_analysis__project/production_analysis/data/raw"):
file.extend(filenames)
print("Loading file...")
print(file[1])
path = path + file[1]
data = pd.read_excel(path, sheet_name="Prod. EXTR",header=0)
print("cleaning data...")
print("deleting unnecessary columns")
data = data [['FECHA', 'TURNO', 'GRUPO', 'MÁQ.', 'CODIGO', 'PRODUCCION (PZAS)','TOTAL KG', 'SCRAP']]
print("Column FECHA")
data = data[data["FECHA"].notna()]
data = data[data["FECHA"] != "Total"]
data['FECHA'] = pd.to_datetime(data['FECHA'], format= '%Y-%m-%d')
data['FECHA'].dt.day
data['DIA'] = data.loc[:,'FECHA'].dt.day
data['NOMBRE_DIA']=data.loc[:,'FECHA'].dt.day_name()
print('Column TURNO')
data['TURNO'] = data['TURNO'].astype('category')
print('Column GRUPO')
data['GRUPO'] = data['GRUPO'].astype('category')
print('Column CODIGO')
data.loc[:,'CODIGO'] = data['CODIGO'].fillna(0)
print('Column PRODUCCION (PZAS)')
data['PRODUCCION (PZAS)'] = data['PRODUCCION (PZAS)'].fillna(0)
data['PRODUCCION (PZAS)'] = data['PRODUCCION (PZAS)'].astype('int64')
print('Column TOTAL KG')
df = data['TOTAL KG'].fillna(0)
data.loc[:,'TOTAL KG'] = df
data.loc[:,'TOTAL KG'] = data['TOTAL KG'].astype('float')
print('Column SCRAP')
data['SCRAP'] = pd.to_numeric(data['SCRAP'], errors='coerce')
# -*- coding:utf-8 -*-
"""
Stock technical indicator interface
Created on 2018/07/26
@author: Wangzili
@group : **
@contact: <EMAIL>
In every indicator, the parameter df is the stock K-line data returned by get_k_data
"""
import pandas as pd
import numpy as np
import itertools
import tushare as ts  # needed for the index data fetched by dptb/jdqs/jdrs below
def ma(df, n=10):
"""
Moving Average (MA)
MA(N) = (close of day 1 + close of day 2 + ... + close of day N) / N
"""
pv = pd.DataFrame()
pv['date'] = df['date']
pv['v'] = df.close.rolling(n).mean()
return pv
def _ma(series, n):
"""
移动平均
"""
return series.rolling(n).mean()
def md(df, n=10):
"""
移动标准差
STD=S(CLOSE,N)=[∑(CLOSE-MA(CLOSE,N))^2/N]^0.5
"""
_md = pd.DataFrame()
_md['date'] = df.date
_md["md"] = df.close.rolling(n).std(ddof=0)
return _md
def _md(series, n):
"""
标准差MD
"""
return series.rolling(n).std(ddof=0) # 有时候会用ddof=1
def ema(df, n=12):
"""
指数平均数指标 Exponential Moving Average
今日EMA(N)=2/(N+1)×今日收盘价+(N-1)/(N+1)×昨日EMA(N)
EMA(X,N)=[2×X+(N-1)×EMA(ref(X),N]/(N+1)
"""
_ema = pd.DataFrame()
_ema['date'] = df['date']
_ema['ema'] = df.close.ewm(ignore_na=False, span=n, min_periods=0, adjust=False).mean()
return _ema
def _ema(series, n):
"""
指数平均数
"""
return series.ewm(ignore_na=False, span=n, min_periods=0, adjust=False).mean()
def macd(df, n=12, m=26, k=9):
"""
Moving Average Convergence Divergence (MACD)
today's EMA(N) = 2/(N+1) * today's close + (N-1)/(N+1) * yesterday's EMA(N)
DIFF = EMA(N1) - EMA(N2)
DEA(DIF, M) = 2/(M+1) * DIF + [1 - 2/(M+1)] * DEA(REF(DIF, 1), M)
MACD(BAR) = 2 * (DIF - DEA)
return:
osc: MACD bar / OSC histogram, DIFF - DEA
diff: difference line (DIFF)
dea: signal line (DEA)
"""
_macd = pd.DataFrame()
_macd['date'] = df['date']
_macd['diff'] = _ema(df.close, n) - _ema(df.close, m)
_macd['dea'] = _ema(_macd['diff'], k)
_macd['macd'] = _macd['diff'] - _macd['dea']
return _macd
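# A hypothetical usage sketch (the stock code is an assumption, not from the
# original module): fetch daily bars with tushare and compute a few of the
# indicators defined above. Requires a working tushare install and network access.
def _example_indicators(code="600848"):
    df = ts.get_k_data(code)
    return ma(df, 10), ema(df, 12), macd(df)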
def kdj(df, n=9):
"""
随机指标KDJ
N日RSV=(第N日收盘价-N日内最低价)/(N日内最高价-N日内最低价)×100%
当日K值=2/3前1日K值+1/3×当日RSV=SMA(RSV,M1)
当日D值=2/3前1日D值+1/3×当日K= SMA(K,M2)
当日J值=3 ×当日K值-2×当日D值
"""
_kdj = pd.DataFrame()
_kdj['date'] = df['date']
rsv = (df.close - df.low.rolling(n).min()) / (df.high.rolling(n).max() - df.low.rolling(n).min()) * 100
_kdj['k'] = sma(rsv, 3)
_kdj['d'] = sma(_kdj.k, 3)
_kdj['j'] = 3 * _kdj.k - 2 * _kdj.d
return _kdj
def rsi(df, n=6):
"""
相对强弱指标(Relative Strength Index,简称RSI
LC= REF(CLOSE,1)
RSI=SMA(MAX(CLOSE-LC,0),N,1)/SMA(ABS(CLOSE-LC),N1,1)×100
SMA(C,N,M)=M/N×今日收盘价+(N-M)/N×昨日SMA(N)
"""
# pd.set_option('display.max_rows', 1000)
_rsi = pd.DataFrame()
_rsi['date'] = df['date']
px = df.close - df.close.shift(1)
px[px < 0] = 0
_rsi['rsi'] = sma(px, n) / sma((df['close'] - df['close'].shift(1)).abs(), n) * 100
# def tmax(x):
# if x < 0:
# x = 0
# return x
# _rsi['rsi'] = sma((df['close'] - df['close'].shift(1)).apply(tmax), n) / sma((df['close'] - df['close'].shift(1)).abs(), n) * 100
return _rsi
def vrsi(df, n=6):
"""
量相对强弱指标
VRSI=SMA(最大值(成交量-REF(成交量,1),0),N,1)/SMA(ABS((成交量-REF(成交量,1),N,1)×100%
"""
_vrsi = pd.DataFrame()
_vrsi['date'] = df['date']
px = df['volume'] - df['volume'].shift(1)
px[px < 0] = 0
_vrsi['vrsi'] = sma(px, n) / sma((df['volume'] - df['volume'].shift(1)).abs(), n) * 100
return _vrsi
def boll(df, n=26, k=2):
"""
布林线指标BOLL boll(26,2) MID=MA(N)
标准差MD=根号[∑(CLOSE-MA(CLOSE,N))^2/N]
UPPER=MID+k×MD
LOWER=MID-k×MD
"""
_boll = pd.DataFrame()
_boll['date'] = df.date
_boll['mid'] = _ma(df.close, n)
_mdd = _md(df.close, n)
_boll['up'] = _boll.mid + k * _mdd
_boll['low'] = _boll.mid - k * _mdd
return _boll
def bbiboll(df, n=10, k=3):
"""
BBI多空布林线 bbiboll(10,3)
BBI={MA(3)+ MA(6)+ MA(12)+ MA(24)}/4
标准差MD=根号[∑(BBI-MA(BBI,N))^2/N]
UPR= BBI+k×MD
DWN= BBI-k×MD
"""
# pd.set_option('display.max_rows', 1000)
_bbiboll = pd.DataFrame()
_bbiboll['date'] = df.date
_bbiboll['bbi'] = (_ma(df.close, 3) + _ma(df.close, 6) + _ma(df.close, 12) + _ma(df.close, 24)) / 4
_bbiboll['md'] = _md(_bbiboll.bbi, n)
_bbiboll['upr'] = _bbiboll.bbi + k * _bbiboll.md
_bbiboll['dwn'] = _bbiboll.bbi - k * _bbiboll.md
return _bbiboll
def wr(df, n=14):
"""
威廉指标 w&r
WR=[最高值(最高价,N)-收盘价]/[最高值(最高价,N)-最低值(最低价,N)]×100%
"""
_wr = pd.DataFrame()
_wr['date'] = df['date']
higest = df.high.rolling(n).max()
_wr['wr'] = (higest - df.close) / (higest - df.low.rolling(n).min()) * 100
return _wr
def bias(df, n=12):
"""
乖离率 bias
bias=[(当日收盘价-12日平均价)/12日平均价]×100%
"""
_bias = pd.DataFrame()
_bias['date'] = df.date
_mav = df.close.rolling(n).mean()
_bias['bias'] = (np.true_divide((df.close - _mav), _mav)) * 100
# _bias["bias"] = np.vectorize(lambda x: round(Decimal(x), 4))(BIAS)
return _bias
def asi(df, n=5):
"""
振动升降指标(累计震动升降因子) ASI # 同花顺给出的公式不完整就不贴出来了
"""
_asi = pd.DataFrame()
_asi['date'] = df.date
_m = pd.DataFrame()
_m['a'] = (df.high - df.close.shift()).abs()
_m['b'] = (df.low - df.close.shift()).abs()
_m['c'] = (df.high - df.low.shift()).abs()
_m['d'] = (df.close.shift() - df.open.shift()).abs()
_m['r'] = _m.apply(lambda x: x.a + 0.5 * x.b + 0.25 * x.d if max(x.a, x.b, x.c) == x.a else (
x.b + 0.5 * x.a + 0.25 * x.d if max(x.a, x.b, x.c) == x.b else x.c + 0.25 * x.d
), axis=1)
_m['x'] = df.close - df.close.shift() + 0.5 * (df.close - df.open) + df.close.shift() - df.open.shift()
_m['k'] = np.maximum(_m.a, _m.b)
_asi['si'] = 16 * (_m.x / _m.r) * _m.k
_asi["asi"] = _ma(_asi.si, n)
return _asi
def vr_rate(df, n=26):
"""
成交量变异率 vr or vr_rate
VR=(AVS+1/2CVS)/(BVS+1/2CVS)×100
其中:
AVS:表示N日内股价上涨成交量之和
BVS:表示N日内股价下跌成交量之和
CVS:表示N日内股价不涨不跌成交量之和
"""
_vr = pd.DataFrame()
_vr['date'] = df['date']
_m = pd.DataFrame()
_m['volume'] = df.volume
_m['cs'] = df.close - df.close.shift(1)
_m['avs'] = _m.apply(lambda x: x.volume if x.cs > 0 else 0, axis=1)
_m['bvs'] = _m.apply(lambda x: x.volume if x.cs < 0 else 0, axis=1)
_m['cvs'] = _m.apply(lambda x: x.volume if x.cs == 0 else 0, axis=1)
_vr["vr"] = (_m.avs.rolling(n).sum() + 1 / 2 * _m.cvs.rolling(n).sum()
) / (_m.bvs.rolling(n).sum() + 1 / 2 * _m.cvs.rolling(n).sum()) * 100
return _vr
def vr(df, n=5):
"""
开市后平均每分钟的成交量与过去5个交易日平均每分钟成交量之比
量比:=V/REF(MA(V,5),1);
涨幅:=(C-REF(C,1))/REF(C,1)*100;
1)量比大于1.8,涨幅小于2%,现价涨幅在0—2%之间,在盘中选股的
选股:量比>1.8 AND 涨幅>0 AND 涨幅<2;
"""
_vr = pd.DataFrame()
_vr['date'] = df.date
_vr['vr'] = df.volume / _ma(df.volume, n).shift(1)
_vr['rr'] = (df.close - df.close.shift(1)) / df.close.shift(1) * 100
return _vr
def arbr(df, n=26):
"""
人气意愿指标 arbr(26)
N日AR=N日内(H-O)之和除以N日内(O-L)之和
其中,H为当日最高价,L为当日最低价,O为当日开盘价,N为设定的时间参数,一般原始参数日设定为26日
N日BR=N日内(H-CY)之和除以N日内(CY-L)之和
其中,H为当日最高价,L为当日最低价,CY为前一交易日的收盘价,N为设定的时间参数,一般原始参数日设定为26日。
"""
_arbr = pd.DataFrame()
_arbr['date'] = df.date
_arbr['ar'] = (df.high - df.open).rolling(n).sum() / (df.open - df.low).rolling(n).sum() * 100
_arbr['br'] = (df.high - df.close.shift(1)).rolling(n).sum() / (df.close.shift() - df.low).rolling(n).sum() * 100
return _arbr
def dpo(df, n=20, m=6):
"""
区间震荡线指标 dpo(20,6)
DPO=CLOSE-MA(CLOSE, N/2+1)
MADPO=MA(DPO,M)
"""
_dpo = pd.DataFrame()
_dpo['date'] = df['date']
_dpo['dpo'] = df.close - _ma(df.close, int(n / 2 + 1))
_dpo['dopma'] = _ma(_dpo.dpo, m)
return _dpo
def trix(df, n=12, m=20):
"""
三重指数平滑平均 TRIX(12)
TR= EMA(EMA(EMA(CLOSE,N),N),N),即进行三次平滑处理
TRIX=(TR-昨日TR)/ 昨日TR×100
TRMA=MA(TRIX,M)
"""
_trix = pd.DataFrame()
_trix['date'] = df.date
tr = _ema(_ema(_ema(df.close, n), n), n)
_trix['trix'] = (tr - tr.shift()) / tr.shift() * 100
_trix['trma'] = _ma(_trix.trix, m)
return _trix
def bbi(df):
"""
多空指数 BBI(3,6,12,24)
BBI=(3日均价+6日均价+12日均价+24日均价)/4
"""
_bbi = pd.DataFrame()
_bbi['date'] = df['date']
_bbi['bbi'] = (_ma(df.close, 3) + _ma(df.close, 6) + _ma(df.close, 12) + _ma(df.close, 24)) / 4
return _bbi
def mtm(df, n=6, m=5):
"""
动力指标 MTM(6,5)
MTM(N日)=C-REF(C,N)式中,C=当日的收盘价,REF(C,N)=N日前的收盘价;N日是只计算交易日期,剔除掉节假日。
MTMMA(MTM,N1)= MA(MTM,N1)
N表示间隔天数,N1表示天数
"""
_mtm = pd.DataFrame()
_mtm['date'] = df.date
_mtm['mtm'] = df.close - df.close.shift(n)
_mtm['mtmma'] = _ma(_mtm.mtm, m)
return _mtm
def obv(df):
"""
能量潮 On Balance Volume
多空比率净额= [(收盘价-最低价)-(最高价-收盘价)] ÷( 最高价-最低价)×V # 同花顺貌似用的下面公式
主公式:当日OBV=前一日OBV+今日成交量
1.基期OBV值为0,即该股上市的第一天,OBV值为0
2.若当日收盘价>上日收盘价,则当日OBV=前一日OBV+今日成交量
3.若当日收盘价<上日收盘价,则当日OBV=前一日OBV-今日成交量
4.若当日收盘价=上日收盘价,则当日OBV=前一日OBV
"""
_obv = pd.DataFrame()
_obv["date"] = df['date']
# tmp = np.true_divide(((df.close - df.low) - (df.high - df.close)), (df.high - df.low))
# _obv['obvv'] = tmp * df.volume
# _obv["obv"] = _obv.obvv.expanding(1).sum() / 100
_m = pd.DataFrame()
_m['date'] = df.date
_m['cs'] = df.close - df.close.shift()
_m['v'] = df.volume
_m['vv'] = _m.apply(lambda x: x.v if x.cs > 0 else (-x.v if x.cs < 0 else 0), axis=1)
_obv['obv'] = _m.vv.expanding(1).sum()
return _obv
def cci(df, n=14):
"""
顺势指标
TYP:=(HIGH+LOW+CLOSE)/3
CCI:=(TYP-MA(TYP,N))/(0.015×AVEDEV(TYP,N))
"""
_cci = pd.DataFrame()
_cci["date"] = df['date']
typ = (df.high + df.low + df.close) / 3
_cci['cci'] = ((typ - typ.rolling(n).mean()) /
(0.015 * typ.rolling(min_periods=1, center=False, window=n).apply(
lambda x: np.fabs(x - x.mean()).mean())))
return _cci
def priceosc(df, n=12, m=26):
"""
价格振动指数
PRICEOSC=(MA(C,12)-MA(C,26))/MA(C,12) * 100
"""
_c = pd.DataFrame()
_c['date'] = df['date']
man = _ma(df.close, n)
_c['osc'] = (man - _ma(df.close, m)) / man * 100
return _c
def sma(a, n, m=1):
"""
Smoothed Moving Average (SMA)
"""
''' # Method 1: has a flaw
_sma = []
for index, value in enumerate(a):
if index == 0 or pd.isna(value) or np.isnan(value):
tsma = 0
else:
# Y=(M*X+(N-M)*Y')/N
tsma = (m * value + (n - m) * tsma) / n
_sma.append(tsma)
return pd.Series(_sma)
'''
''' # Method 2
results = np.nan_to_num(a).copy()
# FIXME this is very slow
for i in range(1, len(a)):
results[i] = (m * results[i] + (n - m) * results[i - 1]) / n
# results[i] = ((n - 1) * results[i - 1] + results[i]) / n
# return results
'''
# b = np.nan_to_num(a).copy()
# return ((n - m) * a.shift(1) + m * a) / n
a = a.fillna(0)
b = a.ewm(min_periods=0, ignore_na=False, adjust=False, alpha=m/n).mean()
return b
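# A small self-check added for illustration (no external assumptions): the
# recursive definition Y = (M*X + (N-M)*Y')/N quoted above is equivalent to an
# exponentially weighted mean with alpha = M/N and adjust=False.
def _example_sma_equivalence(n=3, m=1):
    s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    manual = [s.iloc[0]]
    for x in s.iloc[1:]:
        manual.append((m * x + (n - m) * manual[-1]) / n)
    return np.allclose(sma(s, n, m).values, manual)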
def dbcd(df, n=5, m=16, t=76):
"""
异同离差乖离率 dbcd(5,16,76)
BIAS=(C-MA(C,N))/MA(C,N)
DIF=(BIAS-REF(BIAS,M))
DBCD=SMA(DIF,T,1) =(1-1/T)×SMA(REF(DIF,1),T,1)+ 1/T×DIF
MM=MA(DBCD,5)
"""
_dbcd = pd.DataFrame()
_dbcd['date'] = df.date
man = _ma(df.close, n)
_bias = (df.close - man) / man
_dif = _bias - _bias.shift(m)
_dbcd['dbcd'] = sma(_dif, t)
_dbcd['mm'] = _ma(_dbcd.dbcd, n)
return _dbcd
def roc(df, n=12, m=6):
"""
变动速率 roc(12,6)
ROC=(今日收盘价-N日前的收盘价)/ N日前的收盘价×100%
ROCMA=MA(ROC,M)
ROC:(CLOSE-REF(CLOSE,N))/REF(CLOSE,N)×100
ROCMA:MA(ROC,M)
"""
_roc = pd.DataFrame()
_roc['date'] = df['date']
_roc['roc'] = (df.close - df.close.shift(n))/df.close.shift(n) * 100
_roc['rocma'] = _ma(_roc.roc, m)
return _roc
def vroc(df, n=12):
"""
量变动速率
VROC=(当日成交量-N日前的成交量)/ N日前的成交量×100%
"""
_vroc = pd.DataFrame()
_vroc['date'] = df['date']
_vroc['vroc'] = (df.volume - df.volume.shift(n)) / df.volume.shift(n) * 100
return _vroc
def cr(df, n=26):
""" 能量指标
CR=∑(H-PM)/∑(PM-L)×100
PM:上一交易日中价((最高、最低、收盘价的均值)
H:当天最高价
L:当天最低价
"""
_cr = pd.DataFrame()
_cr['date'] = df.date
# pm = ((df['high'] + df['low'] + df['close']) / 3).shift(1)
pm = (df[['high', 'low', 'close']]).mean(axis=1).shift(1)
_cr['cr'] = (df.high - pm).rolling(n).sum()/(pm - df.low).rolling(n).sum() * 100
return _cr
def psy(df, n=12):
"""
心理指标 PSY(12)
PSY=N日内上涨天数/N×100
PSY:COUNT(CLOSE>REF(CLOSE,1),N)/N×100
MAPSY=PSY的M日简单移动平均
"""
_psy = pd.DataFrame()
_psy['date'] = df.date
p = df.close - df.close.shift()
p[p <= 0] = np.nan
_psy['psy'] = p.rolling(n).count() / n * 100
return _psy
def wad(df, n=30):
"""
威廉聚散指标 WAD(30)
TRL=昨日收盘价与今日最低价中价格最低者;TRH=昨日收盘价与今日最高价中价格最高者
如果今日的收盘价>昨日的收盘价,则今日的A/D=今日的收盘价-今日的TRL
如果今日的收盘价<昨日的收盘价,则今日的A/D=今日的收盘价-今日的TRH
如果今日的收盘价=昨日的收盘价,则今日的A/D=0
WAD=今日的A/D+昨日的WAD;MAWAD=WAD的M日简单移动平均
"""
def dmd(x):
if x.c > 0:
y = x.close - x.trl
elif x.c < 0:
y = x.close - x.trh
else:
y = 0
return y
_wad = pd.DataFrame()
_wad['date'] = df['date']
_ad = pd.DataFrame()
_ad['trl'] = np.minimum(df.low, df.close.shift(1))
_ad['trh'] = np.maximum(df.high, df.close.shift(1))
_ad['c'] = df.close - df.close.shift()
_ad['close'] = df.close
_ad['ad'] = _ad.apply(dmd, axis=1)
_wad['wad'] = _ad.ad.expanding(1).sum()
_wad['mawad'] = _ma(_wad.wad, n)
return _wad
def mfi(df, n=14):
"""
资金流向指标 mfi(14)
MF=TYP×成交量;TYP:当日中价((最高、最低、收盘价的均值)
如果当日TYP>昨日TYP,则将当日的MF值视为当日PMF值。而当日NMF值=0
如果当日TYP<=昨日TYP,则将当日的MF值视为当日NMF值。而当日PMF值=0
MR=∑PMF/∑NMF
MFI=100-(100÷(1+MR))
"""
_mfi = pd.DataFrame()
_mfi['date'] = df.date
_m = pd.DataFrame()
_m['typ'] = df[['high', 'low', 'close']].mean(axis=1)
_m['mf'] = _m.typ * df.volume
_m['typ_shift'] = _m.typ - _m.typ.shift(1)
_m['pmf'] = _m.apply(lambda x: x.mf if x.typ_shift > 0 else 0, axis=1)
_m['nmf'] = _m.apply(lambda x: x.mf if x.typ_shift <= 0 else 0, axis=1)
# _mfi['mfi'] = 100 - (100 / (1 + _m.pmf.rolling(n).sum() / _m.nmf.rolling(n).sum()))
_m['mr'] = _m.pmf.rolling(n).sum() / _m.nmf.rolling(n).sum()
_mfi['mfi'] = 100 * _m.mr / (1 + _m.mr) # 同花顺自己给出的公式和实际用的公式不一样,真操蛋,浪费两个小时时间
return _mfi
def pvt(df):
"""
pvt 量价趋势指标 pvt
如果设x=(今日收盘价—昨日收盘价)/昨日收盘价×当日成交量,
那么当日PVT指标值则为从第一个交易日起每日X值的累加。
"""
_pvt = pd.DataFrame()
_pvt['date'] = df.date
x = (df.close - df.close.shift(1)) / df.close.shift(1) * df.volume
_pvt['pvt'] = x.expanding(1).sum()
return _pvt
def wvad(df, n=24, m=6):
""" # 算法是对的,同花顺计算wvad用的n=6
威廉变异离散量 wvad(24,6)
WVAD=N1日的∑ {(当日收盘价-当日开盘价)/(当日最高价-当日最低价)×成交量}
MAWVAD=MA(WVAD,N2)
"""
_wvad = pd.DataFrame()
_wvad['date'] = df.date
# _wvad['wvad'] = (np.true_divide((df.close - df.open), (df.high - df.low)) * df.volume).rolling(n).sum()
_wvad['wvad'] = (np.true_divide((df.close - df.open), (df.high - df.low)) * df.volume).rolling(n).sum()
_wvad['mawvad'] = _ma(_wvad.wvad, m)
return _wvad
def cdp(df):
"""
逆势操作 cdp
CDP=(最高价+最低价+收盘价)/3 # 同花顺实际用的(H+L+2*c)/4
AH=CDP+(前日最高价-前日最低价)
NH=CDP×2-最低价
NL=CDP×2-最高价
AL=CDP-(前日最高价-前日最低价)
"""
_cdp = pd.DataFrame()
_cdp['date'] = df.date
# _cdp['cdp'] = (df.high + df.low + df.close * 2).shift(1) / 4
_cdp['cdp'] = df[['high', 'low', 'close', 'close']].shift().mean(axis=1)
_cdp['ah'] = _cdp.cdp + (df.high.shift(1) - df.low.shift())
_cdp['al'] = _cdp.cdp - (df.high.shift(1) - df.low.shift())
_cdp['nh'] = _cdp.cdp * 2 - df.low.shift(1)
_cdp['nl'] = _cdp.cdp * 2 - df.high.shift(1)
return _cdp
def env(df, n=14):
"""
ENV指标 ENV(14)
Upper=MA(CLOSE,N)×1.06
LOWER= MA(CLOSE,N)×0.94
"""
_env = pd.DataFrame()
_env['date'] = df.date
_env['up'] = df.close.rolling(n).mean() * 1.06
_env['low'] = df.close.rolling(n).mean() * 0.94
return _env
def mike(df, n=12):
"""
麦克指标 mike(12)
初始价(TYP)=(当日最高价+当日最低价+当日收盘价)/3
HV=N日内区间最高价
LV=N日内区间最低价
初级压力线(WR)=TYP×2-LV
中级压力线(MR)=TYP+HV-LV
强力压力线(SR)=2×HV-LV
初级支撑线(WS)=TYP×2-HV
中级支撑线(MS)=TYP-HV+LV
强力支撑线(SS)=2×LV-HV
"""
_mike = pd.DataFrame()
_mike['date'] = df.date
typ = df[['high', 'low', 'close']].mean(axis=1)
hv = df.high.rolling(n).max()
lv = df.low.rolling(n).min()
_mike['wr'] = typ * 2 - lv
_mike['mr'] = typ + hv - lv
_mike['sr'] = 2 * hv - lv
_mike['ws'] = typ * 2 - hv
_mike['ms'] = typ - hv + lv
_mike['ss'] = 2 * lv - hv
return _mike
def vma(df, n=5):
"""
量简单移动平均 VMA(5) VMA=MA(volume,N)
VOLUME表示成交量;N表示天数
"""
_vma = pd.DataFrame()
_vma['date'] = df.date
_vma['vma'] = _ma(df.volume, n)
return _vma
def vmacd(df, qn=12, sn=26, m=9):
"""
量指数平滑异同平均 vmacd(12,26,9)
今日EMA(N)=2/(N+1)×今日成交量+(N-1)/(N+1)×昨日EMA(N)
DIFF= EMA(N1)- EMA(N2)
DEA(DIF,M)= 2/(M+1)×DIF +[1-2/(M+1)]×DEA(REF(DIF,1),M)
MACD(BAR)=2×(DIF-DEA)
"""
_vmacd = pd.DataFrame()
_vmacd['date'] = df.date
_vmacd['diff'] = _ema(df.volume, qn) - _ema(df.volume, sn)
_vmacd['dea'] = _ema(_vmacd['diff'], m) # TODO: 不能用_vmacd.diff, 不知道为什么
_vmacd['macd'] = (_vmacd['diff'] - _vmacd['dea'])
return _vmacd
def vosc(df, n=12, m=26):
"""
成交量震荡 vosc(12,26)
VOSC=(MA(VOLUME,SHORT)- MA(VOLUME,LONG))/MA(VOLUME,SHORT)×100
"""
_c = pd.DataFrame()
_c['date'] = df['date']
_c['osc'] = (_ma(df.volume, n) - _ma(df.volume, m)) / _ma(df.volume, n) * 100
return _c
def tapi(df, n=6):
""" # TODO: 由于get_k_data返回数据中没有amount,可以用get_h_data中amount,算法是正确的
加权指数成交值 tapi(6)
TAPI=每日成交总值/当日加权指数=a/PI;A表示每日的成交金额,PI表示当天的股价指数即指收盘价
"""
_tapi = pd.DataFrame()
# _tapi['date'] = df.date
_tapi['tapi'] = df.amount / df.close
_tapi['matapi'] = _ma(_tapi.tapi, n)
return _tapi
def vstd(df, n=10):
"""
成交量标准差 vstd(10)
VSTD=STD(Volume,N)=[∑(Volume-MA(Volume,N))^2/N]^0.5
"""
_vstd = pd.DataFrame()
_vstd['date'] = df.date
_vstd['vstd'] = df.volume.rolling(n).std(ddof=1)
return _vstd
def adtm(df, n=23, m=8):
"""
动态买卖气指标 adtm(23,8)
如果开盘价≤昨日开盘价,DTM=0
如果开盘价>昨日开盘价,DTM=(最高价-开盘价)和(开盘价-昨日开盘价)的较大值
如果开盘价≥昨日开盘价,DBM=0
如果开盘价<昨日开盘价,DBM=(开盘价-最低价)
STM=DTM在N日内的和
SBM=DBM在N日内的和
如果STM > SBM,ADTM=(STM-SBM)/STM
如果STM < SBM , ADTM = (STM-SBM)/SBM
如果STM = SBM,ADTM=0
ADTMMA=MA(ADTM,M)
"""
_adtm = pd.DataFrame()
_adtm['date'] = df.date
_m = pd.DataFrame()
_m['cc'] = df.open - df.open.shift(1)
_m['ho'] = df.high - df.open
_m['ol'] = df.open - df.low
_m['dtm'] = _m.apply(lambda x: max(x.ho, x.cc) if x.cc > 0 else 0, axis=1)
_m['dbm'] = _m.apply(lambda x: x.ol if x.cc < 0 else 0, axis=1)
_m['stm'] = _m.dtm.rolling(n).sum()
_m['sbm'] = _m.dbm.rolling(n).sum()
_m['ss'] = _m.stm - _m.sbm
_adtm['adtm'] = _m.apply(lambda x: x.ss / x.stm if x.ss > 0 else (x.ss / x.sbm if x.ss < 0 else 0), axis=1)
_adtm['adtmma'] = _ma(_adtm.adtm, m)
return _adtm
def mi(df, n=12):
"""
动量指标 mi(12)
A=CLOSE-REF(CLOSE,N)
MI=SMA(A,N,1)
"""
_mi = pd.DataFrame()
_mi['date'] = df.date
_mi['mi'] = sma(df.close - df.close.shift(n), n)
return _mi
def micd(df, n=3, m=10, k=20):
"""
异同离差动力指数 micd(3,10,20)
MI=CLOSE-ref(CLOSE,1)AMI=SMA(MI,N1,1)
DIF=MA(ref(AMI,1),N2)-MA(ref(AMI,1),N3)
MICD=SMA(DIF,10,1)
"""
_micd = pd.DataFrame()
_micd['date'] = df.date
mi = df.close - df.close.shift(1)
ami = sma(mi, n)
dif = _ma(ami.shift(1), m) - _ma(ami.shift(1), k)
_micd['micd'] = sma(dif, m)
return _micd
def rc(df, n=50):
"""
变化率指数 rc(50)
RC=收盘价/REF(收盘价,N)×100
ARC=EMA(REF(RC,1),N,1)
"""
_rc = pd.DataFrame()
_rc['date'] = df.date
_rc['rc'] = df.close / df.close.shift(n) * 100
_rc['arc'] = sma(_rc.rc.shift(1), n)
return _rc
def rccd(df, n=59, m=21, k=28):
""" # TODO: 计算结果错误和同花顺不同,检查不出来为什么
异同离差变化率指数 rate of change convergence divergence rccd(59,21,28)
RC=收盘价/REF(收盘价,N)×100%
ARC=EMA(REF(RC,1),N,1)
DIF=MA(ref(ARC,1),N1)-MA MA(ref(ARC,1),N2)
RCCD=SMA(DIF,N,1)
"""
_rccd = pd.DataFrame()
_rccd['date'] = df.date
rc = df.close / df.close.shift(n) * 100
arc = sma(rc.shift(), n)
dif = _ma(arc.shift(), m) - _ma(arc.shift(), k)
_rccd['rccd'] = sma(dif, n)
return _rccd
def srmi(df, n=9):
"""
SRMIMI修正指标 srmi(9)
如果收盘价>N日前的收盘价,SRMI就等于(收盘价-N日前的收盘价)/收盘价
如果收盘价<N日前的收盘价,SRMI就等于(收盘价-N日签的收盘价)/N日前的收盘价
如果收盘价=N日前的收盘价,SRMI就等于0
"""
_srmi = pd.DataFrame()
_srmi['date'] = df.date
_m = pd.DataFrame()
_m['close'] = df.close
_m['cp'] = df.close.shift(n)
_m['cs'] = df.close - df.close.shift(n)
_srmi['srmi'] = _m.apply(lambda x: x.cs/x.close if x.cs > 0 else (x.cs/x.cp if x.cs < 0 else 0), axis=1)
return _srmi
def dptb(df, n=7):
"""
大盘同步指标 dptb(7)
DPTB=(统计N天中个股收盘价>开盘价,且指数收盘价>开盘价的天数或者个股收盘价<开盘价,且指数收盘价<开盘价)/N
"""
ind = ts.get_k_data("sh000001", start=df.date.iloc[0], end=df.date.iloc[-1])
sd = df.copy()
sd.set_index('date', inplace=True) # 可能出现停盘等情况,所以将date设为index
ind.set_index('date', inplace=True)
_dptb = pd.DataFrame(index=df.date)
q = ind.close - ind.open
_dptb['p'] = sd.close - sd.open
_dptb['q'] = q
_dptb['m'] = _dptb.apply(lambda x: 1 if (x.p > 0 and x.q > 0) or (x.p < 0 and x.q < 0) else np.nan, axis=1)
_dptb['jdrs'] = _dptb.m.rolling(n).count() / n
_dptb.drop(columns=['p', 'q', 'm'], inplace=True)
_dptb.reset_index(inplace=True)
return _dptb
def jdqs(df, n=20):
"""
阶段强势指标 jdqs(20)
JDQS=(统计N天中个股收盘价>开盘价,且指数收盘价<开盘价的天数)/(统计N天中指数收盘价<开盘价的天数)
"""
ind = ts.get_k_data("sh000001", start=df.date.iloc[0], end=df.date.iloc[-1])
sd = df.copy()
sd.set_index('date', inplace=True) # 可能出现停盘等情况,所以将date设为index
ind.set_index('date', inplace=True)
_jdrs = pd.DataFrame(index=df.date)
q = ind.close - ind.open
_jdrs['p'] = sd.close - sd.open
_jdrs['q'] = q
_jdrs['m'] = _jdrs.apply(lambda x: 1 if (x.p > 0 and x.q < 0) else np.nan, axis=1)
q[q > 0] = np.nan
_jdrs['t'] = q
_jdrs['jdrs'] = _jdrs.m.rolling(n).count() / _jdrs.t.rolling(n).count()
_jdrs.drop(columns=['p', 'q', 'm', 't'], inplace=True)
_jdrs.reset_index(inplace=True)
return _jdrs
def jdrs(df, n=20):
"""
阶段弱势指标 jdrs(20)
JDRS=(统计N天中个股收盘价<开盘价,且指数收盘价>开盘价的天数)/(统计N天中指数收盘价>开盘价的天数)
"""
ind = ts.get_k_data("sh000001", start=df.date.iloc[0], end=df.date.iloc[-1])
sd = df.copy()
sd.set_index('date', inplace=True)
ind.set_index('date', inplace=True)
_jdrs = pd.DataFrame(index=df.date)
q = ind.close - ind.open
_jdrs['p'] = sd.close - sd.open
_jdrs['q'] = q
_jdrs['m'] = _jdrs.apply(lambda x: 1 if (x.p < 0 and x.q > 0) else np.nan, axis=1)
q[q < 0] = np.nan
_jdrs['t'] = q
_jdrs['jdrs'] = _jdrs.m.rolling(n).count() / _jdrs.t.rolling(n).count()
_jdrs.drop(columns=['p', 'q', 'm', 't'], inplace=True)
_jdrs.reset_index(inplace=True)
return _jdrs
def zdzb(df, n=125, m=5, k=20):
"""
筑底指标 zdzb(125,5,20)
A=(统计N1日内收盘价>=前收盘价的天数)/(统计N1日内收盘价<前收盘价的天数)
B=MA(A,N2)
D=MA(A,N3)
"""
_zdzb = pd.DataFrame()
_zdzb['date'] = df.date
p = df.close - df.close.shift(1)
q = p.copy()
p[p < 0] = np.nan
q[q >= 0] = np.nan
_zdzb['a'] = p.rolling(n).count() / q.rolling(n).count()
_zdzb['b'] = _zdzb.a.rolling(m).mean()
_zdzb['d'] = _zdzb.a.rolling(k).mean()
return _zdzb
def atr(df, n=14):
"""
真实波幅 atr(14)
TR:MAX(MAX((HIGH-LOW),ABS(REF(CLOSE,1)-HIGH)),ABS(REF(CLOSE,1)-LOW))
ATR:MA(TR,N)
"""
_atr = pd.DataFrame()
_atr['date'] = df.date
# _atr['tr'] = np.maximum(df.high - df.low, (df.close.shift(1) - df.low).abs())
# _atr['tr'] = np.maximum.reduce([df.high - df.low, (df.close.shift(1) - df.high).abs(), (df.close.shift(1) - df.low).abs()])
_atr['tr'] = np.vstack([df.high - df.low, (df.close.shift(1) - df.high).abs(), (df.close.shift(1) - df.low).abs()]).max(axis=0)
_atr['atr'] = _atr.tr.rolling(n).mean()
return _atr
def mass(df, n=9, m=25):
"""
梅丝线 mass(9,25)
AHL=MA((H-L),N1)
BHL= MA(AHL,N1)
MASS=SUM(AHL/BHL,N2)
H:表示最高价;L:表示最低价
"""
_mass = pd.DataFrame()
# -*- coding: utf-8 -*-
import os
import pandas as pd
import plotly.graph_objects as go
def file_df_to_count_df(df,
ID_SUSCEPTIBLE=1,
ID_INFECTED=0,
ID_RECOVERED=2):
pedestrian_ids = df['pedestrianId'].unique()
sim_times = df['simTime'].unique()
group_counts = pd.DataFrame(columns=['simTime', 'group-s', 'group-i', 'group-r'])
group_counts['simTime'] = sim_times
group_counts['group-s'] = 0
group_counts['group-i'] = 0
group_counts['group-r'] = 0
for pid in pedestrian_ids:
simtime_group = df[df['pedestrianId'] == pid][['simTime', 'groupId-PID5']].values
current_state = ID_SUSCEPTIBLE
group_counts.loc[group_counts['simTime'] >= 0, 'group-s'] += 1
for (st, g) in simtime_group:
if g != current_state and g == ID_INFECTED and current_state == ID_SUSCEPTIBLE:
current_state = g
group_counts.loc[group_counts['simTime'] > st, 'group-s'] -= 1
group_counts.loc[group_counts['simTime'] > st, 'group-i'] += 1
if g != current_state and g == ID_RECOVERED and current_state == ID_INFECTED:
group_counts.loc[group_counts['simTime'] > st, 'group-i'] -= 1
group_counts.loc[group_counts['simTime'] > st, 'group-r'] += 1
break
return group_counts
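# A hypothetical usage sketch: the tiny frame below mimics the columns the
# function expects (pedestrianId, simTime, groupId-PID5) with made-up values.
def _example_count_df():
    demo = pd.DataFrame({'pedestrianId': [1, 1, 2, 2],
                         'simTime': [0.0, 1.0, 0.0, 1.0],
                         'groupId-PID5': [1, 0, 1, 1]})
    return file_df_to_count_df(demo)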
def create_folder_data_scatter(folder):
"""
Create scatter plot from folder data.
:param folder:
:return:
"""
file_path = os.path.join(folder, "SIRinformation.csv")
if not os.path.exists(file_path):
return None
data = pd.read_csv(file_path, delimiter=" ")
import tweepy
import pandas
import config
import matplotlib.pyplot as plt
from textblob import TextBlob
# Initialization
tweetDataframe = pandas.DataFrame()
sentimentDataframe = pandas.DataFrame()
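# A hypothetical continuation sketch (not the original script): score a couple of
# example texts with TextBlob. Pulling real tweets would additionally need the
# credentials stored in config plus a tweepy client, which are omitted here.
def score_texts(texts=("great product", "terrible service")):
    rows = [{'text': t, 'polarity': TextBlob(t).sentiment.polarity} for t in texts]
    return pandas.DataFrame(rows)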
import fileinput
import glob
import os
from Bio import SeqIO
import filelock
import numpy as np
import pandas as pd
from utils.data_utils import is_valid_seq, seqs_to_onehot
def merge_dfs(in_rgx, out_path, index_cols, groupby_cols, ignore_cols):
"""
Merge multiple pandas DataFrames into one and provides a summary file.
Args:
- in_rgx: regex for input filepath
- out_path: output path
- index_cols: index column names for DataFrame
- groupby_cols: groupby column names in the summary step
- ignore_cols: columns to be ignored in the summary step
"""
lock = filelock.FileLock(out_path + '.lock')
with lock:
frames = []
for f in glob.glob(in_rgx):
try:
frames.append(pd.read_csv(f))
os.remove(f)
except pd.errors.EmptyDataError:
continue
df = pd.concat(frames, axis=0, sort=True).sort_values(index_cols)
df.set_index(index_cols).to_csv(out_path, float_format='%.4f')
#df = df.drop(columns=ignore_cols)
#means = df.groupby(groupby_cols).mean()
#stds = df.groupby(groupby_cols).std()
#summary = pd.merge(means, stds, on=groupby_cols, suffixes=('_mean', '_std'))
#summary = summary.sort_index(axis=1)
#save_path = out_path.replace(".csv", "_summary.csv")
#summary.to_csv(save_path, float_format='%.4f')
#return summary
def parse_var(s):
"""
Parse a key, value pair, separated by '='
That's the reverse of ShellArgs.
On the command line (argparse) a declaration will typically look like:
foo=hello
or
foo="hello world"
"""
items = s.split('=')
key = items[0].strip() # we remove blanks around keys, as is logical
if len(items) > 1:
# rejoin the rest:
value = '='.join(items[1:])
return (key, value)
def parse_vars(items):
"""
Parse a series of key-value pairs and return a dictionary
"""
d = {}
if items:
for item in items:
key, value = parse_var(item)
try:
d[key] = float(value)
except:
d[key] = value
return d
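# An illustrative call (values are made up): numeric strings become floats,
# anything else stays a string.
def _example_parse_vars():
    return parse_vars(['lr=0.1', 'tag=baseline'])  # {'lr': 0.1, 'tag': 'baseline'}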
def load_data_split(dataset_name, split_id, seed=0, ignore_gaps=False):
data_path = os.path.join('data', dataset_name, 'data.csv')
# Sample shuffles the DataFrame.
data_pre_split = pd.read_csv(data_path).sample(frac=1.0, random_state=seed)
if not ignore_gaps:
is_valid = data_pre_split['seq'].apply(is_valid_seq)
data_pre_split = data_pre_split[is_valid]
if split_id == -1:
return data_pre_split
return np.array_split(data_pre_split, 3)[split_id]
def get_wt_log_fitness(dataset_name):
data_path = os.path.join('data', dataset_name, 'data.csv')
data = pd.read_csv(data_path)
try:
return data[data.n_mut == 0].log_fitness.mean()
except:
return data.log_fitness.mean()
def get_log_fitness_cutoff(dataset_name):
data_path = os.path.join('data', dataset_name, 'log_fitness_cutoff.npy')
return np.loadtxt(data_path).item()
def count_rows(filename_glob_pattern):
cnt = 0
for f in sorted(glob.glob(filename_glob_pattern)):
with open(f) as fp:
for line in fp:
cnt += 1
return cnt
def load_rows_by_numbers(filename_glob_pattern, line_numbers):
lns_sorted = sorted(line_numbers)
lns_idx = np.argsort(line_numbers)
n_rows = len(line_numbers)
current_ln = 0 # current (accumulated) line number in opened file
    j = 0  # index into lns_sorted
rows = None
for f in sorted(glob.glob(filename_glob_pattern)):
with open(f) as fp:
for line in fp:
while j < n_rows and lns_sorted[j] == current_ln:
thisrow = np.array([float(x) for x in line.split(' ')])
if rows is None:
rows = np.full((n_rows, len(thisrow)), np.nan)
rows[lns_idx[j], :] = thisrow
j += 1
current_ln += 1
assert j == n_rows, (f"Expected {n_rows} rows, found {j}. "
f"Scanned {current_ln} lines from {filename_glob_pattern}.")
return rows
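# Hedged sketch of load_rows_by_numbers (the shard pattern is an assumption):
# it scans the sorted shard files once and returns the requested rows in the
# same order as `line_numbers`, even when those numbers are not sorted.
def _example_load_rows_by_numbers_usage():
    """Fetch global rows 0, 10 and 5 (in that order) from the shards."""
    return load_rows_by_numbers('embeddings-*-of-*', [0, 10, 5])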
def load(filename_glob_pattern):
files = sorted(glob.glob(filename_glob_pattern))
if len(files) == 0:
print("No files found for", filename_glob_pattern)
return np.loadtxt(fileinput.input(files))
def save(filename_pattern, data, entries_per_file=2000):
n_files = int(data.shape[0] / entries_per_file)
if data.shape[0] % entries_per_file > 0:
n_files += 1
for i in range(n_files):
filename = filename_pattern + f'-{i:03d}-of-{n_files:03d}'
l_idx = i * entries_per_file
r_idx = min(l_idx + entries_per_file, data.shape[0])
np.savetxt(filename, data[l_idx:r_idx])
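# Hedged round-trip sketch for save()/load() (the file prefix is an
# assumption): data is split into shards of `entries_per_file` rows and can
# be read back in full with load().
def _example_save_load_roundtrip():
    """Write a small array to two shards, then read it back."""
    data = np.arange(12.0).reshape(6, 2)
    save('demo_embeddings', data, entries_per_file=4)   # -> 2 shard files
    restored = load('demo_embeddings-*-of-*')
    assert np.allclose(data, restored)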
def load_and_filter_seqs(data_filename):
"""
    data_filename: csv file with the sequences to load and filter
"""
df = | pd.read_csv(data_filename) | pandas.read_csv |
import pandas as pd
import dateutil
from lusidtools.lpt import lse
from lusidtools.lpt import stdargs
from lusidtools.lpt import qry_instr_ids, lpt
from .either import Either
import re
import urllib.parse
rexp = re.compile(r".*page=([^=']{10,}).*")
TOOLNAME = "instr_list"
TOOLTIP = "List all instruments"
def parse(extend=None, args=None):
return (
stdargs.Parser("Query Instruments", ["filename", "limit", "properties"])
.add("--batch", type=int, default=2000)
.add("--filter")
.extend(extend)
.parse(args)
)
def process_args(api, args):
def list_instruments(ids):
id_columns = {"identifiers.KEY:{}".format(v): v for v in ids["Id"].values}
def fetch_page(page_token):
return api.call.list_instruments(
limit=args.batch,
page=page_token,
instrument_property_keys=args.properties,
filter=args.filter,
)
results = []
def got_page(result):
columns = ["lusid_instrument_id", "name"]
columns.extend(sorted(id_columns.keys()))
columns.extend(f"P:{c}" for c in args.properties)
df = lpt.to_df(result, columns).dropna(axis=1, how="all")
df.rename(columns=id_columns, inplace=True)
results.append(df)
links = [l for l in result.content.links if l.relation == "NextPage"]
if len(links) > 0:
match = rexp.match(links[0].href)
if match:
print("{} {}".format(len(results), match.group(1)))
return urllib.parse.unquote(match.group(1))
return None
page = Either(None)
while True:
page = fetch_page(page.right).bind(got_page)
if page.is_left():
return page
            if page.right is None:
break
return lpt.trim_df(
| pd.concat(results, ignore_index=True, sort=False) | pandas.concat |
"""Cancer data classification
Classifying the Wisconsin cancer data from the UCI repository
into benign and malignant classes with k-Nearest Neighbors
"""
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
# File : cancer_knn.py
# find whether cancer is malignant or benign using kNN
import time
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics, model_selection, neighbors, preprocessing
warnings.filterwarnings('ignore')
# get the initial time
t_init = time.time()
# url for the Wisconsin Breast Cancer data from UCI
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'
# set the names of the columns as pulled from the file accompanying the dataset
# which is breast-cancer-wisconsin.names
names = [
"SampleCodeNumber", "ClumpThickness", "UniformityCellSize",
"UniformityCellShape", "MarginalAdhesion", "SingleEpithelialCellSize",
"BareNuclei", "BlandChromatin", "NormalNucleoli", "Mitoses", "Class"
]
print('[INFO] gathering the {} data'.format(url.split('/')[-2]))
df = pd.read_csv(url, names=names)
print('[INFO] shape of the cancer data {}'.format(df.shape))
print('[INFO] information about the cancer database \n{}'.format(df.info()))
print('[INFO] report of the data at a fine grained level \n{}'.format(
df.describe()))
# As per the documentation note of the cancer dataset, there are some
# missing attribute values. There are 16 instances in Groups 1 to 6 that
# contain a single missing (i.e., unavailable) attribute value, now
# denoted by "?".
missing_counts = df.apply(lambda x: x == '?', axis=1).sum()
null_counts = df.apply(lambda x: x.isnull().values.ravel().sum())
isnull_predicate = df.isnull().values.any()
print('[INFO] report of the missing attribute information \n{}'.format(
missing_counts))
print('[INFO] BareNuclei attribute information details \n{}'.format(
df.groupby('BareNuclei').BareNuclei.count()))
print('[INFO] does the dataset has any null values ? {}'.format(
isnull_predicate))
print(
'[INFO] null attribute value information if any \n{}'.format(null_counts))
# As per the above result, BareNuclei has 16 values equal to "?" for which
# we may either discard the rows with missing values or replace them with
# the most common or frequent values in the dataset given by
# df[df.BareNuclei != '?']
# most frequent value of BareNuclei from the table
frequent_value = df['BareNuclei'].value_counts().index[0]
print('[INFO] replacing the ? with most frequent value of {}'.format(
frequent_value))
df['BareNuclei'] = df['BareNuclei'].replace('?', np.NaN)
df['BareNuclei'] = df['BareNuclei'].fillna(frequent_value)
df['BareNuclei'] = df['BareNuclei'].apply(lambda x: int(x))
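# Optional sanity check (not part of the original script): after the
# replacement above, BareNuclei should be an integer column with no
# remaining '?' placeholders.
print('[INFO] BareNuclei dtype after cleaning: {}, value range: {}-{}'.format(
    df['BareNuclei'].dtype, df['BareNuclei'].min(), df['BareNuclei'].max()))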
# Heatmap of the correlation matrix calculated from pandas with index of
# the nlargest = 10
# nlargest represents the n largest values sorted in decreasing order.
plt.figure(1)
fields = df.corr().nlargest(10, 'Class')['Class'].index
corr = df[fields].corr()
sns.heatmap(corr, annot=True, fmt=".2f", linewidths=0.4)
plt.title('Heatmap of Cancer Data Correlation Matrix')
plt.show()
# distribute the dataset between training data and target/labels as under
X = df.drop(['SampleCodeNumber', 'Class'], axis=1)
y = df['Class']
# here we are representing class label 2 as 'benign' and 4 as 'malignant'
df.Class.replace([2, 4], ['benign', 'malignant'], inplace=True)
print('[INFO] target class labels for cancer {}'.format(np.unique(y)))
print('[INFO] count of benign and malignant classes \n{}'.format(
df.Class.value_counts()))
plt.figure(2)
sns.countplot(df['Class'],
label='Count',
palette=sns.color_palette("deep", 10))
plt.show()
# as per the accompanying documentation, the class labels 2 and 4 correspond
# to cancer states, Benign and Malignant as under
# class label = 2 -> Benign
# class label = 4 -> Malignant
# we can encode the labels with scikit learn LabelEncoder though it's
# not needed in this case as it's usually applied in the cases where the
# target labels are all strings
le = preprocessing.LabelEncoder()
labels = le.fit_transform(df['Class'])
print('[INFO] scikit encoded labels {}'.format(np.unique(labels)))
# get a box plot of all the parameters
plt.figure(3)
df.drop('Class',
axis=1).plot(kind='box',
subplots=True,
layout=(4, 3),
sharex=False,
sharey=False,
figsize=(9, 9),
title='Box Plot of individual cancer input variables')
plt.show()
# Feature Scaling - Standardization
# As part of optimizing the algorithm, we can apply feature scaling by
# standardizing the features with the StandardScaler class from sklearn's
# preprocessing module. Scaling ensures that the features have zero mean
# and unit standard deviation, which helps all features contribute equally.
scaler = preprocessing.StandardScaler()
print('[INFO] re-scaling the features with options {}'.format(
scaler.get_params()))
X_std_array = scaler.fit_transform(X.values)
X_std = | pd.DataFrame(X_std_array, index=X.index, columns=X.columns) | pandas.DataFrame |
"""Python module for manipulating datasets."""
from __future__ import absolute_import
import random
import os
import os.path
import logging
import multiprocessing
from functools import partial
import pandas as pd
import numpy as np
from sklearn import cross_validation, preprocessing
from sklearn.decomposition import PCA
from . import fileutils
def first(iterable):
"""Returns the first element of an iterable"""
for element in iterable:
return element
class SegmentCrossValidator:
"""Wrapper for the scikit_learn CV generators to generate folds on a segment basis."""
def __init__(self, dataframe, base_cv=None, **cv_kwargs):
# We create a copy of the dataframe with a new last level
# index which is an enumeration of the rows (like proper indices)
self.all_segments = pd.DataFrame({'Preictal': dataframe['Preictal'], 'i': np.arange(len(dataframe))})
self.all_segments.set_index('i', append=True, inplace=True)
# Now create a series with only the segments as rows. This is what we will pass into the wrapped cross
# validation generator
self.segments = self.all_segments['Preictal'].groupby(level='segment').first()
self.segments.sort(inplace=True)
if base_cv is None:
self.cv = cross_validation.StratifiedKFold(self.segments, **cv_kwargs)
else:
self.cv = base_cv(self.segments, **cv_kwargs)
def __iter__(self):
"""
Return a generator object which returns a pair of indices for every iteration.
"""
for training_indices, test_indices in self.cv:
# The indices returned from self.cv are relative to the segment name data series, we pick out the segment
# names they belong to
training_segments = list(self.segments[training_indices].index)
test_segments = list(self.segments[test_indices].index)
# Now that we have the segment names, we pick out the rows in the properly indexed dataframe
all_training_indices = self.all_segments.loc[training_segments]
all_test_indices = self.all_segments.loc[test_segments]
# Now pick out the values for only the 'i' level index of the rows which matched the segment names
original_df_training_indices = all_training_indices.index.get_level_values('i')
original_df_test_indices = all_test_indices.index.get_level_values('i')
yield original_df_training_indices, original_df_test_indices
def __len__(self):
return len(self.cv)
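# Hedged usage sketch (added for clarity): `dataframe` is assumed to be a
# feature frame with a 'Preictal' column and a 'segment' index level, and
# `classifier` any scikit-learn style estimator. The wrapper yields row-index
# pairs that never split a segment across train and test.
def _example_segment_cv_usage(dataframe, classifier):
    """Fit and score a classifier on segment-wise folds."""
    cv = SegmentCrossValidator(dataframe, n_folds=5)
    for train_idx, test_idx in cv:
        train_rows = dataframe.iloc[train_idx]
        test_rows = dataframe.iloc[test_idx]
        classifier.fit(train_rows.drop('Preictal', axis=1), train_rows['Preictal'])
        classifier.score(test_rows.drop('Preictal', axis=1), test_rows['Preictal'])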
def mean(*dataframes):
"""Returns the means of the given dataframe(s), calculated without
    concatenating the frames."""
lengths = sum([len(dataframe) for dataframe in dataframes])
sums = dataframes[0].sum()
for dataframe in dataframes[1:]:
sums += dataframe.sum()
means = sums / lengths
return means
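# Small deterministic sketch of mean() (added for clarity): the result equals
# the column means of the concatenated frames, without building the
# concatenated frame in memory.
def _example_mean_usage():
    """mean() over two frames matches pd.concat([...]).mean()."""
    a = pd.DataFrame({'x': [1.0, 2.0]})
    b = pd.DataFrame({'x': [3.0, 4.0, 5.0]})
    assert mean(a, b)['x'] == 3.0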
def transform(transformation, interictal, preictal, test):
"""
Performs a transformation on the supplied *interictal*, *preictal* and *test* Pandas dataframes.
:param transformation: An object that implements the fit_transform function, which applies a transformation
to a numpy array.
:param interictal: Pandas Dataframe containing the interictal samples
:param preictal: Pandas Dataframe containing the preictal samples
:param test: Pandas Dataframe containing the test samples
:return: A List containing the three input Dataframes, with the *transformation* applied to them
"""
if not hasattr(transformation, 'fit_transform'):
logging.warning(
"Transformation {} has not fit_transform function, no transformation applied".format(transformation))
return [interictal, preictal, test]
interictal = interictal.drop('Preictal', axis=1)
preictal = preictal.drop('Preictal', axis=1)
# Keep structure info as we will need to rebuild the dataframes
interictal_index = interictal.index
interictal_columns = interictal.columns
preictal_index = preictal.index
preictal_columns = preictal.columns
test_index = test.index
test_columns = test.columns
inter_samples = interictal.shape[0]
# Concatenate the training data as we will use those for
# fitting the scaler
# This can be quite memory intensive, especially if the
# dataframes are big
training_frame = pd.concat([interictal, preictal], axis=0)
# Perform the transformation
transformed_train = transformation.fit_transform(training_frame)
transformed_test = transformation.transform(test)
# Rebuild the dataframes
interictal_tr_array = transformed_train[:inter_samples]
preictal_tr_array = transformed_train[inter_samples:]
# A transformation like PCA changes the columns so we have to
# rebuild them
# TODO: Is there any point in keeping the columns?
# Wouldn't None work either way?
if len(interictal_columns) != interictal_tr_array.shape[1]:
interictal_columns = None
preictal_columns = None
test_columns = None
new_interictal = pd.DataFrame(
interictal_tr_array, index=interictal_index,
columns=interictal_columns)
new_preictal = pd.DataFrame(
preictal_tr_array, index=preictal_index,
columns=preictal_columns)
new_test = pd.DataFrame(
transformed_test, index=test_index,
columns=test_columns)
new_interictal['Preictal'] = 0
new_preictal['Preictal'] = 1
return [new_interictal, new_preictal, new_test]
def pca_transform(dataframes):
"""Performs PCA transformation on the *dataframes*"""
interictal, preictal, test = dataframes
# Perform the PCA
pca = PCA()
return transform(pca, interictal, preictal, test)
def scale(dataframes):
"""
Returns standardized (mean 0, standard deviation 1) versions of the given data frames.
:param dataframes: A sequence containing the 3 interictal, preictal and test dataframes.
Note that they should be unpacked in this exact order.
:return: A list of the standardized dataframes
"""
interictal, preictal, test = dataframes
# Perform the scaling
scaler = preprocessing.StandardScaler()
return transform(scaler, interictal, preictal, test)
def split_experiment_data(interictal,
preictal,
training_ratio,
do_segment_split=True,
random_state=None):
"""
Splits the interictal and preictal training data into a training and test set.
:param interictal: A data frame containing the interictal samples.
:param preictal: A data frame containing the preictal samples.
:param training_ratio: A value in the range (0,1) which indicates the ratio of samples to use for training.
:param do_segment_split: If True, will split the dataframes based on segment names.
:param random_state: A set seed to ensure reproducibility of the experiments.
:return: A partition of the concatenated interictal and preictal data frames into
             two separate and disjoint data frames, such that the first partition
contains a ratio of *training_ratio* of all the data.
"""
dataset = merge_interictal_preictal(interictal, preictal)
return split_dataset(dataset,
training_ratio=training_ratio,
do_segment_split=do_segment_split,
random_state=random_state)
def merge_interictal_preictal(interictal, preictal):
"""
Merges the *interictal* and *preictal* data frames to a single data frame. Also sorts the multilevel index.
:param interictal: A data frame containing the interictal samples.
:param preictal: A data frame containing the preictal samples.
:return: A data frame containing both interictal and preictal data. The multilevel index of the data frame
is sorted.
"""
logging.info("Merging interictal and preictal datasets")
try:
preictal.sortlevel('segment', inplace=True)
if isinstance(preictal.columns, pd.MultiIndex):
preictal.sortlevel(axis=1, inplace=True)
interictal.sortlevel('segment', inplace=True)
if isinstance(interictal.columns, pd.MultiIndex):
interictal.sortlevel(axis=1, inplace=True)
except TypeError:
logging.warning("TypeError when trying to merge interictal and preictal sets.")
dataset = | pd.concat((interictal, preictal)) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Authors: <NAME>
UNESCO-IHE 2016
Contact: <EMAIL>
Repository: https://github.com/wateraccounting/wa
Module: Sheets/sheet1
"""
import os
import pandas as pd
import time
import xml.etree.ElementTree as ET
import subprocess
def create_sheet3(basin, period, units, data, output, template=False):
"""
Keyword arguments:
basin -- The name of the basin
period -- The period of analysis
units -- A list with the units of the data:
[<water consumption>, <land productivity>, <water productivity>]
data -- A csv file that contains the water data. The csv file has to
            follow a specific format. A sample csv is available in the link:
https://github.com/wateraccounting/wa/tree/master/Sheets/csv
output -- A list (length 2) with the output paths of the jpg files
for the two parts of the sheet
template -- A list (length 2) of the svg files of the sheet.
Use False (default) to use the standard svg files.
Example:
from wa.Sheets import *
create_sheet3(basin='Helmand', period='2007-2011',
units=['km3/yr', 'kg/ha/yr', 'kg/m3'],
data=[r'C:\Sheets\csv\Sample_sheet3_part1.csv',
r'C:\Sheets\csv\Sample_sheet3_part2.csv'],
output=[r'C:\Sheets\sheet_3_part1.jpg',
r'C:\Sheets\sheet_3_part2.jpg'])
"""
# Read table
df1 = pd.read_csv(data[0], sep=';')
df2 = pd.read_csv(data[1], sep=';')
# Data frames
df1c = df1.loc[df1.USE == "CROP"]
df1n = df1.loc[df1.USE == "NON-CROP"]
df2c = df2.loc[df2.USE == "CROP"]
df2n = df2.loc[df2.USE == "NON-CROP"]
# Read csv file part 1
crop_r01c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c01 = crop_r02c01 + crop_r03c01
crop_r01c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c02 = crop_r02c02 + crop_r03c02
crop_r01c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c03 = crop_r02c03 + crop_r03c03
crop_r01c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c04 = crop_r02c04 + crop_r03c04
crop_r01c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c05 = crop_r02c05 + crop_r03c05
crop_r01c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c06 = crop_r02c06 + crop_r03c06
crop_r01c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c07 = crop_r02c07 + crop_r03c07
crop_r01c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c08 = crop_r02c08 + crop_r03c08
crop_r01c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c09 = crop_r02c09 + crop_r03c09
crop_r01c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c10 = crop_r02c10 + crop_r03c10
crop_r01c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c11 = crop_r02c11 + crop_r03c11
crop_r01c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c12 = crop_r02c12 + crop_r03c12
noncrop_r01c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c01 = noncrop_r02c01 + noncrop_r03c01
noncrop_r01c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c02 = noncrop_r02c02 + noncrop_r03c02
crop_r01 = pd.np.nansum([crop_r01c01, crop_r01c02, crop_r01c03,
crop_r01c04, crop_r01c05, crop_r01c06,
crop_r01c07, crop_r01c08, crop_r01c09,
crop_r01c10, crop_r01c11, crop_r01c12])
crop_r02 = pd.np.nansum([crop_r02c01, crop_r02c02, crop_r02c03,
crop_r02c04, crop_r02c05, crop_r02c06,
crop_r02c07, crop_r02c08, crop_r02c09,
crop_r02c10, crop_r02c11, crop_r02c12])
crop_r03 = pd.np.nansum([crop_r03c01, crop_r03c02, crop_r03c03,
crop_r03c04, crop_r03c05, crop_r03c06,
crop_r03c07, crop_r03c08, crop_r03c09,
crop_r03c10, crop_r03c11, crop_r03c12])
crop_r04 = crop_r02 + crop_r03
noncrop_r01 = pd.np.nansum([noncrop_r01c01, noncrop_r01c02])
noncrop_r02 = pd.np.nansum([noncrop_r02c01, noncrop_r02c02])
noncrop_r03 = pd.np.nansum([noncrop_r03c01, noncrop_r03c02])
noncrop_r04 = noncrop_r02 + noncrop_r03
ag_water_cons = crop_r01 + crop_r04 + noncrop_r01 + noncrop_r04
# Read csv file part 2
# Land productivity
lp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
# Water productivity
wp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
# Calculations & modify svgs
if not template:
path = os.path.dirname(os.path.abspath(__file__))
svg_template_path_1 = os.path.join(path, 'svg', 'sheet_3_part1.svg')
svg_template_path_2 = os.path.join(path, 'svg', 'sheet_3_part2.svg')
else:
svg_template_path_1 = os.path.abspath(template[0])
svg_template_path_2 = os.path.abspath(template[1])
tree1 = ET.parse(svg_template_path_1)
tree2 = ET.parse(svg_template_path_2)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Titles
xml_txt_box = tree1.findall('''.//*[@id='basin']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree1.findall('''.//*[@id='period']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree1.findall('''.//*[@id='units']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 1: Agricultural water consumption (' + units[0] + ')'
xml_txt_box = tree2.findall('''.//*[@id='basin2']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree2.findall('''.//*[@id='period2']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree2.findall('''.//*[@id='units2']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 2: Land productivity (' + units[1] + ') and water productivity (' + units[2] + ')'
# Part 1
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c01']''')[0]
if not pd.isnull(crop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c02']''')[0]
if not pd.isnull(crop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c03']''')[0]
if not pd.isnull(crop_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c04']''')[0]
if not pd.isnull(crop_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c05']''')[0]
if not pd.isnull(crop_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c06']''')[0]
if not pd.isnull(crop_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c07']''')[0]
if not pd.isnull(crop_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c08']''')[0]
if not pd.isnull(crop_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c09']''')[0]
if not pd.isnull(crop_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c10']''')[0]
if not pd.isnull(crop_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c11']''')[0]
if not pd.isnull(crop_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c12']''')[0]
if not pd.isnull(crop_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01']''')[0]
if not pd.isnull(crop_r01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c01']''')[0]
if not pd.isnull(crop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c02']''')[0]
if not pd.isnull(crop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c03']''')[0]
if not pd.isnull(crop_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c04']''')[0]
if not pd.isnull(crop_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c05']''')[0]
if not pd.isnull(crop_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c06']''')[0]
if not pd.isnull(crop_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c07']''')[0]
if not pd.isnull(crop_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c08']''')[0]
if not pd.isnull(crop_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c09']''')[0]
if not pd.isnull(crop_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c10']''')[0]
if not pd.isnull(crop_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c11']''')[0]
if not pd.isnull(crop_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c12']''')[0]
if not pd.isnull(crop_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02']''')[0]
if not pd.isnull(crop_r02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c01']''')[0]
if not pd.isnull(crop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c02']''')[0]
if not pd.isnull(crop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c03']''')[0]
if not pd.isnull(crop_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c04']''')[0]
if not pd.isnull(crop_r03c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c05']''')[0]
if not pd.isnull(crop_r03c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c06']''')[0]
if not pd.isnull(crop_r03c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c07']''')[0]
if not pd.isnull(crop_r03c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c08']''')[0]
if not pd.isnull(crop_r03c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c09']''')[0]
if not pd.isnull(crop_r03c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c10']''')[0]
if not pd.isnull(crop_r03c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c11']''')[0]
if not pd.isnull(crop_r03c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c12']''')[0]
if not pd.isnull(crop_r03c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03']''')[0]
if not pd.isnull(crop_r03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c01']''')[0]
if not pd.isnull(crop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c02']''')[0]
if not pd.isnull(crop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c03']''')[0]
if not pd.isnull(crop_r04c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c04']''')[0]
if not pd.isnull(crop_r04c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c05']''')[0]
if not pd.isnull(crop_r04c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c06']''')[0]
if not pd.isnull(crop_r04c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c07']''')[0]
if not pd.isnull(crop_r04c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c08']''')[0]
if not pd.isnull(crop_r04c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c09']''')[0]
if not pd.isnull(crop_r04c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c10']''')[0]
if not pd.isnull(crop_r04c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c11']''')[0]
if not pd.isnull(crop_r04c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c12']''')[0]
if not pd.isnull(crop_r04c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04']''')[0]
if not pd.isnull(crop_r04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c01']''')[0]
if not pd.isnull(noncrop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c02']''')[0]
if not pd.isnull(noncrop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01']''')[0]
if not pd.isnull(noncrop_r01) and noncrop_r01 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c01']''')[0]
if not pd.isnull(noncrop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c02']''')[0]
if not pd.isnull(noncrop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02']''')[0]
if not pd.isnull(noncrop_r02) and noncrop_r02 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c01']''')[0]
if not pd.isnull(noncrop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c02']''')[0]
if not pd.isnull(noncrop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03']''')[0]
if not pd.isnull(noncrop_r03) and noncrop_r03 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c01']''')[0]
if not pd.isnull(noncrop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c02']''')[0]
if not pd.isnull(noncrop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04']''')[0]
if not pd.isnull(noncrop_r04) and noncrop_r04 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
# Part 2
xml_txt_box = tree1.findall('''.//*[@id='ag_water_cons']''')[0]
if not pd.isnull(ag_water_cons):
xml_txt_box.getchildren()[0].text = '%.2f' % ag_water_cons
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c01']''')[0]
if not pd.isnull(lp_r01c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c02']''')[0]
if not pd.isnull(lp_r01c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c03']''')[0]
if not pd.isnull(lp_r01c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c04']''')[0]
if not pd.isnull(lp_r01c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c05']''')[0]
if not pd.isnull(lp_r01c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c06']''')[0]
if not pd.isnull(lp_r01c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c07']''')[0]
if not pd.isnull(lp_r01c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c08']''')[0]
if not pd.isnull(lp_r01c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c09']''')[0]
if not pd.isnull(lp_r01c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c10']''')[0]
if not pd.isnull(lp_r01c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c11']''')[0]
if not pd.isnull(lp_r01c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c12']''')[0]
if not pd.isnull(lp_r01c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c01']''')[0]
if not pd.isnull(lp_r02c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c02']''')[0]
if not pd.isnull(lp_r02c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c03']''')[0]
if not pd.isnull(lp_r02c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c04']''')[0]
if not pd.isnull(lp_r02c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c05']''')[0]
if not pd.isnull(lp_r02c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c06']''')[0]
if not pd.isnull(lp_r02c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c07']''')[0]
if not pd.isnull(lp_r02c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c08']''')[0]
if not pd.isnull(lp_r02c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c09']''')[0]
if not pd.isnull(lp_r02c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c10']''')[0]
if not pd.isnull(lp_r02c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c11']''')[0]
if not pd.isnull(lp_r02c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c12']''')[0]
if not pd.isnull(lp_r02c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c01']''')[0]
if not pd.isnull(lp_r03c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c02']''')[0]
if not pd.isnull(lp_r03c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c03']''')[0]
if not pd.isnull(lp_r03c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c04']''')[0]
if not pd.isnull(lp_r03c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c05']''')[0]
if not pd.isnull(lp_r03c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c06']''')[0]
if not pd.isnull(lp_r03c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c07']''')[0]
if not pd.isnull(lp_r03c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c08']''')[0]
if not pd.isnull(lp_r03c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c09']''')[0]
if not pd.isnull(lp_r03c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c10']''')[0]
if not pd.isnull(lp_r03c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c11']''')[0]
if not pd.isnull(lp_r03c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
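# --- Editor's sketch (not part of the original script) ----------------------
# The block above repeats the same null-check / format / write pattern once per
# table cell. A small helper like the one below could collapse it into a loop.
# `tree2` and the hypothetical `lp_values` mapping of (row, col) -> value are
# assumptions based on the surrounding code, not names taken from the source.
import pandas as pd

def fill_cell(tree, cell_id, value, fmt='%.0f', missing='-'):
    """Write a formatted value (or a placeholder when null) into the text box with the given id."""
    box = tree.findall(".//*[@id='%s']" % cell_id)[0]
    box.getchildren()[0].text = fmt % value if not pd.isnull(value) else missing

# Hypothetical usage, iterating over rows/columns instead of one block per cell:
# for r in range(1, 4):
#     for c in range(1, 13):
#         fill_cell(tree2, 'lp_r%02dc%02d' % (r, c), lp_values[(r, c)])
# -----------------------------------------------------------------------------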
#!/usr/bin/env python3
"""
Pre-processing pipeline for Google speech_commands
"""
import os
import re
from pathlib import Path
from typing import Any, Dict
import luigi
import pandas as pd
import soundfile as sf
from tqdm import tqdm
import hearpreprocess.pipeline as pipeline
import hearpreprocess.util.luigi as luigi_util
from hearpreprocess.pipeline import (
TRAIN_PERCENTAGE,
TRAINVAL_PERCENTAGE,
VALIDATION_PERCENTAGE,
)
WORDS = ["down", "go", "left", "no", "off", "on", "right", "stop", "up", "yes"]
BACKGROUND_NOISE = "_background_noise_"
UNKNOWN = "_unknown_"
SILENCE = "_silence_"
generic_task_config = {
"task_name": "speech_commands",
"version": "v0.0.2",
"embedding_type": "scene",
"prediction_type": "multiclass",
"split_mode": "trainvaltest",
"sample_duration": 1.0,
"evaluation": ["top1_acc"],
"download_urls": [
{
"split": "train",
"url": "http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz", # noqa: E501
"md5": "6b74f3901214cb2c2934e98196829835",
},
{
"split": "test",
"url": "http://download.tensorflow.org/data/speech_commands_test_set_v0.02.tar.gz", # noqa: E501
"md5": "854c580ee90bff80c516491c84544e32",
},
],
"default_mode": "5h",
# Different modes for preprocessing this dataset
# We use all modes EXCEPT small, unless the "--small" flag is used.
"modes": {
"5h": {
# No more than 5 hours of audio (training + validation)
"max_task_duration_by_split": {
"train": 3600 * 5 * TRAIN_PERCENTAGE / TRAINVAL_PERCENTAGE,
"valid": 3600 * 5 * VALIDATION_PERCENTAGE / TRAINVAL_PERCENTAGE,
# The test set is 1.33 hours, so we use the entire thing
"test": None,
}
},
"full": {
"max_task_duration_by_split": {"test": None, "train": None, "valid": None}
},
"small": {
"download_urls": [
{
"split": "train",
"url": "https://github.com/neuralaudio/hear2021-open-tasks-downsampled/raw/main/speech_commands_v0.02-small.zip", # noqa: E501
"md5": "455123a88b8410d1f955c77ad331524f",
},
{
"split": "test",
"url": "https://github.com/neuralaudio/hear2021-open-tasks-downsampled/raw/main/speech_commands_test_set_v0.02-small.zip", # noqa: E501
"md5": "26d08374a7abd13ca2f4a4b8424f41d0",
},
],
"max_task_duration_by_split": {"train": None, "valid": None, "test": None},
},
},
}
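# Editor's note (illustrative, not part of the pipeline): in the "5h" mode the
# 5-hour audio budget is split between train and valid in proportion to the
# imported split percentages, while the ~1.33 h test split is always used in
# full. For example:
_train_budget_s = 3600 * 5 * TRAIN_PERCENTAGE / TRAINVAL_PERCENTAGE
_valid_budget_s = 3600 * 5 * VALIDATION_PERCENTAGE / TRAINVAL_PERCENTAGE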
class GenerateTrainDataset(luigi_util.WorkTask):
"""
Silence / background samples in the train (and, after split,
validation) sets need to be created by slicing up longer
background samples into 1 sec slices. This is the same method
used in the TensorFlow dataset generator.
https://github.com/tensorflow/datasets/blob/79d56e662a15cd11e1fb3b679e0f978c8041566f/tensorflow_datasets/audio/speech_commands.py#L142 # noqa
"""
# Requires an extracted dataset task to be completed
train_data = luigi.TaskParameter()
def requires(self):
return {"train": self.train_data}
@property
def output_path(self):
return self.workdir
def run(self):
train_path = Path(self.requires()["train"].workdir).joinpath("train")
background_audio = list(train_path.glob(f"{BACKGROUND_NOISE}/*.wav"))
assert len(background_audio) > 0
# Read all the background audio files and split into 1 second segments,
# save all the segments into a folder called _silence_
silence_dir = os.path.join(self.workdir, SILENCE)
os.makedirs(silence_dir, exist_ok=True)
print("Generating silence files from background sounds ...")
for audio_path in tqdm(background_audio):
audio, sr = sf.read(str(audio_path))
assert audio.ndim == 1
basename = os.path.basename(audio_path)
name, ext = os.path.splitext(basename)
for start in range(0, len(audio) - sr, sr // 2):
audio_segment = audio[start : start + sr]
filename = f"{name}-{start}{ext}"
filename = os.path.join(silence_dir, filename)
sf.write(filename, audio_segment, sr)
# We'll also create symlinks for the dataset here too to make the next
# stage of splitting into training and validation files easier.
for file_obj in train_path.iterdir():
if file_obj.is_dir() and file_obj.name != BACKGROUND_NOISE:
linked_folder = Path(os.path.join(self.workdir, file_obj.name))
assert not linked_folder.exists()
linked_folder.symlink_to(file_obj.absolute(), target_is_directory=True)
# Also need the testing and validation splits
if file_obj.name in ["testing_list.txt", "validation_list.txt"]:
linked_file = Path(os.path.join(self.workdir, file_obj.name))
assert not linked_file.exists()
linked_file.symlink_to(file_obj.absolute())
self.mark_complete()
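# Editor's note (illustrative): with the 0.5 s hop (sr // 2) used above, a 60 s
# background clip sampled at 16 kHz yields roughly
# (60*16000 - 16000) / 8000 ~= 118 one-second silence segments.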
class ExtractMetadata(pipeline.ExtractMetadata):
train = luigi.TaskParameter()
test = luigi.TaskParameter()
def requires(self):
return {
"train": self.train,
"test": self.test,
}
@staticmethod
def relpath_to_unique_filestem(relpath: str) -> str:
"""
Include the label (parent directory) in the filestem.
"""
# Get the parent directory (label) and the filename
name = "_".join(Path(relpath).parts[-2:])
# Remove the suffix
name = os.path.splitext(name)[0]
return str(name)
@staticmethod
def speaker_hash(unique_filestem: str) -> str:
"""Get the speaker hash as the Split key for speech_commands"""
hsh = re.sub(r"_nohash_.*$", "", unique_filestem)
return hsh
@staticmethod
def get_split_key(df: pd.DataFrame) -> pd.Series:
"""Get the speaker hash as the split key for speech_commands"""
return df["unique_filestem"].apply(ExtractMetadata.speaker_hash)
@staticmethod
def relpath_to_label(relpath: Path):
label = os.path.basename(os.path.dirname(relpath))
if label not in WORDS and label != SILENCE:
label = UNKNOWN
return label
def get_split_paths(self):
"""
Splits the dataset into train/valid/test files using the same method as
described by the TensorFlow dataset:
https://www.tensorflow.org/datasets/catalog/speech_commands
"""
# Test files
test_path = Path(self.requires()["test"].workdir).joinpath("test")
test_df = pd.DataFrame(test_path.glob("*/*.wav"), columns=["relpath"]).assign(
split=lambda df: "test"
)
# All silence paths to add to the train and validation
train_path = Path(self.requires()["train"].workdir)
all_silence = list(train_path.glob(f"{SILENCE}/*.wav"))
# Validation files
with open(os.path.join(train_path, "validation_list.txt"), "r") as fp:
validation_paths = fp.read().strip().splitlines()
validation_rel_paths = [os.path.join(train_path, p) for p in validation_paths]
# There are no silence files marked explicitly for validation. We add all
# the running_tap.wav samples to the silence class for validation.
# https://github.com/tensorflow/datasets/blob/e24fe9e6b03053d9b925d299a2246ea167dc85cd/tensorflow_datasets/audio/speech_commands.py#L183
val_silence = list(train_path.glob(f"{SILENCE}/running_tap*.wav"))
validation_rel_paths.extend(val_silence)
validation_df = pd.DataFrame(validation_rel_paths, columns=["relpath"])
import pandas as pd
import numpy as np
from sklearn import preprocessing
import scipy.io as scio
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
import lightgbm as lgb
from Coreg import Coreg
from DBN import Deep_Believe_Network
from sklearn.metrics import mean_absolute_error,mean_squared_error
from noisy_regressor import lstm_network,lstm_l2_network,Nosiy_NN_regressor,Semisupervised_Nosiy_NN_regressor
from ASCR import ASCR
import warnings
warnings.filterwarnings("ignore")
def make_seqdata(data,step=10):
num = data.shape[0]
data_list = []
for i in range(num):
seq_data = np.zeros((1,step,data.shape[1]))
for k in range(step):
if i-step+k+1 <= 0:
seq_data[0,k,:] = data[0,:]
else:
seq_data[0,k,:] = data[i-step+k+1,:]
data_list.append(seq_data)
return np.concatenate(data_list,axis=0)
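# Editor's sketch (illustrative shapes, not part of the original script):
# make_seqdata turns an (N, F) matrix into an (N, step, F) tensor of sliding
# windows, repeating the first row to pad windows near the start, e.g.:
# >>> make_seqdata(np.random.randn(100, 33), step=10).shape
# (100, 10, 33)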
data1 = scio.loadmat('/Volumes/文档/数据集/TE_process/TE_mat_data/d00.mat')['data'].T
data2 = scio.loadmat('/Volumes/文档/数据集/TE_process/TE_mat_data/d00_te.mat')['data'].T
dataall = np.row_stack([data1,data2])
label = dataall[:,35]
data = np.delete(dataall,list(range(34,53)),axis=1)
np.random.seed(2019)
train_index = np.random.choice(1460,100,replace = False)
test_index = np.random.choice(list(set(list(np.arange(1460)))-set(train_index)),960,replace = False)
u_index = list(set(list(np.arange(1460)))-set(train_index)-set(test_index))
mi = np.min(label)
di = (np.max(label)-np.min(label))
label = (label-min(label))/(max(label)-min(label))
data = preprocessing.MinMaxScaler().fit_transform(data)
traindata = data[train_index,:]
trainlabel = np.mat(label[train_index]).T
testdata = data[test_index,:]
testlabel = np.mat(label[test_index]).T
testlabel = testlabel*di+mi
udata = data[u_index,:]
result = pd.DataFrame(testlabel)
# -*- coding: utf-8 -*-
from __future__ import print_function
import pandas as pd
import sqlite3
import xlrd as xl
# if this .sqlite db doesn't already exists, this will create it
# if the .sqlite db *does* already exist, this establishes the desired connection
con = sqlite3.connect("sql_sample_db.sqlite")
# create pandas dataframes from each .csv file:
sales_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/sales_table.csv')
car_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/car_table.csv')
salesman_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/salesman_table.csv')
cust_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/cust_table.csv')
dog_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/dog_table.csv')
cat_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/cat_table.csv')
#%%
# make a list of the tables (dataframes) and table names:
tables = [sales_table, car_table, salesman_table, cust_table, dog_table, cat_table]
table_names = ['sales_table', 'car_table', 'salesman_table', 'cust_table', 'dog_table', 'cat_table']
# drop each table name if it already exists to avoid error if you rerun this bit of code
# then add it back (or add it for the first time, if the table didn't already exist)
for i in range(len(tables)):
table_name = table_names[i]
table = tables[i]
con.execute("DROP TABLE IF EXISTS {}".format(table_name))
pd.io.sql.to_sql(table, "{}".format(table_name), con, index=False)
# Function to make it easy to run queries on this mini-database
def run(query):
results = pd.read_sql("{}".format(query), con).fillna(' ')
return results
# create some dataframes to act as keys to clarify differences between difference rdbms
rdbms_differences = pd.DataFrame()
# show describe options
describe_index = ['Reading a table']
describe_differences = pd.DataFrame({'SQLite' : pd.Series(['PRAGMA TABLE_INFO(table_name)'], index=describe_index),
'MySQL' : pd.Series(['DESCRIBE table_name'], index=describe_index),
'Microsoft SQL Server' : pd.Series(['SP_HELP table_name'], index=describe_index),
'Oracle' : pd.Series(['DESCRIBE table_table'], index=describe_index)})
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.utils import check_required_parameters
from datetime import datetime, timedelta
from brightics.common.exception import BrighticsFunctionException as BFE
from brightics.function.extraction.shift_datetime import format_time
def extend_datetime(table, **params):
params = get_default_from_parameters_if_required(params, _extend_datetime)
check_required_parameters(_extend_datetime, params, ['table'])
return _extend_datetime(table, **params)
def _extend_datetime(table, input_col, impute_unit):
arr_order = []
datetime_list = []
for ind, t_str in enumerate(table[input_col]):
try:
if impute_unit == 'year':
arr_order.append(
datetime(year=int(t_str[0:4]), month=1, day=1))
elif impute_unit == 'month':
arr_order.append(datetime(year=int(t_str[0:4]), month=int(
t_str[4:6]), day=1))
elif impute_unit == 'day':
arr_order.append(datetime(year=int(t_str[0:4]), month=int(
t_str[4:6]), day=int(t_str[6:8])))
elif impute_unit == 'hour':
arr_order.append(datetime(year=int(t_str[0:4]), month=int(
t_str[4:6]), day=int(t_str[6:8]), hour=int(t_str[8:10])))
elif impute_unit == 'minute':
arr_order.append(datetime(year=int(t_str[0:4]), month=int(
t_str[4:6]), day=int(t_str[6:8]), hour=int(t_str[8:10]),
minute=int(t_str[10:12])))
datetime_list.append(datetime(year=int(t_str[0:4]), month=int(
t_str[4:6]), day=int(t_str[6:8]), hour=int(t_str[8:10]),
minute=int(t_str[10:12]), second=int(t_str[12:14])))
except:
raise BFE.from_errors(
[{'0100': 'Invalid Datetime format at column {}, index {}.'.format(input_col, ind + 1)}])
# check for ascending order
# If not -> log message error.
tmp = check_ascending(arr_order)
if not tmp[0]:
log_message = 'Date time coulumn should be in strictly ascending order with the unit {}. '.format(
impute_unit)
log_message += 'The following is the first five invalid data: {}'.format(
table[input_col][tmp[1]:tmp[1] + 5].tolist())
raise BFE.from_errors([{'0100': log_message}])
out_table = insert_datetime(
table.copy(), input_col, arr_order, datetime_list, impute_unit)
return {'out_table': out_table}
def insert_datetime(table, input_col, arr_order, datetime_list, impute_unit):
new_col = 'datetime_estimation_info'
origin_cols = table.columns.tolist()
input_col_index = origin_cols.index(input_col)
if impute_unit == 'year':
time_leap = pd.DateOffset(years=1)
elif impute_unit == 'month':
time_leap = pd.DateOffset(months=1)
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import os
import pandas
import numpy as np
import pyarrow
import pytest
import re
from modin.config import IsExperimental, Engine, StorageFormat
from modin.pandas.test.utils import io_ops_bad_exc
from .utils import eval_io, ForceOmnisciImport, set_execution_mode, run_and_compare
from pandas.core.dtypes.common import is_list_like
IsExperimental.put(True)
Engine.put("native")
StorageFormat.put("omnisci")
import modin.pandas as pd
from modin.pandas.test.utils import (
df_equals,
bool_arg_values,
to_pandas,
test_data_values,
test_data_keys,
generate_multiindex,
eval_general,
df_equals_with_non_stable_indices,
)
from modin.utils import try_cast_to_pandas
from modin.experimental.core.execution.native.implementations.omnisci_on_native.partitioning.partition_manager import (
OmnisciOnNativeDataframePartitionManager,
)
from modin.experimental.core.execution.native.implementations.omnisci_on_native.df_algebra import (
FrameNode,
)
@pytest.mark.usefixtures("TestReadCSVFixture")
class TestCSV:
from modin import __file__ as modin_root
root = os.path.dirname(
os.path.dirname(os.path.abspath(modin_root)) + ".."
) # root of modin repo
boston_housing_names = [
"index",
"CRIM",
"ZN",
"INDUS",
"CHAS",
"NOX",
"RM",
"AGE",
"DIS",
"RAD",
"TAX",
"PTRATIO",
"B",
"LSTAT",
"PRICE",
]
boston_housing_dtypes = {
"index": "int64",
"CRIM": "float64",
"ZN": "float64",
"INDUS": "float64",
"CHAS": "float64",
"NOX": "float64",
"RM": "float64",
"AGE": "float64",
"DIS": "float64",
"RAD": "float64",
"TAX": "float64",
"PTRATIO": "float64",
"B": "float64",
"LSTAT": "float64",
"PRICE": "float64",
}
def test_usecols_csv(self):
"""check with the following arguments: names, dtype, skiprows, delimiter"""
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
for kwargs in (
{"delimiter": ","},
{"sep": None},
{"skiprows": 1, "names": ["A", "B", "C", "D", "E"]},
{"dtype": {"a": "int32", "e": "string"}},
{"dtype": {"a": np.dtype("int32"), "b": np.dtype("int64"), "e": "string"}},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
def test_housing_csv(self):
csv_file = os.path.join(self.root, "examples/data/boston_housing.csv")
for kwargs in (
{
"skiprows": 1,
"names": self.boston_housing_names,
"dtype": self.boston_housing_dtypes,
},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
def test_time_parsing(self):
csv_file = os.path.join(
self.root, "modin/pandas/test/data", "test_time_parsing.csv"
)
for kwargs in (
{
"skiprows": 1,
"names": [
"timestamp",
"symbol",
"high",
"low",
"open",
"close",
"spread",
"volume",
],
"parse_dates": ["timestamp"],
"dtype": {"symbol": "string"},
},
):
rp = pandas.read_csv(csv_file, **kwargs)
rm = pd.read_csv(csv_file, engine="arrow", **kwargs)
with ForceOmnisciImport(rm):
rm = to_pandas(rm)
df_equals(rm["timestamp"].dt.year, rp["timestamp"].dt.year)
df_equals(rm["timestamp"].dt.month, rp["timestamp"].dt.month)
df_equals(rm["timestamp"].dt.day, rp["timestamp"].dt.day)
def test_csv_fillna(self):
csv_file = os.path.join(self.root, "examples/data/boston_housing.csv")
for kwargs in (
{
"skiprows": 1,
"names": self.boston_housing_names,
"dtype": self.boston_housing_dtypes,
},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
comparator=lambda df1, df2: df_equals(
df1["CRIM"].fillna(1000), df2["CRIM"].fillna(1000)
),
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
@pytest.mark.parametrize("null_dtype", ["category", "float64"])
def test_null_col(self, null_dtype):
csv_file = os.path.join(
self.root, "modin/pandas/test/data", "test_null_col.csv"
)
ref = pandas.read_csv(
csv_file,
names=["a", "b", "c"],
dtype={"a": "int64", "b": "int64", "c": null_dtype},
skiprows=1,
)
ref["a"] = ref["a"] + ref["b"]
exp = pd.read_csv(
csv_file,
names=["a", "b", "c"],
dtype={"a": "int64", "b": "int64", "c": null_dtype},
skiprows=1,
)
exp["a"] = exp["a"] + exp["b"]
# df_equals cannot compare empty categories
if null_dtype == "category":
ref["c"] = ref["c"].astype("string")
with ForceOmnisciImport(exp):
exp = to_pandas(exp)
exp["c"] = exp["c"].astype("string")
df_equals(ref, exp)
def test_read_and_concat(self):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
ref1 = pandas.read_csv(csv_file)
ref2 = pandas.read_csv(csv_file)
ref = pandas.concat([ref1, ref2])
exp1 = pandas.read_csv(csv_file)
exp2 = pandas.read_csv(csv_file)
exp = pd.concat([exp1, exp2])
with ForceOmnisciImport(exp):
df_equals(ref, exp)
@pytest.mark.parametrize("names", [None, ["a", "b", "c", "d", "e"]])
@pytest.mark.parametrize("header", [None, 0])
def test_from_csv(self, header, names):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=csv_file,
header=header,
names=names,
)
@pytest.mark.parametrize("kwargs", [{"sep": "|"}, {"delimiter": "|"}])
def test_sep_delimiter(self, kwargs):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_delim.csv")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
@pytest.mark.skip(reason="https://github.com/modin-project/modin/issues/2174")
def test_float32(self):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
kwargs = {
"dtype": {"a": "float32", "b": "float32"},
}
pandas_df = pandas.read_csv(csv_file, **kwargs)
pandas_df["a"] = pandas_df["a"] + pandas_df["b"]
modin_df = pd.read_csv(csv_file, **kwargs, engine="arrow")
modin_df["a"] = modin_df["a"] + modin_df["b"]
with ForceOmnisciImport(modin_df):
df_equals(modin_df, pandas_df)
# Datetime Handling tests
@pytest.mark.parametrize("engine", [None, "arrow"])
@pytest.mark.parametrize(
"parse_dates",
[
True,
False,
["col2"],
["c2"],
[["col2", "col3"]],
{"col23": ["col2", "col3"]},
],
)
@pytest.mark.parametrize("names", [None, [f"c{x}" for x in range(1, 7)]])
def test_read_csv_datetime(
self,
engine,
parse_dates,
names,
):
parse_dates_unsupported = isinstance(parse_dates, dict) or (
isinstance(parse_dates, list) and isinstance(parse_dates[0], list)
)
if parse_dates_unsupported and engine == "arrow" and not names:
pytest.skip(
"In these cases Modin raises `ArrowEngineException` while pandas "
"doesn't raise any exceptions that causes tests fails"
)
# In these cases Modin raises `ArrowEngineException` while pandas
# raises `ValueError`, so skipping exception type checking
skip_exc_type_check = parse_dates_unsupported and engine == "arrow"
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": engine},
check_exception_type=not skip_exc_type_check,
raising_exceptions=None if skip_exc_type_check else io_ops_bad_exc,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
parse_dates=parse_dates,
names=names,
)
@pytest.mark.parametrize("engine", [None, "arrow"])
@pytest.mark.parametrize(
"usecols",
[
None,
["col1"],
["col1", "col1"],
["col1", "col2", "col6"],
["col6", "col2", "col1"],
[0],
[0, 0],
[0, 1, 5],
[5, 1, 0],
lambda x: x in ["col1", "col2"],
],
)
def test_read_csv_col_handling(
self,
engine,
usecols,
):
eval_io(
fn_name="read_csv",
check_kwargs_callable=not callable(usecols),
md_extra_kwargs={"engine": engine},
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
usecols=usecols,
)
class TestMasks:
data = {
"a": [1, 1, 2, 2, 3],
"b": [None, None, 2, 1, 3],
"c": [3, None, None, 2, 1],
}
cols_values = ["a", ["a", "b"], ["a", "b", "c"]]
@pytest.mark.parametrize("cols", cols_values)
def test_projection(self, cols):
def projection(df, cols, **kwargs):
return df[cols]
run_and_compare(projection, data=self.data, cols=cols)
def test_drop(self):
def drop(df, **kwargs):
return df.drop(columns="a")
run_and_compare(drop, data=self.data)
def test_iloc(self):
def mask(df, **kwargs):
return df.iloc[[0, 1]]
run_and_compare(mask, data=self.data, allow_subqueries=True)
def test_empty(self):
def empty(df, **kwargs):
return df
run_and_compare(empty, data=None)
def test_filter(self):
def filter(df, **kwargs):
return df[df["a"] == 1]
run_and_compare(filter, data=self.data)
def test_filter_with_index(self):
def filter(df, **kwargs):
df = df.groupby("a").sum()
return df[df["b"] > 1]
run_and_compare(filter, data=self.data)
def test_filter_proj(self):
def filter(df, **kwargs):
df1 = df + 2
return df1[(df["a"] + df1["b"]) > 1]
run_and_compare(filter, data=self.data)
def test_filter_drop(self):
def filter(df, **kwargs):
df = df[["a", "b"]]
df = df[df["a"] != 1]
df["a"] = df["a"] * df["b"]
return df
run_and_compare(filter, data=self.data)
class TestMultiIndex:
data = {"a": np.arange(24), "b": np.arange(24)}
@pytest.mark.parametrize("names", [None, ["", ""], ["name", "name"]])
def test_dup_names(self, names):
index = pandas.MultiIndex.from_tuples(
[(i, j) for i in range(3) for j in range(8)], names=names
)
pandas_df = pandas.DataFrame(self.data, index=index) + 1
modin_df = pd.DataFrame(self.data, index=index) + 1
df_equals(pandas_df, modin_df)
@pytest.mark.parametrize(
"names",
[
None,
[None, "s", None],
["i1", "i2", "i3"],
["i1", "i1", "i3"],
["i1", "i2", "a"],
],
)
def test_reset_index(self, names):
index = pandas.MultiIndex.from_tuples(
[(i, j, k) for i in range(2) for j in range(3) for k in range(4)],
names=names,
)
def applier(lib):
df = lib.DataFrame(self.data, index=index) + 1
return df.reset_index()
eval_general(pd, pandas, applier)
@pytest.mark.parametrize("is_multiindex", [True, False])
@pytest.mark.parametrize(
"column_names", [None, ["level1", None], ["level1", "level2"]]
)
def test_reset_index_multicolumns(self, is_multiindex, column_names):
index = (
pandas.MultiIndex.from_tuples(
[(i, j, k) for i in range(2) for j in range(3) for k in range(4)],
names=["l1", "l2", "l3"],
)
if is_multiindex
else pandas.Index(np.arange(len(self.data["a"])), name="index")
)
columns = pandas.MultiIndex.from_tuples(
[("a", "b"), ("b", "c")], names=column_names
)
data = np.array(list(self.data.values())).T
def applier(df, **kwargs):
df = df + 1
return df.reset_index(drop=False)
run_and_compare(
fn=applier,
data=data,
constructor_kwargs={"index": index, "columns": columns},
)
def test_set_index_name(self):
index = pandas.Index.__new__(pandas.Index, data=[i for i in range(24)])
pandas_df = pandas.DataFrame(self.data, index=index)
pandas_df.index.name = "new_name"
modin_df = pd.DataFrame(self.data, index=index)
modin_df._query_compiler.set_index_name("new_name")
df_equals(pandas_df, modin_df)
def test_set_index_names(self):
index = pandas.MultiIndex.from_tuples(
[(i, j, k) for i in range(2) for j in range(3) for k in range(4)]
)
pandas_df = pandas.DataFrame(self.data, index=index)
pandas_df.index.names = ["new_name1", "new_name2", "new_name3"]
modin_df = pd.DataFrame(self.data, index=index)
modin_df._query_compiler.set_index_names(
["new_name1", "new_name2", "new_name3"]
)
df_equals(pandas_df, modin_df)
class TestFillna:
data = {"a": [1, 1, None], "b": [None, None, 2], "c": [3, None, None]}
values = [1, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}]
@pytest.mark.parametrize("value", values)
def test_fillna_all(self, value):
def fillna(df, value, **kwargs):
return df.fillna(value)
run_and_compare(fillna, data=self.data, value=value)
def test_fillna_bool(self):
def fillna(df, **kwargs):
df["a"] = df["a"] == 1
df["a"] = df["a"].fillna(False)
return df
run_and_compare(fillna, data=self.data)
class TestConcat:
data = {
"a": [1, 2, 3],
"b": [10, 20, 30],
"d": [1000, 2000, 3000],
"e": [11, 22, 33],
}
data2 = {
"a": [4, 5, 6],
"c": [400, 500, 600],
"b": [40, 50, 60],
"f": [444, 555, 666],
}
data3 = {
"f": [2, 3, 4],
"g": [400, 500, 600],
"h": [20, 30, 40],
}
@pytest.mark.parametrize("join", ["inner", "outer"])
@pytest.mark.parametrize("sort", bool_arg_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
def test_concat(self, join, sort, ignore_index):
def concat(lib, df1, df2, join, sort, ignore_index):
return lib.concat(
[df1, df2], join=join, sort=sort, ignore_index=ignore_index
)
run_and_compare(
concat,
data=self.data,
data2=self.data2,
join=join,
sort=sort,
ignore_index=ignore_index,
)
def test_concat_with_same_df(self):
def concat(df, **kwargs):
df["f"] = df["a"]
return df
run_and_compare(concat, data=self.data)
def test_setitem_lazy(self):
def applier(df, **kwargs):
df = df + 1
df["a"] = df["a"] + 1
df["e"] = df["a"] + 1
df["new_int8"] = np.int8(10)
df["new_int16"] = np.int16(10)
df["new_int32"] = np.int32(10)
df["new_int64"] = np.int64(10)
df["new_int"] = 10
df["new_float"] = 5.5
df["new_float64"] = np.float64(10.1)
return df
run_and_compare(applier, data=self.data)
def test_setitem_default(self):
def applier(df, lib, **kwargs):
df = df + 1
df["a"] = np.arange(3)
df["b"] = lib.Series(np.arange(3))
return df
run_and_compare(applier, data=self.data, force_lazy=False)
def test_insert_lazy(self):
def applier(df, **kwargs):
df = df + 1
df.insert(2, "new_int", 10)
df.insert(1, "new_float", 5.5)
df.insert(0, "new_a", df["a"] + 1)
return df
run_and_compare(applier, data=self.data)
def test_insert_default(self):
def applier(df, lib, **kwargs):
df = df + 1
df.insert(1, "new_range", np.arange(3))
df.insert(1, "new_series", lib.Series(np.arange(3)))
return df
run_and_compare(applier, data=self.data, force_lazy=False)
def test_concat_many(self):
def concat(df1, df2, lib, **kwargs):
df3 = df1.copy()
df4 = df2.copy()
return lib.concat([df1, df2, df3, df4])
def sort_comparator(df1, df2):
"""Sort and verify equality of the passed frames."""
# We sort values because order of rows in the 'union all' result is inconsistent in OmniSci
df1, df2 = (
try_cast_to_pandas(df).sort_values(df.columns[0]) for df in (df1, df2)
)
return df_equals(df1, df2)
run_and_compare(
concat, data=self.data, data2=self.data2, comparator=sort_comparator
)
def test_concat_agg(self):
def concat(lib, df1, df2):
df1 = df1.groupby("a", as_index=False).agg(
{"b": "sum", "d": "sum", "e": "sum"}
)
df2 = df2.groupby("a", as_index=False).agg(
{"c": "sum", "b": "sum", "f": "sum"}
)
return lib.concat([df1, df2])
run_and_compare(concat, data=self.data, data2=self.data2, allow_subqueries=True)
@pytest.mark.parametrize("join", ["inner", "outer"])
@pytest.mark.parametrize("sort", bool_arg_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
def test_concat_single(self, join, sort, ignore_index):
def concat(lib, df, join, sort, ignore_index):
return lib.concat([df], join=join, sort=sort, ignore_index=ignore_index)
run_and_compare(
concat,
data=self.data,
join=join,
sort=sort,
ignore_index=ignore_index,
)
def test_groupby_concat_single(self):
def concat(lib, df):
df = lib.concat([df])
return df.groupby("a").agg({"b": "min"})
run_and_compare(
concat,
data=self.data,
)
@pytest.mark.parametrize("join", ["inner"])
@pytest.mark.parametrize("sort", bool_arg_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
def test_concat_join(self, join, sort, ignore_index):
def concat(lib, df1, df2, join, sort, ignore_index, **kwargs):
return lib.concat(
[df1, df2], axis=1, join=join, sort=sort, ignore_index=ignore_index
)
run_and_compare(
concat,
data=self.data,
data2=self.data3,
join=join,
sort=sort,
ignore_index=ignore_index,
)
def test_concat_index_name(self):
df1 = pandas.DataFrame(self.data)
df1 = df1.set_index("a")
df2 = pandas.DataFrame(self.data3)
df2 = df2.set_index("f")
ref = pandas.concat([df1, df2], axis=1, join="inner")
exp = pd.concat([df1, df2], axis=1, join="inner")
df_equals(ref, exp)
df2.index.name = "a"
ref = pandas.concat([df1, df2], axis=1, join="inner")
exp = pd.concat([df1, df2], axis=1, join="inner")
df_equals(ref, exp)
def test_concat_index_names(self):
df1 = pandas.DataFrame(self.data)
df1 = df1.set_index(["a", "b"])
df2 = pandas.DataFrame(self.data3)
import warnings
import numpy
from sklearn.utils import check_array
try:
from scipy.io import arff
HAS_ARFF = True
except:
HAS_ARFF = False
from .utils import check_dataset, ts_size, to_time_series_dataset
def to_sklearn_dataset(dataset, dtype=numpy.float, return_dim=False):
"""Transforms a time series dataset so that it fits the format used in
``sklearn`` estimators.
Parameters
----------
dataset : array-like
The dataset of time series to be transformed.
dtype : data type (default: numpy.float)
Data type for the returned dataset.
return_dim : boolean (optional, default: False)
Whether the dimensionality (third dimension should be returned together
with the transformed dataset).
Returns
-------
numpy.ndarray of shape (n_ts, sz * d)
The transformed dataset of time series.
int (optional, if return_dim=True)
The dimensionality of the original tslearn dataset (third dimension)
Examples
--------
>>> to_sklearn_dataset([[1, 2]], return_dim=True)
(array([[1., 2.]]), 1)
>>> to_sklearn_dataset([[1, 2], [1, 4, 3]])
array([[ 1., 2., nan],
[ 1., 4., 3.]])
See Also
--------
to_time_series_dataset : Transforms a time series dataset to ``tslearn``
format.
"""
tslearn_dataset = to_time_series_dataset(dataset, dtype=dtype)
n_ts = tslearn_dataset.shape[0]
d = tslearn_dataset.shape[2]
if return_dim:
return tslearn_dataset.reshape((n_ts, -1)), d
else:
return tslearn_dataset.reshape((n_ts, -1))
def to_pyts_dataset(X):
"""Transform a tslearn-compatible dataset into a pyts dataset.
Parameters
----------
X: array, shape = (n_ts, sz, d)
tslearn-formatted dataset to be cast to pyts format
Returns
-------
array, shape=(n_ts, sz) if d=1, (n_ts, d, sz) otherwise
pyts-formatted dataset
Examples
--------
>>> tslearn_arr = numpy.random.randn(10, 16, 1)
>>> pyts_arr = to_pyts_dataset(tslearn_arr)
>>> pyts_arr.shape
(10, 16)
>>> tslearn_arr = numpy.random.randn(10, 16, 2)
>>> pyts_arr = to_pyts_dataset(tslearn_arr)
>>> pyts_arr.shape
(10, 2, 16)
>>> tslearn_arr = [numpy.random.randn(16, 1), numpy.random.randn(10, 1)]
>>> to_pyts_dataset(tslearn_arr) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: All the time series in the array should be of equal lengths
"""
X_ = check_dataset(X, force_equal_length=True)
if X_.shape[2] == 1:
return X_.reshape((X_.shape[0], -1))
else:
return X_.transpose((0, 2, 1))
def from_pyts_dataset(X):
"""Transform a pyts-compatible dataset into a tslearn dataset.
Parameters
----------
X: array, shape = (n_ts, sz) or (n_ts, d, sz)
pyts-formatted dataset
Returns
-------
array, shape=(n_ts, sz, d)
tslearn-formatted dataset
Examples
--------
>>> pyts_arr = numpy.random.randn(10, 16)
>>> tslearn_arr = from_pyts_dataset(pyts_arr)
>>> tslearn_arr.shape
(10, 16, 1)
>>> pyts_arr = numpy.random.randn(10, 2, 16)
>>> tslearn_arr = from_pyts_dataset(pyts_arr)
>>> tslearn_arr.shape
(10, 16, 2)
>>> pyts_arr = numpy.random.randn(10)
>>> from_pyts_dataset(pyts_arr) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: X is not a valid input pyts array.
"""
X_ = check_array(X, ensure_2d=False, allow_nd=True)
if X_.ndim == 2:
shape = list(X_.shape) + [1]
return X_.reshape(shape)
elif X_.ndim == 3:
return X_.transpose((0, 2, 1))
else:
raise ValueError("X is not a valid input pyts array. "
"Its dimensions, once cast to numpy.ndarray "
"are {}".format(X_.shape))
def to_seglearn_dataset(X):
"""Transform a tslearn-compatible dataset into a seglearn dataset.
Parameters
----------
X: array, shape = (n_ts, sz, d)
tslearn-formatted dataset to be cast to seglearn format
Returns
-------
array of arrays, shape=(n_ts, )
seglearn-formatted dataset. i-th sub-array in the list has shape
(sz_i, d)
Examples
--------
>>> tslearn_arr = numpy.random.randn(10, 16, 1)
>>> seglearn_arr = to_seglearn_dataset(tslearn_arr)
>>> seglearn_arr.shape
(10, 16, 1)
>>> tslearn_arr = numpy.random.randn(10, 16, 2)
>>> seglearn_arr = to_seglearn_dataset(tslearn_arr)
>>> seglearn_arr.shape
(10, 16, 2)
>>> tslearn_arr = [numpy.random.randn(16, 2), numpy.random.randn(10, 2)]
>>> seglearn_arr = to_seglearn_dataset(tslearn_arr)
>>> seglearn_arr.shape
(2,)
>>> seglearn_arr[0].shape
(16, 2)
>>> seglearn_arr[1].shape
(10, 2)
"""
X_ = check_dataset(X)
return numpy.array([Xi[:ts_size(Xi)] for Xi in X_])
def from_seglearn_dataset(X):
"""Transform a seglearn-compatible dataset into a tslearn dataset.
Parameters
----------
X: list of arrays, or array of arrays, shape = (n_ts, )
seglearn-formatted dataset. i-th sub-array in the list has shape
(sz_i, d)
Returns
-------
array, shape=(n_ts, sz, d), where sz is the maximum of all array lengths
tslearn-formatted dataset
Examples
--------
>>> seglearn_arr = [numpy.random.randn(10, 1), numpy.random.randn(10, 1)]
>>> tslearn_arr = from_seglearn_dataset(seglearn_arr)
>>> tslearn_arr.shape
(2, 10, 1)
>>> seglearn_arr = [numpy.random.randn(10, 1), numpy.random.randn(5, 1)]
>>> tslearn_arr = from_seglearn_dataset(seglearn_arr)
>>> tslearn_arr.shape
(2, 10, 1)
>>> seglearn_arr = numpy.random.randn(2, 10, 1)
>>> tslearn_arr = from_seglearn_dataset(seglearn_arr)
>>> tslearn_arr.shape
(2, 10, 1)
"""
return to_time_series_dataset(X)
def to_stumpy_dataset(X):
"""Transform a tslearn-compatible dataset into a stumpy dataset.
Parameters
----------
X: array, shape = (n_ts, sz, d)
tslearn-formatted dataset to be cast to stumpy format
Returns
-------
list of arrays of shape=(d, sz_i) if d > 1 or (sz_i, ) otherwise
stumpy-formatted dataset.
Examples
--------
>>> tslearn_arr = numpy.random.randn(10, 16, 1)
>>> stumpy_arr = to_stumpy_dataset(tslearn_arr)
>>> len(stumpy_arr)
10
>>> stumpy_arr[0].shape
(16,)
>>> tslearn_arr = numpy.random.randn(10, 16, 2)
>>> stumpy_arr = to_stumpy_dataset(tslearn_arr)
>>> len(stumpy_arr)
10
>>> stumpy_arr[0].shape
(2, 16)
"""
X_ = check_dataset(X)
def transpose_or_flatten(ts):
if ts.shape[1] == 1:
return ts.reshape((-1, ))
else:
return ts.transpose()
return [transpose_or_flatten(Xi[:ts_size(Xi)]) for Xi in X_]
def from_stumpy_dataset(X):
"""Transform a stumpy-compatible dataset into a tslearn dataset.
Parameters
----------
X: list of arrays of shapes (d, sz_i) if d > 1 or (sz_i, ) otherwise
stumpy-formatted dataset.
Returns
-------
array, shape=(n_ts, sz, d), where sz is the maximum of all array lengths
tslearn-formatted dataset
Examples
--------
>>> stumpy_arr = [numpy.random.randn(10), numpy.random.randn(10)]
>>> tslearn_arr = from_stumpy_dataset(stumpy_arr)
>>> tslearn_arr.shape
(2, 10, 1)
>>> stumpy_arr = [numpy.random.randn(3, 10), numpy.random.randn(3, 5)]
>>> tslearn_arr = from_stumpy_dataset(stumpy_arr)
>>> tslearn_arr.shape
(2, 10, 3)
"""
def transpose_or_expand(ts):
if ts.ndim == 1:
return ts.reshape((-1, 1))
else:
return ts.transpose()
return to_time_series_dataset([transpose_or_expand(Xi) for Xi in X])
def to_sktime_dataset(X):
"""Transform a tslearn-compatible dataset into a sktime dataset.
Parameters
----------
X: array, shape = (n_ts, sz, d)
tslearn-formatted dataset to be cast to sktime format
Returns
-------
Pandas data-frame
sktime-formatted dataset (cf.
`link <https://alan-turing-institute.github.io/sktime/examples/loading_data.html>`_)
Examples
--------
>>> tslearn_arr = numpy.random.randn(10, 16, 1)
>>> sktime_arr = to_sktime_dataset(tslearn_arr)
>>> sktime_arr.shape
(10, 1)
>>> sktime_arr["dim_0"][0].shape
(16,)
>>> tslearn_arr = numpy.random.randn(10, 16, 2)
>>> sktime_arr = to_sktime_dataset(tslearn_arr)
>>> sktime_arr.shape
(10, 2)
>>> sktime_arr["dim_1"][0].shape
(16,)
Notes
-----
Conversion from/to sktime format requires pandas to be installed.
""" # noqa: E501
try:
import pandas as pd
except ImportError:
raise ImportError("Conversion from/to sktime cannot be performed "
"if pandas is not installed.")
X_ = check_dataset(X)
X_pd = pd.DataFrame(dtype=numpy.float32)
for dim in range(X_.shape[2]):
X_pd['dim_' + str(dim)] = [pd.Series(data=Xi[:ts_size(Xi), dim])
for Xi in X_]
return X_pd
def from_sktime_dataset(X):
"""Transform a sktime-compatible dataset into a tslearn dataset.
Parameters
----------
X: pandas data-frame
sktime-formatted dataset (cf.
`link <https://alan-turing-institute.github.io/sktime/examples/loading_data.html>`_)
Returns
-------
array, shape=(n_ts, sz, d)
tslearn-formatted dataset
Examples
--------
>>> import pandas as pd
>>> sktime_df = pd.DataFrame()
>>> sktime_df["dim_0"] = [pd.Series([1, 2, 3]), pd.Series([4, 5, 6])]
>>> tslearn_arr = from_sktime_dataset(sktime_df)
>>> tslearn_arr.shape
(2, 3, 1)
>>> sktime_df = pd.DataFrame()
>>> sktime_df["dim_0"] = [pd.Series([1, 2, 3]),
... pd.Series([4, 5, 6, 7])]
>>> sktime_df["dim_1"] = [pd.Series([8, 9, 10]),
... pd.Series([11, 12, 13, 14])]
>>> tslearn_arr = from_sktime_dataset(sktime_df)
>>> tslearn_arr.shape
(2, 4, 2)
>>> sktime_arr = numpy.random.randn(10, 1, 16)
>>> from_sktime_dataset(
... sktime_arr
... ) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: X is not a valid input sktime array.
Notes
-----
Conversion from/to sktime format requires pandas to be installed.
""" # noqa: E501
try:
import pandas as pd
except ImportError:
raise ImportError("Conversion from/to sktime cannot be performed "
"if pandas is not installed.")
if not isinstance(X, pd.DataFrame):
raise ValueError("X is not a valid input sktime array. "
"A pandas DataFrame is expected.")
data_dimensions = [col_name
for col_name in X.columns
if col_name.startswith("dim_")]
d = len(data_dimensions)
ordered_data_dimensions = ["dim_%d" % di for di in range(d)]
if sorted(ordered_data_dimensions) != sorted(data_dimensions):
raise ValueError("X is not a valid input sktime array. "
"Provided dimensions are not conitiguous."
"{}".format(data_dimensions))
n = X["dim_0"].shape[0]
max_sz = -1
for dim_name in ordered_data_dimensions:
for i in range(n):
if X[dim_name][i].size > max_sz:
max_sz = X[dim_name][i].size
tslearn_arr = numpy.empty((n, max_sz, d))
tslearn_arr[:] = numpy.nan
for di in range(d):
for i in range(n):
sz = X["dim_%d" % di][i].size
tslearn_arr[i, :sz, di] = X["dim_%d" % di][i].values.copy()
return tslearn_arr
def to_pyflux_dataset(X):
"""Transform a tslearn-compatible dataset into a pyflux dataset.
Parameters
----------
X: array, shape = (n_ts, sz, d), where n_ts=1
tslearn-formatted dataset to be cast to pyflux format
Returns
-------
Pandas data-frame
pyflux-formatted dataset (cf.
`link <https://pyflux.readthedocs.io/en/latest/getting_started.html>`_)
Examples
--------
>>> tslearn_arr = numpy.random.randn(1, 16, 1)
>>> pyflux_df = to_pyflux_dataset(tslearn_arr)
>>> pyflux_df.shape
(16, 1)
>>> pyflux_df.columns[0]
'dim_0'
>>> tslearn_arr = numpy.random.randn(1, 16, 2)
>>> pyflux_df = to_pyflux_dataset(tslearn_arr)
>>> pyflux_df.shape
(16, 2)
>>> pyflux_df.columns[1]
'dim_1'
>>> tslearn_arr = numpy.random.randn(10, 16, 1)
>>> to_pyflux_dataset(tslearn_arr) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Array should be made of a single time series (10 here)
Notes
-----
Conversion from/to pyflux format requires pandas to be installed.
""" # noqa: E501
try:
import pandas as pd
except ImportError:
raise ImportError("Conversion from/to pyflux cannot be performed "
"if pandas is not installed.")
X_ = check_dataset(X,
force_equal_length=True,
force_single_time_series=True)
X_pd = pd.DataFrame(X[0], dtype=numpy.float32)
X_pd.columns = ["dim_%d" % di for di in range(X_.shape[2])]
return X_pd
def from_pyflux_dataset(X):
"""Transform a pyflux-compatible dataset into a tslearn dataset.
Parameters
----------
X: pandas data-frame
pyflux-formatted dataset
Returns
-------
array, shape=(n_ts, sz, d), where n_ts=1
tslearn-formatted dataset.
Column order is kept the same as in the original data frame.
Examples
--------
>>> import pandas as pd
>>> pyflux_df = pd.DataFrame()
>>> pyflux_df["dim_0"] = numpy.random.rand(10)
>>> tslearn_arr = from_pyflux_dataset(pyflux_df)
>>> tslearn_arr.shape
(1, 10, 1)
>>> pyflux_df = pd.DataFrame()
>>> pyflux_df["dim_0"] = numpy.random.rand(10)
>>> pyflux_df["dim_1"] = numpy.random.rand(10)
>>> pyflux_df["dim_2"] = numpy.random.rand(10)
>>> tslearn_arr = from_pyflux_dataset(pyflux_df)
>>> tslearn_arr.shape
(1, 10, 3)
>>> pyflux_arr = numpy.random.randn(10, 1, 16)
>>> from_pyflux_dataset(
... pyflux_arr
... ) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: X is not a valid input pyflux array.
Notes
-----
Conversion from/to pyflux format requires pandas to be installed.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("Conversion from/to pyflux cannot be performed "
"if pandas is not installed.")
if not isinstance(X, pd.DataFrame):
raise ValueError("X is not a valid input pyflux array. "
"A pandas DataFrame is expected.")
data_dimensions = [col_name for col_name in X.columns]
d = len(data_dimensions)
n = 1
max_sz = -1
for dim_name in data_dimensions:
if X[dim_name].size > max_sz:
max_sz = X[dim_name].size
tslearn_arr = numpy.empty((n, max_sz, d))
tslearn_arr[:] = numpy.nan
for di, dim_name in enumerate(data_dimensions):
data = X[dim_name].values.copy()
sz = len(data)
tslearn_arr[0, :sz, di] = data
return tslearn_arr
def to_tsfresh_dataset(X):
"""Transform a tslearn-compatible dataset into a tsfresh dataset.
Parameters
----------
X: array, shape = (n_ts, sz, d)
tslearn-formatted dataset to be cast to tsfresh format
Returns
-------
Pandas data-frame
tsfresh-formatted dataset ("flat" data frame, as described
`there <https://tsfresh.readthedocs.io/en/latest/text/data_formats.html#input-option-1-flat-dataframe>`_)
Examples
--------
>>> tslearn_arr = numpy.random.randn(1, 16, 1)
>>> tsfresh_df = to_tsfresh_dataset(tslearn_arr)
>>> tsfresh_df.shape
(16, 3)
>>> tslearn_arr = numpy.random.randn(1, 16, 2)
>>> tsfresh_df = to_tsfresh_dataset(tslearn_arr)
>>> tsfresh_df.shape
(16, 4)
Notes
-----
Conversion from/to tsfresh format requires pandas to be installed.
""" # noqa: E501
try:
import pandas as pd
except ImportError:
raise ImportError("Conversion from/to tsfresh cannot be performed "
"if pandas is not installed.")
X_ = check_dataset(X)
n, sz, d = X_.shape
dataframes = []
for i, Xi in enumerate(X_):
df = pd.DataFrame(columns=["id", "time"] +
["dim_%d" % di for di in range(d)])
Xi_ = Xi[:ts_size(Xi)]
sz = Xi_.shape[0]
df["time"] = numpy.arange(sz)
df["id"] = numpy.zeros((sz, ), dtype=numpy.int32) + i
for di in range(d):
df["dim_%d" % di] = Xi_[:, di]
dataframes.append(df)
return pd.concat(dataframes)
import argparse
import re
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import os
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--input', type=str,
default='../results/logs.csv', help='Input file containing the logs')
ap.add_argument('-o', '--output', type=str,
default='../plots', help='Output directory for the plots')
args = vars(ap.parse_args())
SAVING_DIR = args['output']
sns.set()
def print_datasets(datasets: dict) -> None:
for size, dataset in datasets.items():
print(size)
for name, time in dataset.items():
print('\t' + name + ': ' + str(time))
def getParameterDomain(parameter: str, dataset: dict) -> list:
"""Get the possible values for a given parameter ('o' or 'a')"""
results = re.findall(
re.compile(
parameter + r"(.[\d.]*)"
), str(dataset)
)
# Build a set first to avoid duplicates
return sorted(list({match for match in results}))
def extract(regex: str, data: dict) -> (list, list):
"""Extracts data from one dataset according to the given regex.
Returns a pair (domain, values).
The 1st capturing group of the regex must be the varying parameter
and the second the value."""
results = re.findall(re.compile(regex), str(data))
domain = []
values = []
for match in results:
domain.append(float(match[0]))
values.append(float(match[1]))
return (domain, values)
def extract_all(regex: str, data: dict) -> (list, list):
"""Extracts data from all the datasets according to the given regex.
Returns a pair (domain, values), where domain will be the dataset's size.
Assumes regex only gets one element per dataset.
The 1st capturing group of the regex must be the value."""
domain = []
values = []
for length, dataset in data.items():
domain.append(length)
values.append(
float(re.findall(
re.compile(regex), str(dataset)
)[0])
)
return (domain, values)
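# Editor's sketch (illustrative): given a dict-like log string, `extract` pulls
# (domain, value) pairs via the two capturing groups. The "o<value>" key format
# is an assumption based on getParameterDomain above.
# >>> extract(r"o([\d.]+)': ([\d.]+)", {"o0.5": 1.2, "o1.0": 0.8})
# ([0.5, 1.0], [1.2, 0.8])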
def create_plot(title: str, xlabel: str, ylabel: str, domain: list, *data):
"""Creates and Saves a 'Spagetti plot' with the given title, horizontal
label, vertical label, domain and functions (name and values) presented
in the data array. Plot saved to constant SAVING_DIR/plot_name"""
agg_data = {'x': domain}
agg_data.update({fn[0]: fn[1]
for fn
in data})
df = pd.DataFrame(agg_data)
import os
import datetime
import pandas as pd
import numpy as np
import math
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import globals
from globals import log_message
from models import classifiers
from data import ini_file, data_exporter, data_proccessor, data_resampling
from visualisation import plot
class TabTraining(ttk.Frame):
def __init__(self, master):
super().__init__(master)
self.build_controls()
self.fill_init_data_into_controls()
def validate_monitoring_run(self):
txtWindowStride = self.txtWindowStride_text.get()
selected_monitor_window_size = int(self.txtSimulationWindowSize_text.get())
selected_monitoring_sample_rate = int(self.cboSimulationSampleRate.get())
algorithm = str(self.cboSimuAlgorithm.get())
return data_proccessor.validate_monitoring_run(txtWindowStride, selected_monitor_window_size, selected_monitoring_sample_rate, algorithm)
def statics_clicked(self):
if self.validate_monitoring_run():
plot.statistics_metrics_show(globals.monitoring_data_frame_resampled_monitor, globals.monitoring_data_fr, globals.curr_monitoring_sampling_rate)
def monitoring_dist_clicked(self):
if self.validate_monitoring_run():
plot.monitoring_show(globals.monitoring_data_fr, globals.curr_monitoring_sampling_rate)
def simulation_clicked(self):
if (self.rdoSimuStartTime.get() == 0) and (self.validate_monitoring_run()):
plot.simulation_show(globals.label_set, globals.monitoring_data_fr, int(self.txtSimuFrameDtPoints_text.get()),
int(self.txtSimuFrameStride_text.get()), int(self.txtSimuFrameDelay_text.get()),
int(self.txtSimuFrameRepeat_text.get()))
def models_fitting_clicked(self):
print(' ')
print(' ')
log_message('START BUILDING NEW MODEL(S)')
time_now = datetime.datetime.now()
globals.timestampforCSVfiles = '%02d' % time_now.hour + 'h' + '%02d' % time_now.minute + 'm' + '%02d' % time_now.second + 's'
# This step check validity of input data and set global default values
valid_input_data, error_message = self.check_input_data()
if not valid_input_data:
messagebox.showinfo('Alert', error_message)
else:
# Start new db connection for experiment result updating
if globals.data_from_db:
connection_status = globals.start_db_connection()
if not connection_status:
globals.stop_app('Database connection for saving experiment result is unsuccessful')
return
input_training_settings = self.get_input_training_settings_from_UI()
# Update input_data_setting into app.in
ini_file.update_training_tab_layout_data(input_training_settings)
self.convert_features_into_json_content()
# Because binary_mode changes the non-main labels into 'Non-...'
globals.label_set = self.label_set_origin.copy()
if globals.monitoring_mode:
globals.monitoring_data_frame = globals.monitoring_data_frame_origin.copy()
if globals.binary_mode:
log_message('BINARY classification with main label: ' + globals.main_label)
else:
log_message('MULTI-CLASS classification '+ globals.label_set.str.cat(sep=' '))
train_valid_test_data_filtered_by_selected_labels = globals.train_valid_test_data_frame.loc[
globals.train_valid_test_data_frame['label'].isin(globals.label_set)].sort_values(by=['timestamp'], ascending=True)
if globals.monitoring_mode:
monitoring_data_filtered_by_selected_labels = globals.monitoring_data_frame.loc[
globals.monitoring_data_frame['label'].isin(globals.label_set)].sort_values(
by=['timestamp'], ascending=True)
# The activities present in the monitoring data must cover the labels used in training so that the metrics are properly calculated
monitoring_data_label_set = pd.Series(np.unique(np.array(monitoring_data_filtered_by_selected_labels['label']).tolist()))
monitoring_data_label_set = monitoring_data_label_set.sort_values(ascending=True)
globals.monitoring_mode = monitoring_data_label_set.equals(globals.label_set)
if not globals.monitoring_mode:
messagebox.showinfo('Alert', 'The activities (labels) in the monitoring data do not match those in the training data. Please re-select the datasets')
return
else:
monitoring_data_filtered_by_selected_labels = pd.DataFrame()
# Reset monitoring data everytime user clickes on fitting
globals.monitoring_data_fr = pd.DataFrame() # This dataframe includes test column values, ground-truth label and predicted label
globals.predicted_data_fr = pd.DataFrame() # This dataframe includes data for the simulation
globals.curr_monitoring_algorithm = ''
globals.curr_monitoring_window_size = 0
globals.curr_monitoring_sampling_rate = 0
self.random_forest = classifiers.CLS(RandomForestClassifier(n_estimators = 100),'Random_Forest')
self.decision_tree = classifiers.CLS(DecisionTreeClassifier(criterion='entropy', max_depth=10),'Decision_Tree')
self.support_vector_machine = classifiers.CLS(svm.SVC(C=1.0, kernel='rbf', gamma='scale', decision_function_shape='ovo'),'SVM')
self.naive_bayes = classifiers.CLS(GaussianNB(),'Naive_Bayes')
self.kfold = int(self.cboKfold.get())
self.no_of_original_train_valid_test_data_points = len(train_valid_test_data_filtered_by_selected_labels.index)
if self.rdoResampling.get() == 0: # User wants to keep the original sampling rates
# These variables are the same in the case of keeping original sampling rate
self.no_of_resampled_train_data_points = self.no_of_original_train_valid_test_data_points
globals.resampling_rate = globals.original_sampling_rate
log_message('--------------------------------------------------------Running with original sampling rate at ' + str(globals.resampling_rate) + 'Hz ')
globals.csv_txt_file_exporter = data_exporter.CsvTxtFileExporter(globals.resampling_rate)
for window_size in range(int(self.txtWindowSizeFrom_text.get()), int(self.txtWindowSizeTo_text.get()) + 1,
int(self.txtWindowStep_text.get())):
window_stride_in_ms = math.floor(window_size * int(self.txtWindowStride_text.get()) / 100)
log_message('Begin train-valid-test processing at window size ' + str(window_size) + 'ms with stride of ' + str(window_stride_in_ms) + 'ms')
log_message('Start calculating features for train data')
self.process_at_window_size_stride (train_valid_test_data_filtered_by_selected_labels, monitoring_data_filtered_by_selected_labels, window_size, window_stride_in_ms)
else: # User wants to resample the input data
for resampling_rate in range(int(str(self.cboDownSamplingFrom.get())), int(str(self.cboDownSamplingTo.get())) + 1,
int(str(self.cboDownSamplingStep.get()))):
globals.resampling_rate = resampling_rate
log_message('--------------------------------------------------------Running with resampled rate at ' + str(globals.resampling_rate) + 'Hz ')
log_message('Sensor data is being resampled')
resampling_dict = {}
for axis in globals.list_axes_to_apply_functions:
resampling_dict[axis] = globals.function_set_for_resampling
resampled_train_valid_data = data_resampling.resampled_frame(train_valid_test_data_filtered_by_selected_labels, globals.label_set,
resampling_dict,
globals.list_axes_to_apply_functions, resampling_rate)
self.no_of_resampled_train_data_points = len(resampled_train_valid_data.index)
if globals.monitoring_mode:
resampled_monitoring_data = data_resampling.resampled_frame(monitoring_data_filtered_by_selected_labels, globals.label_set,
resampling_dict,
globals.list_axes_to_apply_functions, resampling_rate)
else:
resampled_monitoring_data = pd.DataFrame()
globals.csv_txt_file_exporter = data_exporter.CsvTxtFileExporter(globals.resampling_rate)
for window_size in range(int(self.txtWindowSizeFrom_text.get()), int(self.txtWindowSizeTo_text.get()) + 1,
int(self.txtWindowStep_text.get())):
window_stride_in_ms = math.floor(window_size * int(self.txtWindowStride_text.get()) / 100)
log_message('Begin train-valid-test processing at window size ' + str(window_size) + 'ms with stride of ' + str(window_stride_in_ms) + 'ms')
log_message('Start calculating features for train data')
self.process_at_window_size_stride (resampled_train_valid_data, resampled_monitoring_data, window_size, window_stride_in_ms)
# winsound.Beep(1000, 300)
if globals.data_from_db:
connection_status = globals.close_db_connection()
if not connection_status:
log_message('Closing the database connection was unsuccessful')
# Monitoring window size is set to the beginning window size (for a better UI experience)
self.txtSimulationWindowSize_text.set(self.txtWindowSizeFrom_text.get())
if globals.monitoring_mode:
self.enable_monitoring_process_buttons()
else:
self.disable_monitoring_process_buttons()
def process_at_window_size_stride (self, train_valid_test_data_frame, monitoring_data_frame, window_size, window_stride_in_ms):
main_and_non_main_labels_set = None
main_and_non_main_labels_narray_temp = None
if globals.binary_mode:
# Because the label_set was changed to Main and Non-Main in the last window setting -> restore the original labels
globals.label_set = self.label_set_origin.copy()
main_and_non_main_labels_set = globals.label_set.copy()
main_and_non_main_labels_narray_temp = main_and_non_main_labels_set.to_numpy(copy=True)
main_and_non_main_labels_narray_temp = np.append(main_and_non_main_labels_narray_temp, 'Non-' + globals.main_label)
agg_train_valid_test_unfiltered_unbalanced = data_resampling.aggregated_frame(train_valid_test_data_frame,
globals.label_set,
globals.features_in_dictionary,
globals.list_axes_to_apply_functions,
window_size,
window_stride_in_ms)
log_message('End calculating features for train data')
globals.csv_txt_file_exporter.create_window_size_stride_folder(window_size, window_stride_in_ms)
if globals.csv_saving:
globals.csv_txt_file_exporter.save_into_csv_file(agg_train_valid_test_unfiltered_unbalanced,'1_train_valid_test_set','01_train_valid_test_imbalanced_set_all_instances.csv')
# End calculating features for the training phase <-
# Calculate the range of data points allowed in a window
minimum_count_allowed = round((window_size * globals.resampling_rate / 1000) * (1 - globals.data_point_filter_rate))
maximum_count_allowed = round((window_size * globals.resampling_rate / 1000) * (1 + globals.data_point_filter_rate))
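# Hedged worked example (illustrative numbers, not taken from the data): with
# window_size = 1000 ms, resampling_rate = 10 Hz and data_point_filter_rate = 0.2,
# a full window is expected to hold 1000 * 10 / 1000 = 10 data points, so only
# windows containing between 8 and 12 points are kept.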
# Filtering the number of data points for each window ->
agg_train_valid_test_filtered_unbalanced = agg_train_valid_test_unfiltered_unbalanced.loc[
(agg_train_valid_test_unfiltered_unbalanced['count'] >= minimum_count_allowed) & (
agg_train_valid_test_unfiltered_unbalanced['count'] <= maximum_count_allowed)]
# Get the balanced Train_Valid and Test data sets
balanced_train_valid_dataset, test_dataset, return_status = data_proccessor.get_balanced_train_valid_test_data_set(agg_train_valid_test_filtered_unbalanced)
if not return_status:
globals.stop_app('Training data is insufficient: not enough labels/activities for model fitting')
return
if globals.csv_saving:
globals.csv_txt_file_exporter.save_into_csv_file(balanced_train_valid_dataset, '1_train_valid_test_set', '02_train_valid_balanced_dataset_with_' + str(
globals.minimum_train_valid_instance_for_each_label) + '_instances_for_each_class.csv')
balanced_train_valid_dataset = balanced_train_valid_dataset.drop(['cattle_id'], axis = 1)
balanced_train_valid_dataset = balanced_train_valid_dataset.drop(['count'], axis = 1)
balanced_train_valid_dataset = balanced_train_valid_dataset.reset_index(drop = True)
# Open and write info to a text file ->
globals.csv_txt_file_exporter.create_experiment_result_txt_file()
# Initialise the metrics for the classifiers
self.random_forest.init_metrics()
self.decision_tree.init_metrics()
self.support_vector_machine.init_metrics()
self.naive_bayes.init_metrics()
feature_cols = list(balanced_train_valid_dataset.columns.values)
feature_cols.remove('timestamp_human')
feature_cols.remove('label')
X = balanced_train_valid_dataset[feature_cols] # Features
y = balanced_train_valid_dataset['label'] # Target
# Stratified k-fold
kf = StratifiedKFold(n_splits=self.kfold, shuffle=True) # Considering random_state = 0??
k_fold_round = int(0)
for train_index, valid_index in kf.split(X, y):
k_fold_round += 1
X_train = pd.DataFrame(X, columns = feature_cols, index = train_index)
X_valid = pd.DataFrame(X, columns = feature_cols, index = valid_index)
y_train_df = pd.DataFrame(y, columns = ['label'], index = train_index)
y_train = y_train_df['label']
y_valid_df = pd.DataFrame(y, columns = ['label'], index = valid_index)
y_valid = y_valid_df['label']
if globals.csv_saving is True:
globals.csv_txt_file_exporter.save_into_csv_file(X_train, '3_kfold', str(k_fold_round) + 'th_round_fold_X_train.csv')
globals.csv_txt_file_exporter.save_into_csv_file(X_valid, '3_kfold', str(k_fold_round) + 'th_round_fold_X_validation.csv')
globals.csv_txt_file_exporter.save_into_csv_file(y_train, '3_kfold', str(k_fold_round) + 'th_round_fold_y_train.csv')
globals.csv_txt_file_exporter.save_into_csv_file(y_valid, '3_kfold', str(k_fold_round) + 'th_round_fold_y_validation.csv')
globals.csv_txt_file_exporter.write_single_line('\n------------------Round ' + str(k_fold_round) + '------------------')
self.random_forest.train_validate(self.RandomForestVar.get(), X_train, y_train, X_valid, y_valid)
self.decision_tree.train_validate(self.DecisionTreeVar.get(), X_train, y_train, X_valid, y_valid)
self.support_vector_machine.train_validate(self.SVMVar.get(), X_train, y_train, X_valid, y_valid)
self.naive_bayes.train_validate(self.NaiveBayesVar.get(), X_train, y_train, X_valid, y_valid)
globals.csv_txt_file_exporter.write_single_line('\n')
print('-------------------------------------------------------------------------')
self.random_forest.calc_and_save_train_valid_result(self.RandomForestVar.get(), self.kfold)
self.decision_tree.calc_and_save_train_valid_result(self.DecisionTreeVar.get(), self.kfold)
self.support_vector_machine.calc_and_save_train_valid_result(self.SVMVar.get(), self.kfold)
self.naive_bayes.calc_and_save_train_valid_result(self.NaiveBayesVar.get(), self.kfold)
# Begin processing the test phase
labels_narray_temp = globals.label_set.to_numpy()
test_dataset = test_dataset.dropna().set_index('timestamp')
if globals.csv_saving:
globals.csv_txt_file_exporter.save_into_csv_file(test_dataset, '1_train_valid_test_set', '03_test_dataset_counts_filtered.csv')
test_dataset = test_dataset.drop(['cattle_id'], axis = 1)
test_dataset = test_dataset.drop(['count'], axis = 1)
test_dataset = test_dataset.reset_index(drop = True)
X_test = test_dataset[feature_cols] # Features
y_test = test_dataset['label'] # Target
self.random_forest.predict_test_data(self.RandomForestVar.get(), X_test, y_test, labels_narray_temp, main_and_non_main_labels_set, main_and_non_main_labels_narray_temp)
self.decision_tree.predict_test_data(self.DecisionTreeVar.get(), X_test, y_test, labels_narray_temp, main_and_non_main_labels_set, main_and_non_main_labels_narray_temp)
self.support_vector_machine.predict_test_data(self.SVMVar.get(), X_test, y_test, labels_narray_temp, main_and_non_main_labels_set, main_and_non_main_labels_narray_temp)
self.naive_bayes.predict_test_data(self.NaiveBayesVar.get(), X_test, y_test, labels_narray_temp, main_and_non_main_labels_set, main_and_non_main_labels_narray_temp)
if globals.monitoring_mode:
print('-------------------------------------------------------------------------')
monitoring_window_stride_in_ms = math.floor(window_size * int(self.txtWindowSimuStride_text.get()) / 100)
log_message('Begin calculating features for the Monitoring data with window size ' + str(window_size) + 'ms and stride of ' + str(monitoring_window_stride_in_ms) + 'ms')
if globals.binary_mode:
# Because the label_set was changed to Main and Non-Main in the last window setting -> restore the original labels
globals.label_set = self.label_set_origin.copy()
# This dataframe is for testing on unseen data (from monitoring data table) ->
agg_monitor = data_resampling.aggregated_frame(monitoring_data_frame, globals.label_set,
globals.features_in_dictionary,
globals.list_axes_to_apply_functions, window_size, monitoring_window_stride_in_ms)
globals.csv_txt_file_exporter.save_into_csv_file(agg_monitor, '2_monitoring_data', '0_unfiltered_monitoring_data_set.csv')
agg_monitor_counts_filtered = agg_monitor.loc[
(agg_monitor['count'] >= minimum_count_allowed) & (agg_monitor['count'] <= maximum_count_allowed)]
agg_monitor_counts_filtered = agg_monitor_counts_filtered.dropna().reset_index(drop = True)
# To be deleted ->
if globals.csv_saving:
globals.csv_txt_file_exporter.save_into_csv_file(agg_monitor_counts_filtered, '2_monitoring_data', '1_filtered_monitoring_set.csv')
if globals.binary_mode:
# Get the original list of all activities in the monitoring data set before changing sub labels/activities into the Non-... label. This is needed for the confusion matrix later.
globals.main_and_non_main_labels_y_root_monitoring_temp = agg_monitor_counts_filtered['label'].to_numpy(copy=True)
# Replace all non-main labels in the monitoring dataset with the Non-main label
for _, value in globals.sub_labels_set.items():
agg_monitor_counts_filtered.loc[
agg_monitor_counts_filtered.label == value, 'label'] = 'Non-' + globals.main_label
# Change the label_set into two labels only
globals.label_set = pd.Series([globals.main_label, 'Non-' + globals.main_label])
from __future__ import absolute_import, division, print_function
import pytest
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series
from string import ascii_lowercase
from blaze.compute.core import compute
from blaze import dshape, discover, transform
from blaze.expr import symbol, join, by, summary, distinct, shape
from blaze.expr import (merge, exp, mean, count, nunique, sum, min, max, any,
var, std, concat)
from blaze.compatibility import builtins, xfail, assert_series_equal
t = symbol('t', 'var * {name: string, amount: int, id: int}')
nt = symbol('t', 'var * {name: ?string, amount: float64, id: int}')
df = DataFrame([['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]], columns=['name', 'amount', 'id'])
ndf = DataFrame([['Alice', 100.0, 1],
['Bob', np.nan, 2],
[np.nan, 50.0, 3]], columns=['name', 'amount', 'id'])
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
dfbig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
def test_series_columnwise():
s = Series([1, 2, 3], name='a')
t = symbol('t', 'var * {a: int64}')
result = compute(t.a + 1, s)
assert_series_equal(s + 1, result)
def test_symbol():
tm.assert_frame_equal(compute(t, df), df)
def test_projection():
tm.assert_frame_equal(compute(t[['name', 'id']], df),
df[['name', 'id']])
def test_eq():
assert_series_equal(compute(t['amount'] == 100, df),
df['amount'] == 100)
def test_selection():
tm.assert_frame_equal(compute(t[t['amount'] == 0], df),
df[df['amount'] == 0])
tm.assert_frame_equal(compute(t[t['amount'] > 150], df),
df[df['amount'] > 150])
def test_arithmetic():
assert_series_equal(compute(t['amount'] + t['id'], df),
df.amount + df.id)
assert_series_equal(compute(t['amount'] * t['id'], df),
df.amount * df.id)
assert_series_equal(compute(t['amount'] % t['id'], df),
df.amount % df.id)
def test_join():
left = DataFrame(
[['Alice', 100], ['Bob', 200]], columns=['name', 'amount'])
right = DataFrame([['Alice', 1], ['Bob', 2]], columns=['name', 'id'])
lsym = symbol('L', 'var * {name: string, amount: int}')
rsym = symbol('R', 'var * {name: string, id: int}')
joined = join(lsym, rsym, 'name')
assert (dshape(joined.schema) ==
dshape('{name: string, amount: int, id: int}'))
result = compute(joined, {lsym: left, rsym: right})
expected = DataFrame([['Alice', 100, 1], ['Bob', 200, 2]],
columns=['name', 'amount', 'id'])
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(joined.fields)
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
left = DataFrame(left, columns=['x', 'y', 'z'])
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
right = DataFrame(right, columns=['x', 'y', 'w'])
lsym = symbol('lsym', 'var * {x: int, y: int, z: int}')
rsym = symbol('rsym', 'var * {x: int, y: int, w: int}')
j = join(lsym, rsym, ['x', 'y'])
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
expected = DataFrame(expected, columns=['x', 'y', 'z', 'w'])
result = compute(j, {lsym: left, rsym: right})
print(result)
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(j.fields)
def test_unary_op():
assert (compute(exp(t['amount']), df) == np.exp(df['amount'])).all()
def test_abs():
assert (compute(abs(t['amount']), df) == abs(df['amount'])).all()
def test_neg():
assert_series_equal(compute(-t['amount'], df),
-df['amount'])
@xfail(reason='Projection does not support arithmetic')
def test_neg_projection():
assert_series_equal(compute(-t[['amount', 'id']], df),
-df[['amount', 'id']])
def test_columns_series():
assert isinstance(compute(t['amount'], df), Series)
assert isinstance(compute(t['amount'] > 150, df), Series)
def test_reductions():
assert compute(mean(t['amount']), df) == 350 / 3
assert compute(count(t['amount']), df) == 3
assert compute(sum(t['amount']), df) == 100 + 200 + 50
assert compute(min(t['amount']), df) == 50
assert compute(max(t['amount']), df) == 200
assert compute(nunique(t['amount']), df) == 3
assert compute(nunique(t['name']), df) == 2
assert compute(any(t['amount'] > 150), df) is True
assert compute(any(t['amount'] > 250), df) is False
assert compute(var(t['amount']), df) == df.amount.var(ddof=0)
assert compute(var(t['amount'], unbiased=True), df) == df.amount.var()
assert compute(std(t['amount']), df) == df.amount.std(ddof=0)
assert compute(std(t['amount'], unbiased=True), df) == df.amount.std()
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[-1], df) == df.amount.iloc[-1]
def test_reductions_on_dataframes():
assert compute(count(t), df) == 3
assert shape(compute(count(t, keepdims=True), df)) == (1,)
def test_1d_reductions_keepdims():
series = df['amount']
for r in [sum, min, max, nunique, count, std, var]:
result = compute(r(t.amount, keepdims=True), {t.amount: series})
assert type(result) == type(series)
def test_distinct():
dftoobig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
d_t = distinct(tbig)
d_df = compute(d_t, dftoobig)
tm.assert_frame_equal(d_df, dfbig)
# Test idempotence
tm.assert_frame_equal(compute(d_t, d_df), d_df)
def test_distinct_on():
cols = ['name', 'sex', 'amount', 'id']
df = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=cols)
s = symbol('s', discover(df))
computed = compute(s.distinct('sex'), df)
tm.assert_frame_equal(
computed,
pd.DataFrame([['Alice', 'F', 100, 1],
['Drew', 'M', 100, 5]],
columns=cols),
)
def test_by_one():
result = compute(by(t['name'], total=t['amount'].sum()), df)
expected = df.groupby('name')['amount'].sum().reset_index()
expected.columns = ['name', 'total']
tm.assert_frame_equal(result, expected)
def test_by_two():
result = compute(by(tbig[['name', 'sex']],
total=sum(tbig['amount'])), dfbig)
expected = DataFrame([['Alice', 'F', 200],
['Drew', 'F', 100],
['Drew', 'M', 300]],
columns=['name', 'sex', 'total'])
tm.assert_frame_equal(result, expected)
def test_by_three():
expr = by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum())
result = compute(expr, dfbig)
expected = DataFrame([['Alice', 'F', 204],
['Drew', 'F', 104],
['Drew', 'M', 310]], columns=['name', 'sex', 'total'])
expected.columns = expr.fields
tm.assert_frame_equal(result, expected)
def test_by_four():
t = tbig[['sex', 'amount']]
expr = by(t['sex'], max=t['amount'].max())
result = compute(expr, dfbig)
expected = DataFrame([['F', 100],
['M', 200]], columns=['sex', 'max'])
tm.assert_frame_equal(result, expected)
def test_join_by_arcs():
df_idx = DataFrame([['A', 1],
['B', 2],
['C', 3]],
columns=['name', 'node_id'])
df_arc = DataFrame([[1, 3],
[2, 3],
[3, 1]],
columns=['node_out', 'node_id'])
t_idx = symbol('t_idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('t_arc', 'var * {node_out: int32, node_id: int32}')
joined = join(t_arc, t_idx, "node_id")
want = by(joined['name'], count=joined['node_id'].count())
result = compute(want, {t_arc: df_arc, t_idx: df_idx})
result_pandas = pd.merge(df_arc, df_idx, on='node_id')
gb = result_pandas.groupby('name')
expected = gb.node_id.count().reset_index().rename(columns={
'node_id': 'count'
})
tm.assert_frame_equal(result, expected)
assert list(result.columns) == ['name', 'count']
def test_join_suffixes():
df = pd.DataFrame(
list(dict((k, n) for k in ascii_lowercase[:5]) for n in range(5)),
)
a = symbol('a', discover(df))
b = symbol('b', discover(df))
suffixes = '_x', '_y'
joined = join(a, b, 'a', suffixes=suffixes)
expected = pd.merge(df, df, on='a', suffixes=suffixes)
result = compute(joined, {a: df, b: df})
tm.assert_frame_equal(result, expected)
def test_join_promotion():
a_data = pd.DataFrame([[0.0, 1.5], [1.0, 2.5]], columns=list('ab'))
b_data = pd.DataFrame([[0, 1], [1, 2]], columns=list('ac'))
a = symbol('a', discover(a_data))
b = symbol('b', discover(b_data))
joined = join(a, b, 'a')
assert joined.dshape == dshape('var * {a: float64, b: ?float64, c: int64}')
expected = pd.merge(a_data, b_data, on='a')
result = compute(joined, {a: a_data, b: b_data})
tm.assert_frame_equal(result, expected)
def test_sort():
tm.assert_frame_equal(compute(t.sort('amount'), df),
df.sort('amount'))
tm.assert_frame_equal(compute(t.sort('amount', ascending=True), df),
df.sort('amount', ascending=True))
tm.assert_frame_equal(compute(t.sort(['amount', 'id']), df),
df.sort(['amount', 'id']))
def test_sort_on_series_no_warning(recwarn):
expected = df.amount.order()
recwarn.clear()
assert_series_equal(compute(t['amount'].sort('amount'), df), expected)
# raises an AssertionError if no warning occurs; same for the check below
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
assert_series_equal(compute(t['amount'].sort(), df), expected)
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
def test_field_on_series():
expr = symbol('s', 'var * int')
data = Series([1, 2, 3, 4], name='s')
assert_series_equal(compute(expr.s, data), data)
def test_head():
tm.assert_frame_equal(compute(t.head(1), df), df.head(1))
def test_tail():
tm.assert_frame_equal(compute(t.tail(1), df), df.tail(1))
def test_label():
expected = df['amount'] * 10
expected.name = 'foo'
assert_series_equal(compute((t['amount'] * 10).label('foo'), df),
expected)
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), df)
expected = df.rename(columns={'name': 'NAME', 'id': 'ID'})
tm.assert_frame_equal(result, expected)
def test_relabel_series():
result = compute(t.relabel({'name': 'NAME'}), df.name)
assert result.name == 'NAME'
ts = pd.date_range('now', periods=10).to_series().reset_index(drop=True)
tframe = DataFrame({'timestamp': ts})
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
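# Illustrative sketch (not part of the original model code): ramp_fun linearly
# interpolates between two contact matrices over l days, e.g. halfway through a
# hypothetical 10-day ramp that starts on 2020-03-15:
#   Nc_old, Nc_new = np.zeros((2, 2)), np.ones((2, 2))
#   ramp_fun(Nc_old, Nc_new, pd.Timestamp('2020-03-20'), pd.Timestamp('2020-03-15'), l=10)
#   # -> a 2x2 array filled with 0.5 (5/10 of the way from Nc_old to Nc_new)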
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
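# Hedged usage sketch (assumes the interim Proximus mobility CSVs are present
# under data/interim/mobility/prov/fractional):
#   all_mob, avg_mob = load_all_mobility_data('prov', dtype='fractional')
#   all_mob.loc[pd.Timestamp('2020-09-01'), 'place']   # one fractional mobility matrix
#   avg_mob                                            # average matrix over all dates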
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
Note: only works with datetime input (no integer time steps).
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
place = self.default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
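# Hedged usage sketch; `mobility_update` is a hypothetical instance name:
#   all_mob, avg_mob = load_all_mobility_data('prov')
#   mobility_update = make_mobility_update_function(all_mob, avg_mob)
#   P = mobility_update.mobility_wrapper_func(pd.Timestamp('2020-04-01'), None, None)
#   # on/before 2020-03-17 an identity matrix is returned; afterwards the Proximus
#   # matrix for that date (or the average as fallback) is used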
###################
## VOC functions ##
###################
class make_VOC_function():
"""
Class that returns a time-dependent parameter function for the COVID-19 SEIRD model parameter alpha (variant fraction).
The current implementation includes the alpha through delta strains.
If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variants is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
A logistic model fitted to prevalence data of the delta variant is always used.
Input
-----
*df_abc: pd.dataFrame (optional)
Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
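# Hedged usage sketch: without data the variant fractions follow the fitted
# logistic curves; at the delta midpoint (2021-06-25) roughly half of the
# circulating strains are delta:
#   VOC_function = make_VOC_function()
#   VOC_function(pd.Timestamp('2021-06-25'), states=None, param=None)
#   # -> approximately array([0. , 0.5, 0.5])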
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, the first-dose data from Sciensano are used. Beyond the available data, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {G}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.IntervalIndex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of the considered spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy, accounting for refusal per age group
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" first doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses, individuals are transferred to the vaccination circuit after some time delay after the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
daily_doses : int
Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : int
Index of age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups, an index of 8 corresponds to not vaccinating the age group corresponding with vacc_order[idx].
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for the non-spatial multi-vaccination-dose model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
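# Hedged usage sketch (df_vacc, sim_time, states, param and initN are hypothetical
# names for the inputs documented above):
#   vacc_function = make_vaccination_function(df_vacc)
#   N_vacc = vacc_function(sim_time, states, param, initN)
#   # inside the data range this returns the reported (delay-shifted) doses;
#   # beyond it, the hypothetical campaign defined by daily_doses etc. is used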
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
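# Hedged usage sketch for the non-spatial case (`policy_function` is a
# hypothetical instance name):
#   policy_function = make_contact_matrix_function(df_google, Nc_all)
#   CM = policy_function(pd.Timestamp('2020-11-02'), prev_home=0.6,
#                        prev_schools=0.7, prev_work=0.5, prev_rest=0.5,
#                        school=0)
#   # `school` must always be given; the other sector openings default to the
#   # Google mobility reductions for that date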
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = | pd.Timestamp('2020-11-02') | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 14 19:01:45 2021
@author: David
"""
from pathlib import Path
from datetime import datetime as dt
import zipfile
import os.path
import numpy as np
import scipy.signal as sig
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
from matplotlib import gridspec
import seaborn as sea
import fig_util
from IPython.display import display, Image
SLATE = (0.15, 0.15, 0.15)
WD_ARR = {
1: 'Montag',
2: 'Dienstag',
3: 'Mittwoch',
4: 'Donnerstag',
5: 'Freitag',
6: 'Samstag',
7: 'Sonntag'
}
OUTPUT_DIR = '..\\output\\RNowcast\\anim\\'
OUTPUT_DIR = 'D:\\COVID-19\\output\\RNowcast\\anim\\'
ARCHIVE_FPATH = '..\\data\\RKI\\Nowcasting\\Nowcast_R_{:s}.csv'
ARCHIVE_ZIP_URL = 'https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung/archive/refs/heads/main.zip'
#'https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung/raw/main/Archiv/Nowcast_R_{:s}.csv'
SPECIFIC_DAY = None
#SPECIFIC_DAY = '2021-09-24'
#SPECIFIC_DAY = '2021-10-08'
#SPECIFIC_DAY = '2021-11-12'
INPUT_DATA_RANGE = ['2021-03-16', dt.now().strftime('%Y-%m-%d')]
PLOT_MAX_DATE = '2021-12-31'
DO_EXTRAPOLATION = False
if not SPECIFIC_DAY is None:
INPUT_DATA_RANGE[1] = SPECIFIC_DAY
dataset_date_range = pd.date_range(*INPUT_DATA_RANGE)
r_idx_min = dataset_date_range[0] - pd.DateOffset(days=4)
r_idx = pd.date_range(r_idx_min, dataset_date_range[-5].strftime('%Y-%m-%d'))
r_cols = pd.Int64Index(range(4, 4+7*6, 1))
Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)
# %%
rep_tri = pd.DataFrame(
data=np.zeros((r_idx.size, r_cols.size)),
index=r_idx,
columns=r_cols)
datasets = {}
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
print(dataset_date_str)
#if os.path.isfile(ARCHIVE_FPATH.format(dataset_date_str)):
try:
data = pd.read_csv(
ARCHIVE_FPATH.format(dataset_date_str),
index_col = 'Datum',
parse_dates = True
)
except ValueError:
# two steps:
data = pd.read_csv(
ARCHIVE_FPATH.format(dataset_date_str),
parse_dates = True,
sep=';', decimal=',',
skip_blank_lines=False
)
extra_rows = data.index.size - data.index[data.Datum.isna()][0]
data = pd.read_csv(
ARCHIVE_FPATH.format(dataset_date_str),
index_col = 'Datum',
parse_dates = True,
sep=';', decimal=',',
date_parser=lambda x: dt.strptime(x, '%d.%m.%Y'),
skipfooter=extra_rows, encoding='UTF-8'
)
data.rename(columns={'Schätzer_Neuerkrankungen': 'PS_COVID_Faelle'},
inplace=True)
last_dataset = data.loc[:,['PS_COVID_Faelle']].copy()
last_dataset['Iso Weekdays'] = last_dataset.index.map(lambda d: d.isoweekday())
last_dataset['Date Offset'] = (dataset_date - last_dataset.index).days
datasets[dataset_date_str] = last_dataset
comm_rows = r_idx.intersection(data.index)
data = data.loc[comm_rows]
d_cols = (dataset_date-data.index).days
data['Offset'] = d_cols
comm_cols = d_cols.intersection(r_cols)
max_offset = comm_cols.max()
data = data.loc[data['Offset'] <= max_offset, ['Offset', 'PS_COVID_Faelle']]
data = data.pivot(columns='Offset', values='PS_COVID_Faelle')
data.fillna(0, inplace=True)
rep_tri.loc[data.index, comm_cols] += data.loc[:, comm_cols]
(na_cols, na_rows) = np.tril_indices(rep_tri.shape[0], -1)
if any(na_cols >= r_cols.size):
max_cols = np.nonzero(na_cols >= r_cols.size)[0][0]
na_cols = na_cols[:max_cols]
na_rows = na_rows[:max_cols]
rep_tri2 = rep_tri.to_numpy().copy()
rep_tri2[r_idx.size-1-na_rows, na_cols] = np.nan
rep_tri3 = rep_tri.copy()
rep_tri3.loc[:,:] = rep_tri2
rep_tri4 = rep_tri3.iloc[:-14, :].div(rep_tri3.apply(lambda s: s[pd.Series.last_valid_index(s)], axis=1), axis=0)
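# Illustrative note (reading of the line above, not original code): each row of
# rep_tri4 expresses the nowcast for one date as a fraction of its most mature
# (last valid) estimate, e.g. for a single date d in rep_tri3.index:
#   s = rep_tri3.loc[d]
#   s / s[s.last_valid_index()]   # correction-factor trajectory over data age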
# %%
q10_dist = pd.DataFrame(index=r_cols, columns=range(1,7,1))
lq_dist = pd.DataFrame(index=r_cols, columns=range(1,7,1))
med_dist = pd.DataFrame(index=r_cols, columns=range(1,7,1))
uq_dist = pd.DataFrame(index=r_cols, columns=range(1,7,1))
q90_dist = pd.DataFrame(index=r_cols, columns=range(1,7,1))
max_days_offset = r_cols.max()
for i in range(7):
iwd = rep_tri4.index[i].isoweekday()
rep_tri5 = rep_tri4.iloc[i::7]
tri5_med = rep_tri5.median(axis=0)
rep_tri5 = rep_tri5.loc[(((rep_tri5-tri5_med) > 1) | (rep_tri5-tri5_med < -1)).sum(axis=1)==0]
rep_tri5 *= 100
test = rep_tri5.iloc[:,0:11].melt(var_name='Datenstand in "n Tage nach Datum des Nowcasts"', value_name='Nowcast Korrekturfaktor in %')
test = test.loc[~test['Nowcast Korrekturfaktor in %'].isna()]
fig = plt.figure(figsize=(16, 9))
gs = gridspec.GridSpec(2, 1, figure=fig,
height_ratios = [7, 1],
hspace = 0.1)
ax = fig.add_subplot(gs[0, 0])
fig.suptitle('COVID-19 - Variation des RKI Nowcasts der Fallzahlen über Datenstand-Alter nach Wochentag: {:s}'.format(WD_ARR[iwd]),
horizontalalignment='center',
verticalalignment='center',
fontsize=21, color=SLATE, y=0.91)
sea.violinplot(x='Datenstand in "n Tage nach Datum des Nowcasts"',
y='Nowcast Korrekturfaktor in %',
data=test,
scale="count")
ax.set_ylim([0, 160])
ax.yaxis.set_major_locator(MultipleLocator(20))
ax.yaxis.set_minor_locator(MultipleLocator(5))
ax.set_xlim([-1, 11])
ax.tick_params(which='minor', length=0, width=0, pad=10)
ax.tick_params(axis=u'both', labelsize=16, labelcolor = SLATE)
ax.grid(True, which='major', axis='both', linestyle='-', color=(0.85,0.85,0.85))
ax.grid(True, which='minor', axis='both', linestyle='-', color=(0.95,0.95,0.95))
ax.set_ylabel('Nowcast Korrekturfaktor in %', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax.set_xlabel('Datenstand in "n Tage nach Datum des Nowcasts"', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax2 = fig.add_subplot(gs[1, 0])
ax2.axis('off')
if dataset_date_range[0].year == dataset_date_range[-1].year:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
else:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.%y-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
plt.text(0, 0.05,
'Datenquelle:\n' +
'Robert Koch-Institut (RKI), an der Heiden, Matthias (2021): SARS-CoV-2-Nowcasting und -R-Schaetzung, Berlin: Zenodo. DOI:10.5281/zenodo.4680400\n'+
'URL: https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung ; ' +
'Abfragedatum/Datenstand: ' + Datenstand_range_str + ';\n' +
'Datenlizenz CC-BY 4.0 International; eigene Berechnung/eigene Darstellung',
fontsize=11.5)
if True:
exp_full_fname = '{:s}{:s}_{:d}_{:s}.png'.format(
OUTPUT_DIR + '..\\', 'Nowcast_Var', iwd, WD_ARR[iwd])
print('Saving ' + exp_full_fname)
try:
fig_util.set_size(fig, (1920.0/100.0, 1080.0/100.0), dpi=100, pad_inches=0.35)
except:
fig_util.force_fig_size(fig, (1920.0, 1080.0), dpi=100, pad_inches=0.35)
fig.savefig(exp_full_fname, dpi=100, bbox_inches='tight', pad_inches=0.35)
display(Image(filename=exp_full_fname))
plt.close()
else:
plt.show()
q10_dist.loc[:, iwd] = 0.01 * rep_tri5.quantile(0.1, axis=0)
lq_dist.loc[:, iwd] = 0.01 * rep_tri5.quantile(0.25, axis=0)
med_dist.loc[:, iwd] = 0.01 * rep_tri5.median(axis=0)
uq_dist.loc[:, iwd] = 0.01 * rep_tri5.quantile(0.75, axis=0)
q90_dist.loc[:, iwd] = 0.01 * rep_tri5.quantile(0.9, axis=0)
#input_matrix[np.tril_indices(input_matrix.shape[0], -1)] = np.nan
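# Hedged worked example (illustrative numbers, not taken from the data):
# med_dist[iwd][k] is the median fraction of the final nowcast that is already
# visible k days after a date falling on ISO weekday iwd. If med_dist[3][5]
# were 0.8, a Wednesday nowcast read 5 days later would be rescaled as
#   corrected = provisional_nowcast / 0.8
# which is how the 'Med NowNowcast' column is built in the next cell.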
# %%
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
print(dataset_date_str)
last_dataset = datasets[dataset_date_str]
last_dataset['Med NowNowcast'] = last_dataset.apply(lambda r: r['PS_COVID_Faelle'] if r['Date Offset'] > max_days_offset else r['PS_COVID_Faelle'] / med_dist[r['Iso Weekdays']][r['Date Offset']], axis=1)
#last_dataset['Q1 NowNowcast'] = last_dataset.apply(lambda r: r['PS_COVID_Faelle'] if r['Date Offset'] > max_days_offset else r['PS_COVID_Faelle'] / lq_dist[r['Iso Weekdays']][r['Date Offset']], axis=1)
#last_dataset['Q3 NowNowcast'] = last_dataset.apply(lambda r: r['PS_COVID_Faelle'] if r['Date Offset'] > max_days_offset else r['PS_COVID_Faelle'] / uq_dist[r['Iso Weekdays']][r['Date Offset']], axis=1)
last_dataset['Med NowNowcast 7d MA'] = np.hstack((
np.full((3), np.nan),
sig.correlate(last_dataset['Med NowNowcast'], np.full((7), 1.0/7), method='direct', mode='valid'),
np.full((3), np.nan)))
# last_dataset['Q1 NowNowcast 7d MA'] = np.hstack((
# np.full((3), np.nan),
# sig.correlate(last_dataset['Q1 NowNowcast'], np.full((7), 1.0/7), method='direct', mode='valid'),
# np.full((3), np.nan)))
# last_dataset['Q3 NowNowcast 7d MA'] = np.hstack((
# np.full((3), np.nan),
# sig.correlate(last_dataset['Q3 NowNowcast'], np.full((7), 1.0/7), method='direct', mode='valid'),
# np.full((3), np.nan)))
last_dataset['Nowcast 7d MA'] = np.hstack((
np.full((3), np.nan),
sig.correlate(last_dataset['PS_COVID_Faelle'], np.full((7), 1.0/7), method='direct', mode='valid'),
np.full((3), np.nan)))
v = last_dataset['Med NowNowcast 7d MA'].to_numpy()
v = v[4:] / v[:-4]
v = np.hstack((
np.full((6), np.nan),
v[:-2]))
last_dataset['R (Med NowNowcast 7d MA)'] = v
v = 2.0**(sig.correlate(np.log2(v), np.full((7), 1.0/7), method='direct', mode='valid'))
v = np.hstack((
np.full((3), np.nan),
v,
np.full((3), np.nan)))
last_dataset['Rgeom (Med NowNowcast 7d MA)'] = v
# v1 = last_dataset['Q1 NowNowcast 7d MA'].to_numpy()
# v3 = last_dataset['Q3 NowNowcast 7d MA'].to_numpy()
# vmin = np.vstack((v1, v3)).max(axis=0)
# vmax = np.vstack((v1, v3)).max(axis=0)
# vlo = vmin[4:] / vmax[:-4]
# vhi = vmax[4:] / vmin[:-4]
# vlo = np.hstack((
# np.full((3), np.nan),
# vlo,
# np.full((1), np.nan)))
# vhi = np.hstack((
# np.full((3), np.nan),
# vhi,
# np.full((1), np.nan)))
# last_dataset['R (Q3 NowNowcast 7d MA)'] = vhi
# last_dataset['R (Q1 NowNowcast 7d MA)'] = vlo
# vlo = 2.0**(sig.correlate(np.log2(vlo), np.full((7), 1.0/7), method='direct', mode='valid'))
# vhi = 2.0**(sig.correlate(np.log2(vhi), np.full((7), 1.0/7), method='direct', mode='valid'))
# vlo = np.hstack((
# np.full((3), np.nan),
# vlo,
# np.full((3), np.nan)))
# vhi = np.hstack((
# np.full((3), np.nan),
# vhi,
# np.full((3), np.nan)))
# last_dataset['Rgeom (Q3 NowNowcast 7d MA)'] = vhi
# last_dataset['Rgeom (Q1 NowNowcast 7d MA)'] = vlo
v = last_dataset['Nowcast 7d MA'].to_numpy()
v = v[4:] / v[:-4]
v = np.hstack((
np.full((6), np.nan),
v[:-2]))
last_dataset['R (Nowcast 7d MA)'] = v
v = 2.0**(sig.correlate(np.log2(v), np.full((7), 1.0/7), method='direct', mode='valid'))
v = np.hstack((
np.full((3), np.nan),
v,
np.full((3), np.nan)))
last_dataset['Rgeom (Nowcast 7d MA)'] = v
datasets[dataset_date_str] = last_dataset
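# Hedged note on the R estimate above: with an assumed generation interval of
# 4 days, R is the ratio of 7-day moving averages of the (now-)nowcast
# incidences taken 4 days apart, stored with a fixed reporting shift, i.e.
#   R[t] = MA7[t-2] / MA7[t-6]
# 'Rgeom' additionally applies a centred 7-day geometric mean (smoothing in
# log2 space) to this ratio.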
# %%
fidz = datasets[INPUT_DATA_RANGE[0]]['Rgeom (Med NowNowcast 7d MA)'].first_valid_index()
total_idz = datasets[INPUT_DATA_RANGE[1]]['Rgeom (Med NowNowcast 7d MA)'].index[12:-4].copy()
test = pd.DataFrame(index=total_idz, columns=dataset_date_range.copy())
for dataset_date in dataset_date_range:
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
cur_dataset = datasets[dataset_date_str]
comm_idz = total_idz.intersection(cur_dataset.index)
test.loc[comm_idz, dataset_date] = cur_dataset.loc[comm_idz, 'Rgeom (Med NowNowcast 7d MA)']
test_s = test.subtract(test.iloc[:, -1], axis=0)
# if np.isnan(test_s.iloc[0,0]):
# first_nnz_idx = np.nonzero(~test_s.iloc[:,0].isna().to_numpy())[0][0]
# test_s = test_s.iloc[first_nnz_idx:,:]
first_nz_idx = np.nonzero(test_s.iloc[:,0].isna().to_numpy())[0][0]-1
test_s = test_s.iloc[first_nz_idx:,:-1]
test_s['Datum'] = test_s.index.copy()
test_s = test_s.melt(value_name='Error', var_name='Report Date', id_vars='Datum').dropna()
test_s['Offset'] = (test_s['Report Date'] - test_s['Datum']).dt.days
test_s.drop(columns=['Report Date'], inplace=True)
test_s.loc[:, 'Error'] = pd.to_numeric(test_s.Error)
test_s = -test_s.pivot(index='Datum', columns='Offset', values='Error')
max_err = test_s.apply(lambda c: c.dropna().max(), axis=0)
min_err = test_s.apply(lambda c: c.dropna().min(), axis=0)
med_err = test_s.apply(lambda c: c.dropna().median(), axis=0)
q25_err = test_s.apply(lambda c: c.dropna().quantile(0.25), axis=0)
q75_err = test_s.apply(lambda c: c.dropna().quantile(0.75), axis=0)
q025_err = test_s.apply(lambda c: c.dropna().quantile(0.025), axis=0)
q975_err = test_s.apply(lambda c: c.dropna().quantile(0.975), axis=0)
iq50_err = (q75_err - q25_err)
iq95_err = (q975_err - q025_err)
#test2 = test.div(test.iloc[:,-1], axis=0)
#first_nz_idx = np.nonzero((test_s.iloc[:,0]!=1).to_numpy())[0][0]
# test2 = test2.iloc[first_nz_idx:,:]
# test2a = test2.iloc[:-(31+12), :]
# test3 = pd.DataFrame(index = test2a.index, columns = range(12, 100))
# for d in test2a.index:
# v = pd.DataFrame(data = test2a.loc[d, :].to_numpy().copy(),
# index = (test2a.columns - d).days,
# columns = ['data'])
# com_cols = test3.columns.intersection(v.index)
# test3.loc[d, com_cols] = v.loc[com_cols, 'data']-1
# error_band_md = test3.apply(lambda c: c.dropna().quantile(0.5) , axis=0)
# error_band_q1 = test3.apply(lambda c: c.dropna().quantile(0.25) , axis=0)
# error_band_q3 = test3.apply(lambda c: c.dropna().quantile(0.75) , axis=0)
# error_band_max = test3.apply(lambda c: c.dropna().max(), axis=0)
# error_band_min = test3.apply(lambda c: c.dropna().min(), axis=0)
# error_band_lo = error_band_md - 1.5 * (error_band_q3 - error_band_q1)
# error_band_hi = error_band_md + 1.5 * (error_band_q3 - error_band_q1)
# %%
band_data_med = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_min = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_max = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_iq95_lo = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_iq95_hi = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_iq50_lo = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_iq50_hi = pd.DataFrame(index = r_idx, columns=dataset_date_range)
max_num_entries = (dataset_date_range[-1]-dataset_date_range[0]).days
max_lut = max_err.index.max()
min_lut = max_err.index.min()
# max_err = test_s.apply(lambda c: c.dropna().max(), axis=0)
# min_err = test_s.apply(lambda c: c.dropna().min(), axis=0)
# med_err = test_s.apply(lambda c: c.dropna().median(), axis=0)
# q25_err = test_s.apply(lambda c: c.dropna().quantile(0.25), axis=0)
# q75_err = test_s.apply(lambda c: c.dropna().quantile(0.75), axis=0)
# iq_err = 1.5 * (q75_err - q25_err)
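# build uncertainty bands per dataset version by shifting each point estimate by the historical revision-error quantiles looked up for its report offset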
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
v = pd.DataFrame(datasets[dataset_date_str]['Rgeom (Med NowNowcast 7d MA)'].iloc[-max_num_entries:].dropna())
v.rename(columns={'Rgeom (Med NowNowcast 7d MA)': 'Data'}, inplace=True)
cur_idx = v.index
com_idx = r_idx.intersection(cur_idx)
if com_idx.size == 0:
continue
v = v.loc[com_idx]
cur_idx = v.index
v['Offset'] = (dataset_date - cur_idx).days
cur_idx = v.index
com_idx = r_idx.intersection(cur_idx)
# vmed = v['Data']
vmed = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else med_err[r['Offset']], axis=1)
vmax = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else max_err[r['Offset']], axis=1)
vmin = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else min_err[r['Offset']], axis=1)
vq25 = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else q25_err[r['Offset']], axis=1)
vq75 = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else q75_err[r['Offset']], axis=1)
vq025 = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else q025_err[r['Offset']], axis=1)
vq975 = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else q975_err[r['Offset']], axis=1)
band_data_med.loc[com_idx, dataset_date] = vmed.loc[com_idx]
band_data_min.loc[com_idx, dataset_date] = vmin.loc[com_idx]
band_data_max.loc[com_idx, dataset_date] = vmax.loc[com_idx]
band_data_iq50_lo.loc[com_idx, dataset_date] = vq25.loc[com_idx]
band_data_iq50_hi.loc[com_idx, dataset_date] = vq75.loc[com_idx]
band_data_iq95_lo.loc[com_idx, dataset_date] = vq025.loc[com_idx]
band_data_iq95_hi.loc[com_idx, dataset_date] = vq975.loc[com_idx]
# %%
plt.rc('axes', axisbelow=True)
if False:
# testX = test.subtract(test.iloc[:,-1], axis=0)
# band_max = testX.apply(lambda r: r.dropna().max(), axis=1).max()
# band_min = testX.apply(lambda r: r.dropna().min(), axis=1).min()
# band_q75 = testX.apply(lambda r: r.dropna().quantile(0.75), axis=1)
# band_q25 = testX.apply(lambda r: r.dropna().quantile(0.25), axis=1)
# band_iq = 1.5 * (band_q75 - band_q25).max()
# band_pm = np.max([-band_min, band_max])
fig = plt.figure(figsize=(16,9))
gs = gridspec.GridSpec(2, 1, figure=fig,
height_ratios = [14, 3],
hspace = 0.1)
ax = fig.add_subplot(gs[0, 0])
if dataset_date_range[0].year == dataset_date_range[-1].year:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
else:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.%y-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
fig.suptitle('COVID-19 - Original Punktschätzer des RKI 7-Tage-R nach Erkrankungsdatum - {:s}'.format(
Datenstand_range_str),
horizontalalignment='center',
verticalalignment='center',
fontsize=21, color=SLATE, y=0.91)
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
xidz = datasets[dataset_date_str].index[-max_num_entries:]
v = datasets[dataset_date_str]['R (Nowcast 7d MA)'].iloc[-max_num_entries:]
#y1 = v * (1 - IQmax)
#y2 = v * (1 + IQmax)
#y1 = datasets[dataset_date_str]['R (Q1 NowNowcast 7d MA)'].iloc[-56:]
#y2 = datasets[dataset_date_str]['R (Q3 NowNowcast 7d MA)'].iloc[-56:]
#plt.fill_between(xidz, y1, y2, facecolor=(0.3, 0.3, 0.3), alpha=0.5)
plt.plot(v)
ax.set_ylim([0.6,1.5])
ax.yaxis.set_major_locator(MultipleLocator(0.1))
ax.yaxis.set_minor_locator(MultipleLocator(0.02))
#ax.set_xlim([r_idx[0], r_idx[-1]])
ax.set_xlim([
pd.to_datetime(r_idx[0]),
pd.to_datetime(PLOT_MAX_DATE)
])
date_form = DateFormatter("%d.%m.\n%Y")
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=2, byweekday=0))
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.tick_params(which='minor', length=0, width=0)
ax.tick_params(axis=u'both', labelsize=16, labelcolor = SLATE)
ax.grid(True, which='major', axis='both', linestyle='-', color=(0.85,0.85,0.85))
ax.grid(True, which='minor', axis='both', linestyle='-', color=(0.95,0.95,0.95))
ax.set_ylabel('7-Tage Reproduktionszahl R', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax.set_xlabel('Geschätztes Erkrankungsdatum', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax2 = fig.add_subplot(gs[1, 0])
ax2.axis('off')
plt.text(0, 0.05,
'Datenquelle:\n' +
             'Robert Koch-Institut (RKI), an der Heiden, Matthias (2021): SARS-CoV-2-Nowcasting und -R-Schaetzung, Berlin: Zenodo. DOI:10.5281/zenodo.4680400\n'+
'URL: https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung ; ' +
'Abfragedatum/Datenstand: ' + Datenstand_range_str + '; eigene Berechnung/eigene Darstellung; \n' +
'Datenlizenz CC-BY 4.0 International',
fontsize=11.5)
exp_full_fname = '{:s}{:s}.png'.format(
OUTPUT_DIR + '..\\', 'Nowcasts_RKI_orig')
print('Saving ' + exp_full_fname)
fig_util.set_size(fig, (1920.0/100.0, 1080.0/100.0), dpi=100, pad_inches=0.35)
fig.savefig(exp_full_fname, dpi=100, bbox_inches='tight', pad_inches=0.35)
display(Image(filename=exp_full_fname))
plt.close()
# testX = test.subtract(test.iloc[:,-1], axis=0)
# band_max = testX.apply(lambda r: r.dropna().max(), axis=1).max()
# band_min = testX.apply(lambda r: r.dropna().min(), axis=1).min()
# band_q75 = testX.apply(lambda r: r.dropna().quantile(0.75), axis=1)
# band_q25 = testX.apply(lambda r: r.dropna().quantile(0.25), axis=1)
# band_iq = 1.5 * (band_q75 - band_q25).max()
# band_pm = np.max([-band_min, band_max])
fig = plt.figure(figsize=(16,9))
gs = gridspec.GridSpec(2, 1, figure=fig,
height_ratios = [14, 3],
hspace = 0.1)
ax = fig.add_subplot(gs[0, 0])
if dataset_date_range[0].year == dataset_date_range[-1].year:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
else:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.%y-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
fig.suptitle('COVID-19 - Punktschätzer$^{{*)}}$ des RKI 7-Tage-R nach Erkrankungsdatum - {:s}'.format(
Datenstand_range_str),
horizontalalignment='center',
verticalalignment='center',
fontsize=21, color=SLATE, y=0.91)
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
xidz = datasets[dataset_date_str].index[-max_num_entries:]
v = datasets[dataset_date_str]['Rgeom (Nowcast 7d MA)'].iloc[-max_num_entries:]
#y1 = v * (1 - IQmax)
#y2 = v * (1 + IQmax)
#y1 = datasets[dataset_date_str]['R (Q1 NowNowcast 7d MA)'].iloc[-56:]
#y2 = datasets[dataset_date_str]['R (Q3 NowNowcast 7d MA)'].iloc[-56:]
#plt.fill_between(xidz, y1, y2, facecolor=(0.3, 0.3, 0.3), alpha=0.5)
plt.plot(v)
ax.set_ylim([0.6,1.5])
ax.yaxis.set_major_locator(MultipleLocator(0.1))
ax.yaxis.set_minor_locator(MultipleLocator(0.02))
#ax.set_xlim([r_idx[0], r_idx[-1]])
ax.set_xlim([
pd.to_datetime(r_idx[0]),
pd.to_datetime(PLOT_MAX_DATE)
])
date_form = DateFormatter("%d.%m.\n%Y")
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=2, byweekday=0))
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.tick_params(which='minor', length=0, width=0)
ax.tick_params(axis=u'both', labelsize=16, labelcolor = SLATE)
ax.grid(True, which='major', axis='both', linestyle='-', color=(0.85,0.85,0.85))
ax.grid(True, which='minor', axis='both', linestyle='-', color=(0.95,0.95,0.95))
ax.set_ylabel('7-Tage Reproduktionszahl R', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax.set_xlabel('Geschätztes Erkrankungsdatum', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax2 = fig.add_subplot(gs[1, 0])
ax2.axis('off')
plt.text(0, 0.05,
'Datenquelle:\n' +
         'Robert Koch-Institut (RKI), an der Heiden, Matthias (2021): SARS-CoV-2-Nowcasting und -R-Schaetzung, Berlin: Zenodo. DOI:10.5281/zenodo.4680400\n'+
'URL: https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung ; ' +
'Abfragedatum/Datenstand: ' + Datenstand_range_str + '; eigene Berechnung/eigene Darstellung; \n' +
'Datenlizenz CC-BY 4.0 International '+
'$^{*)}$ gleitender geometrischer Mittelwert (Wurzel der Produkte)',
fontsize=11.5)
exp_full_fname = '{:s}{:s}.png'.format(
OUTPUT_DIR + '..\\', 'Nowcasts_RKI_geom')
print('Saving ' + exp_full_fname)
fig_util.set_size(fig, (1920.0/100.0, 1080.0/100.0), dpi=100, pad_inches=0.35)
fig.savefig(exp_full_fname, dpi=100, bbox_inches='tight', pad_inches=0.35)
display(Image(filename=exp_full_fname))
plt.close()
fig = plt.figure(figsize=(16,9))
gs = gridspec.GridSpec(2, 1, figure=fig,
height_ratios = [14, 3],
hspace = 0.1)
ax = fig.add_subplot(gs[0, 0])
fig.suptitle('COVID-19 - Wochentagkorrigierter Punktschätzer$^{{*)}}$ des RKI 7-Tage-R nach Erkrankungsdatum - {:s}'.format(
Datenstand_range_str),
horizontalalignment='center',
verticalalignment='center',
fontsize=21, color=SLATE, y=0.91)
# y1 = band_data_min.apply(lambda r: r.dropna().min(), axis=1).dropna()
# y2 = band_data_max.apply(lambda r: r.dropna().max(), axis=1).dropna()
# x = y1.index
# plt.fill_between(x, y1, y2, facecolor=(0.0, 0.0, 0.0), alpha=0.2)
# y1 = band_data_iq_min.apply(lambda r: r.dropna().min(), axis=1).dropna()
# y2 = band_data_iq_max.apply(lambda r: r.dropna().max(), axis=1).dropna()
# x = y1.index
# plt.fill_between(x, y1, y2, facecolor=(1.0, 0.0, 0.0), alpha=0.8)
# y1 = band_data_q25.apply(lambda r: r.dropna().min(), axis=1).dropna()
# y2 = band_data_q75.apply(lambda r: r.dropna().max(), axis=1).dropna()
# x = y1.index
# plt.fill_between(x, y1, y2, facecolor=(0.4, 0.4, 1.0), alpha=0.8)
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
xidz = datasets[dataset_date_str].index[-max_num_entries:]
v = datasets[dataset_date_str]['Rgeom (Med NowNowcast 7d MA)'].iloc[-max_num_entries:]
plt.plot(v) #, 'k-') #, linewidth=0.5)
ax.set_ylim([0.6,1.5])
ax.yaxis.set_major_locator(MultipleLocator(0.1))
ax.yaxis.set_minor_locator(MultipleLocator(0.02))
#ax.set_xlim([r_idx[0], r_idx[-1]])
ax.set_xlim([
pd.to_datetime(r_idx[0]),
pd.to_datetime(PLOT_MAX_DATE)
])
date_form = DateFormatter("%d.%m.\n%Y")
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=2, byweekday=0))
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.tick_params(which='minor', length=0, width=0)
ax.tick_params(axis=u'both', labelsize=16, labelcolor = SLATE)
ax.grid(True, which='major', axis='both', linestyle='-', color=(0.85,0.85,0.85))
ax.grid(True, which='minor', axis='both', linestyle='-', color=(0.95,0.95,0.95))
ax.set_ylabel('7-Tage Reproduktionszahl R', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax.set_xlabel('Geschätztes Erkrankungsdatum', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax2 = fig.add_subplot(gs[1, 0])
ax2.axis('off')
plt.text(0, 0.05,
'Datenquelle:\n' +
         'Robert Koch-Institut (RKI), an der Heiden, Matthias (2021): SARS-CoV-2-Nowcasting und -R-Schaetzung, Berlin: Zenodo. DOI:10.5281/zenodo.4680400\n'+
'URL: https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung ; ' +
'Abfragedatum/Datenstand: ' + Datenstand_range_str + '; eigene Berechnung/eigene Darstellung;\n' +
'Datenlizenz CC-BY 4.0 International '+
'$^{*)}$ gleitender geometrischer Mittelwert (Wurzel der Produkte)',
fontsize=11.5)
exp_full_fname = '{:s}{:s}.png'.format(
OUTPUT_DIR + '..\\', 'Nowcasts_RKI_korr')
print('Saving ' + exp_full_fname)
fig_util.set_size(fig, (1920.0/100.0, 1080.0/100.0), dpi=100, pad_inches=0.35)
fig.savefig(exp_full_fname, dpi=100, bbox_inches='tight', pad_inches=0.35)
display(Image(filename=exp_full_fname))
plt.close()
# %%
ext_r_idx = pd.date_range(r_idx[0], r_idx[-1]- | pd.DateOffset(days=1) | pandas.DateOffset |
import requests
import pandas as pd
import json
rply = []
regions = [172]
for region in regions:
url = "https://api.bilibili.com/x/web-interface/ranking/region?rid=" + str(region) + "&day=7&original=0&jsonp=jsonp"
response = requests.get(url=url)
text = response.text
ob = json.loads(text)
videos = ob["data"]
bvid = []
aid = []
for video in videos:
bvid.append(video["bvid"])
aid.append(video["aid"])
for v in range(1, 11):
for i in range(1, 101):
url = "https://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn=" + str(i) + "&type=1&oid=" + aid[v] + "&sort=2"
response = requests.get(url=url)
text = response.text
ob = json.loads(text)
replies = ob["data"]["replies"]
if replies is None:
break
for reply in replies:
reply_line = [reply["content"]["message"], reply["like"]]
rply.append(reply_line)
if i % 5 == 0:
print("Scanned " + str((v-1) * 100 + i) + " pages")
data_rply = | pd.DataFrame(data=rply, columns=["content", "num_like"]) | pandas.DataFrame |
import glob
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tqdm
def get_table_paths(root):
paths = glob.glob(os.path.join(root, "*", "*.pkl"))
if len(paths) == 0:
# flat, no folders for specific queries
paths = glob.glob(os.path.join(root, "*.pkl"))
return paths
def folder_to_num(folder):
return int(folder.replace("q", "")) if folder.startswith("q") else -1
def collect_results(results_root):
all_tables = get_table_paths(results_root)
acc = []
all_columns = set([])
for path in tqdm.tqdm(all_tables):
path_parts = path.split("/")
if len(path_parts) >= 2 and path_parts[-2].startswith("q"):
folder_name = path_parts[-2]
else:
folder_name = "unk_folder"
try:
df = pd.read_pickle(path)
df["folder"] = folder_name
if len(all_columns) == 0:
all_columns = all_columns.union(df.columns)
else:
all_columns = all_columns.intersection(df.columns)
acc.append(df)
except Exception as err:
print(err)
print("Failed to load {} results file".format(path))
# make sure only include columns all have
all_columns = list(all_columns)
acc = [d[all_columns] for d in acc]
df = pd.concat(acc, axis=0).reset_index(drop=True)
df["folder_num"] = df["folder"].map(folder_to_num)
return df
def performance_table(df):
summary_df = df.groupby([
"folder_num",
"dataset",
"name",
]).score.agg(["mean", "std"])
summary_df = summary_df.reset_index()
return summary_df.sort_values(["folder_num", "dataset", "name"])
def filter_incomplete(df):
# remove any queries where any system failed to finish
# should only compare cases where everyone finishes
df = df.copy()
df = df[~df["score"].isnull()]
all_systems = df["name"].unique()
num_systems = len(all_systems)
sys_cts = df.groupby(["folder_num", "dataset"])["name"].agg(lambda x: len(set(x)))
ok_entries = set(sys_cts[sys_cts == num_systems].index.values)
print("Keeping {}/{} folder/dataset combinations".format(len(ok_entries), sys_cts.shape[0]))
is_ok = [pair in ok_entries for pair in zip(df["folder_num"], df["dataset"])]
df = df[is_ok].reset_index(drop=True)
return df
def count_top_scores(summary_df, complete_only=False, min_diff=None):
summary_df = summary_df.sort_values("mean", ascending=False)
# only consider queries where all systems completed...
if complete_only:
summary_df = filter_incomplete(summary_df)
# top score by folder/dataset
top_df = summary_df.groupby(["folder_num", "dataset"]).head(1)
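    # optionally require the winner to beat the runner-up by at least min_diff (relies on summary_df being sorted by "mean", descending)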
if min_diff is not None:
diffs = summary_df.groupby(
["folder_num",
"dataset"])["mean"].apply(lambda x: x.values[0] - x.values[1])
diffs = diffs >= min_diff
diffs = diffs.to_frame(name="sat_min_diff").reset_index()
top_df_ext = pd.merge(
top_df, diffs, how="left", on=["folder_num", "dataset"])
top_df = top_df_ext[top_df_ext["sat_min_diff"]].reset_index(drop=True)
    # count of system entries for that folder (i.e. # of datasets where it wins)
top_df = top_df.groupby(["folder_num", "name"]).size().to_frame(name="ct")
top_df = top_df.reset_index()
top_df = | pd.pivot(top_df, index="folder_num", columns="name", values="ct") | pandas.pivot |
"""Script to normalize test single cell RNA sequencing dataset
and output common subset genes as in the final training set.
"""
import pandas as pd
import sklearn
from sklearn.preprocessing import StandardScaler
info = {'GSE57982': {'filename': 'GSE57982_primaryFpkmMatrix.txt',
'idcol': 'geneSymbol', # column name of gene IDs
'rmcol': ['geneID']}, # columns to remove
'GSE62526': {'filename': 'GSE62526_Normalized_expression_values.txt',
'idcol': 'Gene',
'rmcol': []},
'GSE66117': {'filename': 'GSE66117_CLL_FPKM_values.txt',
'idcol': 'Gene',
'rmcol': ['Description']}
}
with open('final.txt') as fp:
    # take the list of final differentially expressed genes
cols = fp.readline().split()
cols.remove('tissue')
cols.remove('batch')
deg = | pd.DataFrame(data=cols, columns=['gene']) | pandas.DataFrame |
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
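# Most tests below build a small frame of value pairs plus an "expected" column and check datacompy.columns_equal against it.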
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_compare_df_setter_bad():
df = | pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}]) | pandas.DataFrame |
"""Tabular data utils."""
import os
import warnings
from copy import copy
from typing import Optional, List, Tuple, Dict, Sequence, Union, Iterable
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from log_calls import record_history
from pandas import DataFrame
ReadableToDf = Union[str, np.ndarray, DataFrame, Dict[str, np.ndarray], 'Batch']
@record_history(enabled=False)
def get_filelen(fname: str) -> int:
"""Get length of csv file.
Args:
fname: file name.
Returns:
int.
"""
cnt_lines = -1
with open(fname, 'rb') as fin:
for line in fin:
if len(line.strip()) > 0:
cnt_lines += 1
return cnt_lines
@record_history(enabled=False)
def get_batch_ids(arr, batch_size):
"""
Args:
arr: Sequense.
batch_size: batch size.
Returns:
Generator.
"""
n = 0
while n < len(arr):
yield arr[n: n + batch_size]
n += batch_size
@record_history(enabled=False)
def get_file_offsets(file: str, n_jobs: Optional[int] = None, batch_size: Optional[int] = None
) -> Tuple[List[int], List[int]]:
"""
Args:
file: file path.
n_jobs: number of jobs for multiprocesiing.
batch_size: batch size.
Returns:
offsets tuple.
"""
assert n_jobs is not None or batch_size is not None, 'One of n_jobs or batch size should be defined'
lens = []
with open(file, 'rb') as f:
# skip header
header_len = len(f.readline())
# get row lens
length = 0
for row in f:
if len(row.strip()) > 0:
lens.append(length)
length += len(row)
lens = np.array(lens, dtype=np.int64) + header_len
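    # group rows into batches (fixed batch_size, or an even split across n_jobs) and record each batch's starting offset and row count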
if batch_size:
indexes = list(get_batch_ids(lens, batch_size))
else:
indexes = np.array_split(lens, n_jobs)
offsets = [x[0] for x in indexes]
cnts = [x.shape[0] for x in indexes]
return offsets, cnts
@record_history(enabled=False)
def _check_csv_params(read_csv_params: dict):
"""
Args:
read_csv_params:
Returns:
"""
for par in ['skiprows', 'nrows', 'index_col', 'header', 'names', 'chunksize']:
if par in read_csv_params:
read_csv_params.pop(par)
warnings.warn('Parameter {0} will be ignored in parallel mode'.format(par), UserWarning)
return read_csv_params
@record_history(enabled=False)
def read_csv_batch(file: str, offset, cnt, **read_csv_params):
"""
Args:
file:
offset:
cnt:
read_csv_params:
Returns:
"""
read_csv_params = copy(read_csv_params)
if read_csv_params is None:
read_csv_params = {}
try:
usecols = read_csv_params.pop('usecols')
except KeyError:
usecols = None
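    # read only the header row for column names, then seek straight to this batch's byte offset and read cnt rows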
header = pd.read_csv(file, nrows=0, **read_csv_params).columns
with open(file, 'rb') as f:
f.seek(offset)
data = pd.read_csv(f, header=None, names=header, chunksize=None, nrows=cnt, usecols=usecols, **read_csv_params)
return data
@record_history(enabled=False)
def read_csv(file: str, n_jobs: int = 1, **read_csv_params) -> DataFrame:
"""
Args:
file:
n_jobs:
**read_csv_params:
Returns:
"""
if n_jobs == 1:
return pd.read_csv(file, **read_csv_params)
if n_jobs == -1:
n_jobs = os.cpu_count()
_check_csv_params(read_csv_params)
offsets, cnts = get_file_offsets(file, n_jobs)
with Parallel(n_jobs) as p:
res = p(delayed(read_csv_batch)(file, offset=offset, cnt=cnt, **read_csv_params)
for (offset, cnt) in zip(offsets, cnts))
res = pd.concat(res, ignore_index=True)
return res
@record_history(enabled=False)
class Batch:
"""
Class to wraps batch of data in different formats.
Default - batch of DataFrame.
"""
@property
def data(self) -> DataFrame:
"""Get data from Batch object
Returns:
"""
return self._data
def __init__(self, data):
self._data = data
class FileBatch(Batch):
"""
Batch of csv file
"""
@property
def data(self) -> DataFrame:
"""Get data from Batch object
Returns:
"""
data_part = read_csv_batch(self.file, cnt=self.cnt, offset=self.offset, **self.read_csv_params)
return data_part
def __init__(self, file, offset, cnt, read_csv_params):
self.file = file
self.offset = offset
self.cnt = cnt
self.read_csv_params = read_csv_params
@record_history(enabled=False)
class BatchGenerator:
"""
Abstract - generator of batches from data
"""
def __init__(self, batch_size, n_jobs):
"""
Args:
n_jobs: number of processes to handle
batch_size: batch size. Default is None, split by n_jobs
"""
if n_jobs == -1:
n_jobs = os.cpu_count()
self.n_jobs = n_jobs
self.batch_size = batch_size
def __getitem__(self, idx) -> Batch:
raise NotImplementedError
def __len__(self) -> int:
raise NotImplementedError
@record_history(enabled=False)
class DfBatchGenerator(BatchGenerator):
"""
Batch generator from DataFrames.
"""
def __init__(self, data: DataFrame, n_jobs: int = 1, batch_size: Optional[int] = None):
"""
Args:
data: pd.DataFrame.
n_jobs: number of processes to handle.
batch_size: batch size. Default is None, split by n_jobs.
"""
super().__init__(batch_size, n_jobs)
self.data = data
if self.batch_size is not None:
self.idxs = list(get_batch_ids(np.arange(data.shape[0]), batch_size))
else:
self.idxs = [x for x in np.array_split(np.arange(data.shape[0]), n_jobs) if len(x) > 0]
def __len__(self) -> int:
if self.batch_size is not None:
return int(np.ceil(self.data.shape[0] / self.batch_size))
return int(self.n_jobs)
def __getitem__(self, idx):
return Batch(self.data.iloc[self.idxs[idx]])
@record_history(enabled=False)
class FileBatchGenerator(BatchGenerator):
def __init__(self, file, n_jobs: int = 1, batch_size: Optional[int] = None, read_csv_params: dict = None):
"""
Args:
file:
n_jobs: number of processes to handle
batch_size: batch size. Default is None, split by n_jobs
read_csv_params: params of reading csv file. Look for pd.read_csv params
"""
super().__init__(batch_size, n_jobs)
self.file = file
self.offsets, self.cnts = get_file_offsets(file, n_jobs, batch_size)
if read_csv_params is None:
read_csv_params = {}
self.read_csv_params = read_csv_params
def __len__(self) -> int:
return len(self.cnts)
def __getitem__(self, idx):
return FileBatch(self.file, self.offsets[idx], self.cnts[idx], self.read_csv_params)
@record_history(enabled=False)
def read_data(data: ReadableToDf, features_names: Optional[Sequence[str]] = None, n_jobs: int = 1,
read_csv_params: Optional[dict] = None) -> Tuple[DataFrame, Optional[dict]]:
"""Get pd.DataFrame from different data formats
Args:
data: Dataset in formats:
- pd.DataFrame
- dict of np.ndarray
- path to csv, feather, parquet
features_names: Optional features names if np.ndarray
n_jobs: number of processes to read file
read_csv_params: params to read csv file
Returns:
"""
if read_csv_params is None:
read_csv_params = {}
# case - new process
if isinstance(data, Batch):
return data.data, None
if isinstance(data, DataFrame):
return data, None
# case - single array passed to inference
if isinstance(data, np.ndarray):
return DataFrame(data, columns=features_names), None
# case - dict of array args passed
if isinstance(data, dict):
df = DataFrame(data['data'], columns=features_names)
upd_roles = {}
for k in data:
if k != 'data':
name = '__{0}__'.format(k.upper())
assert name not in df.columns, 'Not supported feature name {0}'.format(name)
df[name] = data[k]
upd_roles[k] = name
return df, upd_roles
if isinstance(data, str):
if data.endswith('.feather'):
# TODO: check about feather columns arg
data = pd.read_feather(data)
if read_csv_params['usecols'] is not None:
data = data[read_csv_params['usecols']]
return data, None
if data.endswith('.parquet'):
return | pd.read_parquet(data, columns=read_csv_params['usecols']) | pandas.read_parquet |
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import nose
import numpy as np
from pandas import DataFrame, Series
from pandas.compat import range, lrange, iteritems
#from pandas.core.datetools import format as date_format
import pandas.io.sql as sql
import pandas.util.testing as tm
try:
import sqlalchemy
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
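# Flavor-specific DDL/DML templates keyed by statement name, so the same tests can run against sqlite, mysql and postgresql.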
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
`SepalLength` REAL,
`SepalWidth` REAL,
`PetalLength` REAL,
`PetalWidth` REAL,
`Name` TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` TEXT,
`IntDateCol` INTEGER,
`FloatCol` REAL,
`IntCol` INTEGER,
`BoolCol` INTEGER,
`IntColWithNull` INTEGER,
`BoolColWithNull` INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'mysql': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'postgresql': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
"""
}
}
class PandasSQLTest(unittest.TestCase):
"""Base class with common private methods for
SQLAlchemy and fallback cases.
"""
def drop_table(self, table_name):
self._get_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with open(iris_csv_file, 'rU') as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [(
'first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False),
('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)]
for d in data:
self._get_exec().execute(ins, d)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_sql("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_sql('SELECT * FROM test_frame_roundtrip')
result.set_index('pandas_index', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _tquery(self):
iris_results = self.pandasSQL.tquery("SELECT * FROM iris")
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
class TestSQLApi(PandasSQLTest):
"""Test the public API as it would be used
directly, including legacy names
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = 'sqlite'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
"""Test legacy name read_frame"""
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
| sql.has_table('test_frame1', self.conn, flavor='sqlite') | pandas.io.sql.has_table |
import numpy as np
import pandas as pd
import TrajectoryDescriptor
import TrajectoryFeatures
class Trajectory:
def __init__(self, **kwargs):
self.labels = kwargs.get('labels', ['target'])
if kwargs['mood'] == 'df':
self.raw_data = kwargs.get('trajectory', pd.DataFrame())
if kwargs['mood'] == 'csv':
            self.raw_data = self.load_data(**kwargs)
self.rows_ = self.raw_data.shape[0]
self.stop_parameters = kwargs.get('stop_parameters', [100, 60, 60, 100])
self.has_alt = True
self.duration_features = []
self.speed_features = []
self.acc_features = []
self.jerk_features = []
self.brate_rate_features = []
self.brate_features = []
self.distance_features = []
self.bearing_features = []
self.label = ''
self.noise_no=0
self.polartheta = []
self.polarR = []
self.isPure = False
self.isInValid = False
self.filter = True
# self.labels = [] # "label1:11"
self.hasAlt = False # False: we do not have altitude in dataset
# self.descriptor=TrajectoryDescriptor.TrajectoryDescriptor(trajectory=self.raw_data, labels=self.labels)
#print("smothing..")
#self.raw_data,noise=self.g_hample(self.raw_data)
#self.noise_no=len(noise)
#if self.noise_no>0 :
# print("# noise points:",len(noise))
def rows(self):
return self.rows_
def prediction_actual(self, target):
self.raw_data.loc[:, target + '_prediction'] = self.stat_label()
return self.raw_data.loc[:, [target, target + '_prediction']]
def get_full_features_column_name(self):
"""
other = ['isInValid', 'isPure', 'target']
a2 = np.array(['min_', 'max_', 'mean', 'median', 'std', 'p10', 'p25', 'p50', 'p75', 'p90'])
a1 = np.array(['speed_'] * len(a2))
speed_features = map(''.join, zip(a1, a2))
a2 = np.array(['min_', 'max_', 'mean', 'median', 'std', 'p10', 'p25', 'p50', 'p75', 'p90'])
a1 = np.array(['distance_'] * len(a2))
distance_features = map(''.join, zip(a1, a2))
a2 = np.array(['min_', 'max_', 'mean', 'median', 'std', 'p10', 'p25', 'p50', 'p75', 'p90'])
a1 = np.array(['acc_'] * len(a2))
acc_features = map(''.join, zip(a1, a2))
a2 = np.array(['min_', 'max_', 'mean', 'median', 'std', 'p10', 'p25', 'p50', 'p75', 'p90'])
a1 = np.array(['bearing_'] * len(a2))
bearing_features = map(''.join, zip(a1, a2))
a2 = np.array(['min_', 'max_', 'mean', 'median', 'std', 'p10', 'p25', 'p50', 'p75', 'p90'])
a1 = np.array(['jerk_'] * len(a2))
jerk_features = map(''.join, zip(a1, a2))
a2 = np.array(['min_', 'max_', 'mean', 'median', 'std', 'p10', 'p25', 'p50', 'p75', 'p90'])
a1 = np.array(['brate_'] * len(a2))
brate_features = map(''.join, zip(a1, a2))
a2 = np.array(['min_', 'max_', 'mean', 'median', 'std', 'p10', 'p25', 'p50', 'p75', 'p90'])
a1 = np.array(['brate_rate_'] * len(a2))
brate_rate__features = map(''.join, zip(a1, a2))
ret = map(''.join, zip(distance_features, speed_features, acc_features, bearing_features,
jerk_features, brate_features, brate_rate__features, other))
"""
return self.descriptor.get_full_features_column_name()
def segment_features(self):
# other = [self.isInValid, self.isPure, self.stat_label()]
# return self.distance_features + self.speed_features + self.acc_features + self.bearing_features + self.jerk_features + self.brate_features + self.brate_rate_features + other
self.descriptor = TrajectoryDescriptor.TrajectoryDescriptor(trajectory=self.raw_data, labels=self.labels,
stop_parameters=self.stop_parameters)
ret = self.descriptor.describe()
return ret
"""
labels: List of labels for each point
lat: name of lat column in the dataframe
lon: name of lon column in the dataframe
alt: name of alt column in the dataframe
timeDate: name of time date column in the dataframe
src: source of the csv file for raw_data
"""
def return_row_data(self):
return self.raw_data
def load_data(self, **kwargs):
# lat='lat',lon='lon',alt='alt',timeDate='timeDate',labels=['label1'],src='~/gps_fe/bigdata2_8696/ex_traj/5428_walk_790.csv',seperator=','
print('loading...')
lat = kwargs.get('lat', "lat")
print(lat)
lon = kwargs.get('lon', "lon")
print(lon)
alt = kwargs.get('alt', None)
print(alt)
time_date = kwargs.get('timeDate', "timeDate")
print(time_date)
labels = kwargs.get('labels', "[label]")
print(labels)
src = kwargs.get('src', "~/gps_fe/bigdata2_8696/ex_traj/5428_walk_790.csv")
print(src)
separator = kwargs.get('separator', ",")
print(separator)
self.labels = labels
# input data needs lat,lon,alt,timeDate, [Labels]
self.raw_data = pd.read_csv(src, sep=separator, parse_dates=[time_date], index_col=time_date)
self.raw_data.rename(columns={lat: 'lat'}, inplace=True)
self.raw_data.rename(columns={lon: 'lon'}, inplace=True)
if alt is not None:
self.raw_data.rename(columns={alt: 'alt'}, inplace=True)
self.raw_data.rename(columns={time_date: 'timeDate'}, inplace=True)
# preprocessing
# removing NaN in lat and lon
self.raw_data = self.raw_data.loc[pd.notnull(self.raw_data.lat), :]
self.raw_data = self.raw_data.loc[pd.notnull(self.raw_data.lon), :]
for label in labels:
self.raw_data = self.raw_data.loc[pd.notnull(self.raw_data[label]), :]
print('Data loaded.')
return self.raw_data
def load_data_frame(self, data_frame, labels=None):
if labels is None:
labels = ['target']
self.labels = labels
self.raw_data = data_frame
# preprocessing
self.pre_processing(labels)
if (self.raw_data.shape[0] < 10):
return -1
return 0
def pre_processing(self, labels):
# removing NaN in lat and lon
self.raw_data = self.raw_data.loc[ | pd.notnull(self.raw_data.lat) | pandas.notnull |
# install imblearn package to a specific anaconda enviroment boston_house_price
# $ conda install -n boston_house_price -c conda-forge imbalanced-learn
# update imblearn package to a specific anaconda enviroment boston_house_price
# $ conda update -n boston_house_price -c glemaitre imbalanced-learn
# =============================================================
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from time import time
# Set a random seed
import random
seed = 42
random.seed(seed)
# Import supplementary visualization code visuals.py
import scripts.visuals as vs
# Load the Census dataset
path = '../data/'
train_data = path + 'census.csv'
test_data = path + 'test_census.csv'
data = pd.read_csv(train_data)
print(data.head(n=1))
print(data.shape)
# get the types of columns
print(data.dtypes)
# Pandas has a helpful select_dtypes function
# which we can use to build a new dataframe containing only the object columns.
obj_data = data.select_dtypes(include=['object']).copy()
# Before going any further, we have to check if there are null values in the data that we need to clean up.
print(obj_data[obj_data.isnull().any(axis=1)])
# TODO: Total number of records
n_records = data.shape[0]
# TODO: Number of records where individual's income is more than $50,000
# TODO: Number of records where individual's income is at most $50,000
# Method1:
n_at_most_50k, n_greater_50k = data.income.value_counts()
# Method2: (optional) -->
# n2_greater_50k = data[data['income']=='>50K'].shape[0]
# n2_at_most_50k = data[data['income']=='<=50K'].shape[0]
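# number of records reporting both a nonzero capital gain and a nonzero capital loss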
n_aux = data.loc[(data['capital-gain'] > 0) & (data['capital-loss'] > 0)].shape
# TODO: Percentage of individuals whose income is more than $50,000
greater_percent = (100*n_greater_50k)/n_records
# Print the results
print("Total number of records: {}".format(n_records))
print("Individuals making more than $50,000: {}".format(n_greater_50k))
print("Individuals making at most $50,000: {}".format(n_at_most_50k))
print("Percentage of individuals making more than $50,000: {}%".format(greater_percent))
# Split the data into features and target label
income_raw = data['income']
features_raw = data.drop('income', axis = 1)
# Visualize skewed continuous features of original data
vs.distribution(data)
# Log-transform the skewed features
skewed = ['capital-gain', 'capital-loss']
features_log_transformed = pd.DataFrame(data = features_raw)
features_log_transformed[skewed] = features_raw[skewed].apply(lambda x: np.log(x + 1))
# Visualize the new log distributions
vs.distribution(features_log_transformed, transformed = True)
# Import sklearn.preprocessing.StandardScaler
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
# Initialize a scaler, then apply it to the features
scaler = MinMaxScaler() # default=(0, 1)
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
features_log_minmax_transform = pd.DataFrame(data = features_log_transformed)
features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])
# Show an example of a record with scaling applied
print(features_log_minmax_transform.head(n = 5))
# TODO: One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()
features_final = pd.get_dummies(features_log_minmax_transform)
# TODO: Encode the 'income_raw' data to numerical values
# Method1:
encoder = LabelEncoder()
income = pd.Series(encoder.fit_transform(income_raw))
# Method2:(optional) -->
income1 =income_raw.map({'<=50K':0, '>50K':1})
# Method3:(optional) -->
income2 =pd.get_dummies(income_raw)['>50K']
# Print the number of features after one-hot encoding
encoded = list(features_final.columns)
print("{} total features after one-hot encoding.".format(len(encoded)))
# Uncomment the following line to see the encoded feature names
print(encoded)
#-----------------
# @Raafat: Some techniques to deal imbalanced data:
# --> under sampling
from imblearn.under_sampling import CondensedNearestNeighbour
cnn = CondensedNearestNeighbour(random_state=42)
X_res, y_res = cnn.fit_sample(features_final[0:300], income[0:300])
print('not Resampled dataset shape {}'.format(income[0:300].value_counts()))
print('cnn Resampled dataset shape {}'.format(pd.Series(y_res).value_counts()))
from imblearn.under_sampling import RandomUnderSampler
rus = RandomUnderSampler(random_state=42)
X_res, y_res = rus.fit_sample(features_final[0:300], income[0:300])
print('rus Resampled dataset shape {}'.format(pd.Series(y_res).value_counts()))
from imblearn.under_sampling import TomekLinks
tl = TomekLinks(random_state=42)
X_res, y_res = tl.fit_sample(features_final[0:300], income[0:300])
print('tl Resampled dataset shape {}'.format(pd.Series(y_res).value_counts()))
# --> over sampling
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=42)
X_res, y_res = sm.fit_sample(features_final[0:300], income[0:300])
print('sm Resampled dataset shape {}'.format( | pd.Series(y_res) | pandas.Series |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, option_context
from pandas.core import common as com
from pandas.util import testing as tm
class TestCaching:
def test_slice_consolidate_invalidate_item_cache(self):
# this is chained assignment, but will 'work'
with option_context('chained_assignment', None):
# #3970
df = DataFrame({"aa": np.arange(5), "bb": [2.2] * 5})
# Creates a second float block
df["cc"] = 0.0
# caches a reference to the 'bb' series
df["bb"]
# repr machinery triggers consolidation
repr(df)
# Assignment to wrong series
df['bb'].iloc[0] = 0.17
df._clear_item_cache()
tm.assert_almost_equal(df['bb'][0], 0.17)
def test_setitem_cache_updating(self):
# GH 5424
cont = ['one', 'two', 'three', 'four', 'five', 'six', 'seven']
for do_ref in [False, False]:
df = DataFrame({'a': cont,
"b": cont[3:] + cont[:3],
'c': np.arange(7)})
# ref the cache
if do_ref:
df.loc[0, "c"]
# set it
df.loc[7, 'c'] = 1
assert df.loc[0, 'c'] == 0.0
assert df.loc[7, 'c'] == 1.0
# GH 7084
# not updating cache on series setting with slices
expected = DataFrame({'A': [600, 600, 600]},
index=date_range('5/7/2014', '5/9/2014'))
out = DataFrame({'A': [0, 0, 0]},
index=date_range('5/7/2014', '5/9/2014'))
df = DataFrame({'C': ['A', 'A', 'A'], 'D': [100, 200, 300]})
# loop through df to update out
six = Timestamp('5/7/2014')
eix = Timestamp('5/9/2014')
for ix, row in df.iterrows():
out.loc[six:eix, row['C']] = out.loc[six:eix, row['C']] + row['D']
tm.assert_frame_equal(out, expected)
| tm.assert_series_equal(out['A'], expected['A']) | pandas.util.testing.assert_series_equal |
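# Side note (illustrative sketch, not part of the pandas tests above): the cache and
# chained-assignment pitfalls exercised here are why single-cell writes normally go
# through .loc/.at instead of patterns like df['bb'].iloc[0] = value.
def set_cell(df, row_label, col_label, value):
    # .at is the scalar counterpart of .loc and writes directly into the frame
    df.at[row_label, col_label] = value
    return df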
import json
from collections import defaultdict
import numpy as np
import pandas as pd
from tqdm import tqdm
from models.ease import EASE
from models.knn_popular import KNNpopularity
from models.mf import MatrixFactorization
from utils.helpers import get_movies_by_ids, minmaxscaling
from utils.metrics import relevance, unexpectedness
class Chaining:
def __init__(self, train_data, ease_ratings, item_ratings, ease_control_items, k, n_components, l, n=10):
self.primitive_models = []
self.train_data = train_data
self.ease_ratings = ease_ratings
self.item_ratings = item_ratings
self.ease_control_items = ease_control_items
self.knn = KNNpopularity('knn', train_data, item_ratings)
self.mf = MatrixFactorization('mf', train_data, item_ratings)
self.ease = EASE(ease_ratings, l)
self.knn.fit({'k': k})
self.mf.fit({'n_components': n_components,
'random_state': 42})
def make_recommendations(self, test_data):
"""
Make recommendations for users from test_data.
:param test_data: pd.DataFrame with user profiles.
:return: dict {user_id: recommendations}.
"""
# todo normalize scores -> select n depending on mean score
# todo same recs from different models
# todo distinct recs from different models
n = [10, 10, 10]
recommendations = defaultdict(list)
self.ease.prepare_recommendations(self.ease_control_items)
for user_id, user_profile in test_data.iterrows(): # iterate over test users, user_profile is a tuple
# EASE
ease_scores = | pd.Series(self.ease.pred[user_id], index=test_data.columns) | pandas.Series |
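# A hedged sketch for the "normalize scores" todo above: put each model's per-item
# score Series on a [0, 1] scale before blending. The project ships
# utils.helpers.minmaxscaling; its exact behaviour is not shown here, so this is
# only an assumed equivalent.
import pandas as pd

def minmax_scale_scores(scores):
    ''' Rescale a pd.Series of item scores to [0, 1]; constant scores map to 0. '''
    lo, hi = scores.min(), scores.max()
    if hi == lo:
        return pd.Series(0.0, index=scores.index)
    return (scores - lo) / (hi - lo)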
# -*- coding: utf-8 -*-
"""
Created on Wed May 24 16:15:24 2017
Sponsors Club messaging functions
@author: tkc
"""
import pandas as pd
import smtplib
import numpy as np
import datetime
import tkinter as tk
import glob
import re
import math
import textwrap
from tkinter import filedialog
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pkg.SC_signup_functions import findcards
from openpyxl import load_workbook
import pkg.SC_config as cnf
def emailparent_tk(teams, season, year):
    ''' Interface for non-billing email messages to parents (non-generic)
Message types include:
recruit - specific inquiry about player from last year not yet signed up; needs signupfile w/ recruits tab
assign - notify of team assignment, optional recruit for short team, CYC card notify; teams/cards/mastersignups
missinguni - ask about missing uniforms; missingunifile
unireturn - generic instructions for uniform return; mastersignups w/ unis issued
askforcards - check for CYC card on file and ask
other -- Generic single all team+coaches message (can have $SCHOOL, $GRADERANGE,$COACHINFO, $SPORT, $PLAYERLIST)
8/9/17 works for team assignments
TODO test recruit, missing unis, unireturn
args:
teams - df w/ active teams
season -'Winter', 'Fall' or 'Spring'
year - starting sport year i.e. 2019 for 2019-20 school year
'''
#%%
# first print out existing info in various lines
root = tk.Tk()
root.title('Send e-mail to parents')
messageframe=tk.LabelFrame(root, text='Message options')
unifilename=tk.StringVar()
try:
unifiles=glob.glob('missingunilist*') # find most recent uniform file name
if len(unifiles)>1:
unifile=findrecentfile(unifiles) # return single most recent file
else:
unifile=unifiles[0]
# find most recent missing uni file name
unifilename.set(unifile)
except: # handle path error
unifilename.set('missingunilist.csv')
recruitbool=tk.BooleanVar() # optional recruiting for short teams
emailtitle=tk.StringVar() # e-mail title
mtype=tk.StringVar() # coach message type
messfile=tk.StringVar() # text of e-mail message
transmessfile=tk.StringVar() # text of e-mail message for transfers
extravar=tk.StringVar() # use depends on message type... normally filename
extraname=tk.StringVar() # name for additional text entry box (various uses mostly filenames)
extraname.set('Extra_file_name.txt') # default starting choice
choice=tk.StringVar() # test or send -mail
def chooseFile(txtmess, ftypes):
''' tkinter file chooser (passes message string for window and expected
file types as tuple e.g. ('TXT','*.txt')
'''
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = txtmess, filetypes=[ ftypes] )
root.destroy() # closes pop up window
return full_path
def choose_message():
# choose existing message (.txt file)
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = 'Choose message file', filetypes=[ ('TXT','*.txt')] )
root.destroy() # closes pop up window
return full_path
# Functions to enable/disable relevant checkboxes depending on radiobutton choice
def Assignopts():
''' Display relevant choices for team assignment notification/cyc card/ short team recruiting '''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
extraname.set('n/a')
messfile.set('parent_team_assignment.txt')
transmessfile.set('parent_team_transfer.txt')
emailtitle.set('Fall $SPORT for $FIRST')
def Recruitopts():
''' Display relevant choices for specific player recruiting'''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
messfile.set('player_recruiting.txt')
transmessfile.set('n/a')
extraname.set('n/a')
emailtitle.set('Cabrini-Soulard sports for $FIRST this fall?')
def Missingopts():
''' Display relevant choices for ask parent for missing uniforms '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('finish_me.txt')
transmessfile.set('n/a')
extraname.set('Missing uni file name')
extravar.set('missing_uni.csv')
# TODO look up most recent uni file?
emailtitle.set("Please return $FIRST's $SPORT uniform!")
def Schedopts():
''' Display relevant choices for sending schedules (game and practice) to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('parent_game_schedule.txt')
transmessfile.set('n/a')
extraname.set('Game schedule file')
extravar.set('Cabrini_2017_schedule.csv')
emailtitle.set("Game schedule for Cabrini $GRADERANGE $GENDER $SPORT")
def Cardopts():
''' Display relevant choices for asking parent for missing CYC cards '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.DISABLED)
messfile.set('CYCcard_needed.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("CYC card needed for $FIRST")
def Otheropts():
''' Display relevant choices for other generic message to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
def Allopts():
''' Display relevant choices for generic message to all sports parents '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
# E-mail title and message file name
rownum=0
tk.Label(messageframe, text='Title for e-mail').grid(row=rownum, column=0)
titleentry=tk.Entry(messageframe, textvariable=emailtitle)
titleentry.config(width=50)
titleentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='messagefile').grid(row=rownum, column=0)
messentry=tk.Entry(messageframe, textvariable=messfile)
messentry.config(width=50)
messentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='Transfer messagefile').grid(row=rownum, column=0)
transmessentry=tk.Entry(messageframe, textvariable=transmessfile)
transmessentry.config(width=50)
transmessentry.grid(row=rownum, column=1)
rownum+=1
# Choose counts, deriv, both or peaks plot
tk.Radiobutton(messageframe, text='Team assignment', value='Assign', variable = mtype, command=Assignopts).grid(row=rownum, column=0)
tk.Radiobutton(messageframe, text='Recruit missing', value='Recruit', variable = mtype, command=Recruitopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Missing uni', value='Missing', variable = mtype, command=Missingopts).grid(row=rownum, column=2)
tk.Radiobutton(messageframe, text='Send schedule', value='Schedule', variable = mtype, command=Schedopts).grid(row=rownum, column=3)
rownum+=1
tk.Radiobutton(messageframe, text='Ask for cards', value='Cards', variable = mtype, command=Cardopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Other team message', value='Other', variable = mtype, command=Otheropts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='All sport parents', value='All', variable = mtype, command=Allopts).grid(row=rownum, column=2)
rownum+=1
tk.Label(messageframe, text=extraname.get()).grid(row=rownum, column=0)
extraentry=tk.Entry(messageframe, textvariable=extravar)
extraentry.grid(row=rownum, column=1)
# Extra file chooser button
# button arg includes file type extension .. get from messfile
try:
ft = extraname.get().split('.')[-1]
ftypes =("%s" %ft.upper(), "*.%s" %ft)
except:
ftypes =("CSV" , "*.*") # default to all files
    # wrap the callback in a lambda so the dialog opens on click (not at widget creation)
    # and store the chosen path in the extra-file entry
    d=tk.Button(messageframe, text='Choose file',
                command=lambda: extravar.set(chooseFile('Choose extra file', ftypes)))
d.grid(row=rownum, column=2)
recruitcheck=tk.Checkbutton(messageframe, variable=recruitbool, text='Recruit more players for short teams?')
recruitcheck.grid(row=rownum, column=3) # can't do immediate grid or nonetype is returned
rownum+=1
messageframe.grid(row=0, column=0)
# Specific team selector section using checkboxes
teamframe=tk.LabelFrame(root, text='Team selector')
teamdict=shortnamedict(teams)
teamlist=[] # list of tk bools for each team
# Make set of bool/int variables for each team
for i, val in enumerate(teamdict):
teamlist.append(tk.IntVar())
if '#' not in val:
teamlist[i].set(1) # Cabrini teams checked by default
else:
teamlist[i].set(0) # transfer team
# make checkbuttons for each team
for i, val in enumerate(teamdict):
thisrow=i%5+1+rownum # three column setup
thiscol=i//5
thisname=teamdict.get(val,'')
tk.Checkbutton(teamframe, text=thisname, variable=teamlist[i]).grid(row=thisrow, column=thiscol)
rownum+=math.ceil(len(teamlist)/5)+2
# Decision buttons bottom row
def chooseall(event):
''' Select all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(1)
def clearall(event):
''' deselect all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(0)
def abort(event):
choice.set('abort')
root.destroy()
def test(event):
choice.set('test')
root.destroy()
def KCtest(event):
choice.set('KCtest')
root.destroy()
def send(event):
choice.set('send')
root.destroy()
rownum+=1
d=tk.Button(teamframe, text='All teams')
d.bind('<Button-1>', chooseall)
d.grid(row=rownum, column=0)
d=tk.Button(teamframe, text='Clear teams')
d.bind('<Button-1>', clearall)
d.grid(row=rownum, column=1)
teamframe.grid(row=1, column=0)
choiceframe=tk.LabelFrame(root)
d=tk.Button(choiceframe, text='Abort')
d.bind('<Button-1>', abort)
d.grid(row=rownum, column=2)
d=tk.Button(choiceframe, text='Test')
d.bind('<Button-1>', test)
d.grid(row=rownum, column=3)
d=tk.Button(choiceframe, text='KCtest')
d.bind('<Button-1>', KCtest)
d.grid(row=rownum, column=4)
d=tk.Button(choiceframe, text='Send')
d.bind('<Button-1>', send)
d.grid(row=rownum, column=5)
choiceframe.grid(row=2, column=0)
root.mainloop()
#%%
mychoice=choice.get()
if mychoice!='abort':
kwargs={}
if mychoice=='KCtest':
# this is a true send test but only to me
kwargs.update({'KCtest':True})
mychoice='send'
kwargs.update({'choice':mychoice}) # test or send
emailtitle=emailtitle.get()
messagefile='messages\\'+messfile.get()
# Handle selection of team subsets
selteams=[]
for i, val in enumerate(teamdict):
if teamlist[i].get()==1:
selteams.append(val)
# Filter teams based on checkbox input
teams=teams[teams['Team'].isin(selteams)]
# drop duplicates in case of co-ed team (m and f entries)
teams=teams.drop_duplicates(['Team','Sport'])
# Now deal with the different types of messages
#%%
if mtype.get()=='Schedule':
# Send practice and game schedules
try:
sched=pd.read_csv(extravar.get())
except:
print('Problem opening schedule and other required files for sending game schedules')
fname=filedialog.askopenfilename(title='Select schedule file.')
sched=pd.read_csv(fname)
# fields=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Fields')
fields=pd.read_csv(cnf._INPUT_DIR+'\\fields.csv')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel('Teams_coaches.xlsx', sheetname='Coaches')
coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv')
# INTERNAL TESTING
# Mastersignups=Mastersignups[Mastersignups['Last']=='Croat']
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
# open and send master CYC schedule
sendschedule(teams, sched, fields, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Recruit':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
except:
print('Problem loading family contacts')
try: # Recruits stored in CSV
Recruits=pd.read_csv(cnf._OUTPUT_DIR+'\\%s%s_recruits.csv' %(season, year))
print('Loaded possible recruits from csv file')
except:
fname=filedialog.askopenfilename(title='Select recruits file.')
if fname.endswith('.csv'): # final move is query for file
Recruits=pd.read_csv(fname)
else:
print('Recruits file needed in csv format.')
return
emailrecruits(Recruits, famcontact, emailtitle, messagefile, **kwargs)
if mtype.get()=='Assign':
# Notify parents needs teams, mastersignups, famcontacts
if recruitbool.get():
kwargs.update({'recruit':True})
try:
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv', encoding='cp437')
# INTERNAL TESTING
# Mastersignups=Mastersignups[Mastersignups['Last']=='Croat']
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
tranmessagefile='messages\\'+transmessfile.get()
with open(tranmessagefile, 'r') as file:
blanktransmess=file.read()
except:
print('Problem loading mastersignups, famcontacts')
return
notifyfamilies(teams, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, blanktransmess, **kwargs)
        if mtype.get()=='Missing':
            try:
                missing=pd.read_csv(extravar.get(), encoding='cp437')
                oldteams=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Oldteams') # loads all old teams in list
                Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
                famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
                with open(messagefile, 'r') as file:
                    blankmess=file.read()
                kwargs.update({'oldteams':oldteams,'missing':missing})
            except:
                print('Problem loading missing uniform list, oldteams, mastersignups, or famcontacts')
                return
# TODO Finish ask for missing uniforms script
askforunis(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Cards':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
except:
print('Problem loading famcontacts, mastersignups, or blank message')
return
# TODO Finish ask for missing uniforms script
askforcards(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Other':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
with open(messagefile, 'r') as file:
blankmess=file.read()
except:
print('Problem loading mastersignups, coaches, ')
return
# TODO Finish ask for missing uniforms script
sendteammessage(teams, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs)
if mtype.get()=='All':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
                coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv')
with open(messagefile, 'r') as file:
blankmess=file.read()
except:
print('Problem loading mastersignups, coaches, ')
return
# TODO Finish ask for missing uniforms script
sendallmessage(season, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs)
return
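# The message templates above rely on repeated str.replace calls for the
# $SPORT/$TEAMNAME/$FIRST/... placeholders. A small dict-driven helper (an
# illustrative sketch, not used by the functions in this module) keeps the
# placeholder list and the replacement loop in one place.
def fill_placeholders(template, values):
    ''' Replace $NAME placeholders in template with entries from a dict '''
    for key, val in values.items():
        template = template.replace(key, str(val))
    return template
# e.g. fill_placeholders(blankmess, {'$FIRST': 'Jane', '$SPORT': 'soccer'})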
''' TESTING of notifyfamilies
[sport, team, graderange, coachinfo, playerlist] =cabteamlist[6] i=6
index=thisteam.index[0]
row=thisteam.loc[index]
'''
def readMessage():
''' Choose text file from messages as template for email or log message (w/ find/replace
of team and individual info)
args: none
returns: string with contents of chosen TXT file
'''
def pickMessage():
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
full_path = tk.filedialog.askopenfilename(initialdir = cnf._INPUT_DIR+'\\messages\\', title = 'Choose blank email template',
filetypes=[ ('txt','*.txt')] )
root.destroy() # closes pop up window
return full_path
full_path = pickMessage()
with open(full_path,'r') as file:
blankmess = file.read()
return blankmess
def askforunis():
# TODO finish me
pass
def askforcards(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs):
''' Notifying players that need cards and ask for them via custom e-mail (one per player)
kwargs:
choice - 'send' or 'test'
'''
choice=kwargs.get('choice','test')
if choice=='send':
smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
smtpObj.ehlo() # say hello
smtpObj.starttls() # enable encryption for send
print('Enter password for sponsors club gmail ')
passwd=input()
smtpObj.login('<EMAIL>', passwd)
else: # testing only... open log file
logfile=open('parent_email_log.txt','w', encoding='utf-8')
# this years signups only (later match for sport and team)
Mastersignups=Mastersignups[Mastersignups['Year']==year]
# drop non-CYC K and 1 level teams
teams=teams[teams['Grade']>=2]
# Make list of sport/team/school/graderange
teamlist=[]
for index, row in teams.iterrows():
# get school
if '#' not in teams.loc[index]['Team']:
school='Cabrini'
else:
school=teams.loc[index]['Team'].split('#')[0]
# Get sport, team, graderange, coach info (first/last/e-mail), playerlist
teamlist.append([teams.loc[index]['Sport'], teams.loc[index]['Team'], school,
teams.loc[index]['Graderange']])
# dict. with each team and its players
cards=findcards() # find all player cards
if not cards: # terminate if no cards are found (path error?)
print("Error opening CYC card image database")
return
# Drop all player nums found in cards
cardslist=list(cards.keys())
cardslist=[i for i in cardslist if '-' not in i]
cardslist=[int(i) for i in cardslist]
# Only keep signups without cards
Mastersignups=Mastersignups[~Mastersignups['Plakey'].isin(cardslist)]
CYCSUs=pd.DataFrame()
for i, [sport, team, school, graderange] in enumerate(teamlist):
CYCSUs=CYCSUs.append(Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)])
# only one notice needed per player
CYCSUs=CYCSUs.drop_duplicates('Plakey')
CYCSUs=pd.merge(CYCSUs, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
for index, row in CYCSUs.iterrows():
# Replace first name in e-mail title (default e-mail title is fall $SPORT for $FIRST)
thistitle=emailtitle.replace('$FIRST', row.First)
thistitle=thistitle.replace('$LAST', row.Last)
# custom message for individual player on this team
thismess=blankmess.replace('$FIRST', row.First)
thismess=thismess.replace('$LAST', row.Last)
recipients=getemailadds(row)
# Create custom email message (can have multiple sports in df)
if choice=='send':
# add From/To/Subject to actual e-mail
thisemail='From: Cabrini Sponsors Club <<EMAIL>>\nTo: '
thisemail+=', '.join(recipients)+'\nSubject: '+thistitle+'\n'
thisemail+=thismess
thisemail=thisemail.encode('utf-8')
for i,addr in enumerate(recipients): # Send message to each valid recipient in list
try:
smtpObj.sendmail('<EMAIL>', addr, thisemail)
print ('Message sent to ', addr)
except:
print('Message to ', addr, ' failed.')
if not recipients:
print('No email address for ', row.First, row.Last)
else: # Testing mode ... just write to log w/o e-mail header and such
logfile.write(thistitle+'\n')
logfile.write(thismess+'\n')
# close log file (if testing mode)
if choice!='send':
logfile.close()
else:
pass
# TODO fix this attempted close
# smtpObj.quit() # close SMTP connection
return
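# The repeated "TODO fix this attempted close" above can be handled by using
# smtplib.SMTP as a context manager (supported since Python 3.3), which calls
# quit() on exit. A hedged sketch of the send loop wrapped that way; the
# '<EMAIL>' placeholder is kept from this file and the password handling
# mirrors the functions above.
def send_all(messages, passwd):
    ''' messages: list of (recipients, raw_message_string) pairs '''
    with smtplib.SMTP('smtp.gmail.com', 587) as smtpObj:
        smtpObj.ehlo()
        smtpObj.starttls()
        smtpObj.login('<EMAIL>', passwd)
        for recipients, raw in messages:
            smtpObj.sendmail('<EMAIL>', recipients, raw)
    # connection is quit automatically when the with-block exits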
def sendallmessage(season, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs):
    ''' Top level messaging function for sending a single generic message to all recent
    sports parents plus coaches (one Bcc e-mail to everyone)
    currently not including SMS
kwargs:
choice - 'send' or 'test'
'''
choice=kwargs.get('choice','test')
if choice=='send':
smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
smtpObj.ehlo() # say hello
smtpObj.starttls() # enable encryption for send
print('Enter password for sponsors club gmail ')
passwd=input()
smtpObj.login('<EMAIL>', passwd)
else: # testing only... open log file
logfile=open('allparent_email_log.txt','w', encoding='utf-8')
# Get all email addresses from recent parents (default last 3 seasons)
recipients=makeemaillist(Mastersignups, famcontact, season, year, SMS=False)
# add all coach emails
coachemails=np.ndarray.tolist(coaches.Email.unique())
    coachemails=[i for i in coachemails if isinstance(i, str) and '@' in i]
recipients.extend(coachemails)
recipients=set(recipients)
recipients=list(recipients) # unique only
# Create custom email message (can have multiple sports in df)
if choice=='send':
if 'KCtest' in kwargs: # internal only send test
recipients=['<EMAIL>','<EMAIL>']
msg=MIMEText(blankmess,'plain')
# msg = MIMEMultipart('alternative') # message container
msg['Subject'] = emailtitle
msg['From'] = '<NAME> <<EMAIL>>'
msg['To'] = '<NAME> <<EMAIL>>'
msg['Bcc']=','.join(recipients) # single e-mail or list
# Simultaneous send to all in recipient list
smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
print ('Message sent to ', ','.join(recipients))
else: # Testing mode
tempstr='Test message to: '+', '.join(recipients)
logfile.write(tempstr+'\n')
logfile.write(blankmess)
# close log file (if testing mode)
if choice!='send':
logfile.close()
else:
pass
# smtpObj.quit() # close SMTP connection
return
def sendteammessage(teams, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs):
    ''' Top level messaging function for sending one custom message per selected team
    to its coaches and all team parent e-mail addresses (single e-mail per team)
    currently not including SMS
kwargs:
choice - 'send' or 'test'
recruit - T or F -- add recruiting statement for short teams
mformat - not really yet used ... just sending as text not html
'''
choice=kwargs.get('choice','test')
if choice=='send':
smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
smtpObj.ehlo() # say hello
smtpObj.starttls() # enable encryption for send
print('Enter password for sponsors club gmail ')
passwd=input()
smtpObj.login('<EMAIL>', passwd)
else: # testing only... open log file
logfile=open('team_email_log.txt','w', encoding='utf-8')
# this years signups only (later match for sport and team)
Mastersignups=Mastersignups[Mastersignups['Year']==year]
# drop extra co-ed K or other entries
teams=teams.drop_duplicates(['Team'])
myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
# Make list of sport/team/school/graderange/coachinfo/playerlist
teamlist=[]
for index, row in myteams.iterrows():
# get school
if '#' not in myteams.loc[index]['Team']:
school='Cabrini'
try:
coachinfo=myteams.loc[index]['Fname']+' '+ myteams.loc[index]['Lname']+' ('+myteams.loc[index]['Email']+')'
except:
coachinfo=''
else:
school=myteams.loc[index]['Team'].split('#')[0]
coachinfo=''
# Get sport, team, graderange, coach info (first/last/e-mail), playerlist
teamlist.append([row.Sport, row.Team, school,
gradetostring(row.Graderange), coachinfo, row.Playerlist])
# Separate notification for each signup is OK
for i, [sport, team, school, graderange, coach, playerlist] in enumerate(teamlist):
thisteam=Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)]
thisteam=pd.merge(thisteam, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
# Cabrini team base message
thisteammess=blankmess
thistitle=emailtitle
# Make team-specific replacements in message text and e-mail title
for j, col in enumerate(['$SPORT', '$TEAMNAME', '$SCHOOL', '$GRADERANGE', '$COACH', '$PLAYERLIST']):
thisteammess=thisteammess.replace(col, textwrap.fill(teamlist[i][j], width=100))
thistitle=thistitle.replace(col, teamlist[i][j])
# get coach emails
recipients=getcoachemails(team, teams, coaches, **{'asst':True})
# Now get all unique team email addresses (single message to coach and team)
recipients=getallteamemails(thisteam, recipients)
# Create custom email message (can have multiple sports in df)
if choice=='send':
msg=MIMEText(thisteammess,'plain')
# msg = MIMEMultipart('alternative') # message container
msg['Subject'] = emailtitle
msg['From'] = '<NAME> <<EMAIL>>'
# part2=MIMEText(thismess_html,'alternate')
msg['To']=','.join(recipients) # single e-mail or list
# Simultaneous send to all in recipient list
smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
print ('Message sent to ', ','.join(recipients))
if not recipients:
print('No email addresses for team', team)
else: # Testing mode ... just write to log w/o e-mail header and such
logfile.write(thistitle+'\n')
logfile.write(thisteammess+'\n')
# close log file (if testing mode)
if choice!='send':
logfile.close()
else:
pass
# TODO fix this attempted close
# smtpObj.quit() # close SMTP connection
return
def makeemaillist(Mastersignups, famcontact, thisseason, thisyear, SMS=False):
    '''Return e-mail list for active families (mainly for the e-mail contact list);
    a family is active if it has a player in the 3 prior sport-seasons (including the current one)
'''
# TODO generalize to n prior sports seasons
thisyearSU=Mastersignups[Mastersignups['Year']==thisyear] # take all form
lastyearSU=Mastersignups[Mastersignups['Year']==(thisyear-1)]
lastyearSU=lastyearSU[lastyearSU['Grade']!=8]
seasonlist=['Fall', 'Winter', 'Spring']
pos=seasonlist.index(thisseason)
activeseasons=seasonlist[pos:]
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
activesports=[]
for i, season in enumerate(activeseasons):
sportlist=sportsdict.get(season)
activesports.extend(sportlist)
lastyearSU=lastyearSU[lastyearSU['Sport'].isin(activesports)] # last year's signups incl.
allSU=pd.concat([thisyearSU,lastyearSU],ignore_index=True)
activefams=allSU.Famkey.unique()
emaillist=[]
match=famcontact[famcontact['Famkey'].isin(activefams)]
emails=match.Email1.unique()
emails=np.ndarray.tolist(emails)
emaillist.extend(emails)
emails=match.Email2.unique()
emails=np.ndarray.tolist(emails)
emaillist.extend(emails)
emails=match.Email3.unique()
emails=np.ndarray.tolist(emails)
emaillist.extend(emails)
emaillist=set(emaillist) # eliminate duplicates
emaillist=list(emaillist)
emaillist=[x for x in emaillist if str(x) != 'nan'] # remove nan
emaillist=[x for x in emaillist if str(x) != 'none'] # remove nan
if not SMS: # Drop SMS
emaillist=[x for x in emaillist if not str(x).startswith('314')]
emaillist=[x for x in emaillist if not str(x).startswith('1314')]
return emaillist
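# One possible reading of the "generalize to n prior sports seasons" TODO in
# makeemaillist (hedged sketch): collect the sports from the current season and the
# (n-1) seasons before it, wrapping across school years; the season/sport mapping is
# copied from makeemaillist.
def activesports_lastn(thisseason, n=3):
    seasonlist=['Fall', 'Winter', 'Spring']
    sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
    pos=seasonlist.index(thisseason)
    activesports=[]
    for i in range(n):
        activesports.extend(sportsdict[seasonlist[(pos - i) % len(seasonlist)]])
    return activesports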
def getcabsch(sched, teams, coaches, fields, **kwargs):
    ''' Return the Cabrini-containing subset of games from the master CYC schedule
manual save... can then feed csv to sendschedule
kwargs:
sport -- Soccer, VB or whatever
div--- division 5G
school - Cabrini
#TESTING sched=fullsched.copy()
'''
if 'school' in kwargs:
if kwargs.get('school','')=='Cabrini':
# drop transfer teams w/ #
teams=teams[~teams['Team'].str.contains('#')]
if 'sport' in kwargs:
sport=kwargs.get('sport','')
teams=teams[teams['Sport']==sport]
if 'div' in kwargs:
div=kwargs.get('div','')
grade=int(div[0])
if div[1].upper()=='G':
gender='f'
elif div[1].upper()=='B':
gender='m'
teams=teams[(teams['Grade']==grade) & (teams['Gender']==gender)]
# perform any team filtering
sched=sched.rename(columns={'Start':'Time','Venue':'Location','Sched Name':'Division',
'Visitor':'Away'})
teamdict=findschteams(sched, teams, coaches)
cabsched=pd.DataFrame()
for key, [div, schname] in teamdict.items():
        match=sched[(sched['Division'].str.startswith(div)) & ((sched['Home'].str.contains(schname)) | (sched['Away'].str.contains(schname)))].copy()
if 'Cabrini' not in schname:
newname=schname.split('/')[0]+'-Cabrini'
match['Home']=match['Home'].str.replace(schname,newname)
match['Away']=match['Away'].str.replace(schname,newname)
# add team column via assign
match=match.assign(Team=key)
# Why isn't team col being copied?
cabsched=cabsched.append(match, ignore_index=True)
print(len(match),' games for team', str(schname))
cabsched['Home']=cabsched['Home'].str.replace('St Frances','')
cabsched['Away']=cabsched['Away'].str.replace('St Frances','')
cabsched=cabsched.sort_values(['Division','Date','Time'])
# now sort
myCols=['Date','Time','Day','Location','Division','Home','Away','Team']
# add col if missing from CYC schedule
for miss in [i for i in myCols if i not in cabsched.columns]:
print(miss,'column missing from full CYC schedule')
cabsched[miss]=''
cabsched=cabsched[myCols] # set in above preferred order
flist=np.ndarray.tolist(cabsched.Location.unique())
missing=[s for s in flist if s not in fields['Location'].tolist()]
if len(missing)>0:
print('Address missing from fields table:',','.join(missing))
# convert to desired string format here (write-read cycle makes it a string anyway)
# cabsched.Time=cabsched.Time.apply(lambda x:datetime.time.strftime(x, format='%I:%M %p'))
#cabsched['Date']=cabsched['Date'].dt.strftime(date_format='%d-%b-%y')
return cabsched
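# Usage sketch for getcabsch (file names below are assumptions): extract the Cabrini
# games for one division from a full CYC schedule and save them in the csv format
# that sendschedule expects.
def export_cabrini_schedule(fullsched_csv, teams, coaches, fields, outfile, **kwargs):
    fullsched=pd.read_csv(fullsched_csv)
    cabsched=getcabsch(fullsched, teams, coaches, fields, **kwargs)
    cabsched.to_csv(outfile, index=False)
    return cabsched
# e.g. export_cabrini_schedule('CYC_full_schedule.csv', teams, coaches, fields,
#                              'Cabrini_2017_schedule.csv', school='Cabrini', sport='Soccer')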
def detectschchange(sched1, sched2):
'''Compare two schedule versions and return unique rows (changed games)
'''
# Convert both to datetime/timestamps if in string format (probably %m/%d/%Y)
if type(sched1.iloc[0]['Date'])==str:
try:
sched1['Date']=sched1['Date'].apply(lambda x:datetime.datetime.strptime(x, "%m/%d/%Y"))
except:
print('Problem with string to datetime conversion for', sched1.iloc[0]['Date'])
if type(sched2.iloc[0]['Date'])==str:
try:
sched2['Date']=sched2['Date'].apply(lambda x:datetime.datetime.strptime(x, "%m/%d/%Y"))
except:
print('Problem with string to datetime conversion for', sched2.iloc[0]['Date'])
if type(sched2.iloc[0]['Time'])==str:
try:
            # convert HH:MM:SS strings to datetime.time
            sched2['Time']=sched2['Time'].apply(lambda x:datetime.datetime.strptime(x, "%H:%M:%S").time())
        except:
            print('Problem with string to datetime conversion for', sched2.iloc[0]['Time'])
# all columns by default, false drops both duplicates leaving unique rows
bothsch=pd.concat([sched1,sched2])
alteredrows=bothsch.drop_duplicates(keep=False)
alteredrows=alteredrows.sort_values(['Date','Time','Division'])
return alteredrows
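# Usage sketch for detectschchange (file names are assumptions): compare two saved
# schedule versions and write out only the games whose date/time/location changed.
def report_schedule_changes(old_csv, new_csv, outfile='changed_games.csv'):
    sched1=pd.read_csv(old_csv)
    sched2=pd.read_csv(new_csv)
    changed=detectschchange(sched1, sched2)
    changed.to_csv(outfile, index=False)
    return changed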
def makefieldtable(df, fields):
''' Make separate table of field addresses for all fields in
given team's schedule (called by sendschedule)'''
venues=np.ndarray.tolist(df.Location.unique())
venues=[s.strip() for s in venues]
ft=pd.DataFrame()
ft['Location']=venues
ft=pd.merge(ft, fields, how='left', on=['Location'])
ft=ft[['Location','Address']]
return ft
def notifyfamilies(teams, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, blanktransmess, **kwargs):
''' Top level messaging function for notifying families of team assignment/ CYC card
+ optional short-team-player-recruiting
via custom e-mail; one per player
currently not including SMS
kwargs:
choice - 'send' or 'test'
recruit - T or F -- add recruiting statement for short teams
'''
choice=kwargs.get('choice','test')
if choice=='send':
smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
smtpObj.ehlo() # say hello
smtpObj.starttls() # enable encryption for send
print('Enter password for sponsors club gmail ')
passwd=input()
smtpObj.login('<EMAIL>', passwd)
else: # testing only... open log file
logfile=open('parent_email_log.txt','w', encoding='utf-8')
# this years signups only (later match for sport and team)
Mastersignups=Mastersignups[Mastersignups['Year']==year]
myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
# Make list of sport/team/school/graderange/coachinfo/playerlist
teamlist=[]
for index, row in myteams.iterrows():
# get school
if '#' not in myteams.loc[index]['Team']:
school='Cabrini'
coachinfo=myteams.loc[index]['Fname']+' '+ myteams.loc[index]['Lname']+' ('+myteams.loc[index]['Email']+')'
else:
school=myteams.loc[index]['Team'].split('#')[0]
coachinfo=''
# Get sport, team, graderange, coach info (first/last/e-mail), playerlist
teamlist.append([row.Sport.lower(), row.Team, school, gradetostring(row.Graderange),
coachinfo, row.Playerlist])
# dict. with each team and its players
cards=findcards() # find all player cards
if not cards: # terminate if no cards are found (path error?)
print("Error opening CYC card image database")
return
# Separate notification for each signup is OK
for i, [sport, team, school, graderange, coachinfo, playerlist] in enumerate(teamlist):
thisteam=Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)]
thisteam=pd.merge(thisteam, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
if '#' not in team:
# Cabrini team base message
thisteammess=blankmess
else: # base message for transferred players
thisteammess=blanktransmess
thisteamtitle=emailtitle
# Make team-specific replacements
for j, col in enumerate(['$SPORT', '$TEAMNAME', '$SCHOOL', '$GRADERANGE', '$COACH', '$PLAYERLIST']):
thisteammess=thisteammess.replace(col, textwrap.fill(teamlist[i][j], width=100))
thisteamtitle=thisteamtitle.replace(col, teamlist[i][j])
# Check if Cabrini team is short of players (max grade, sport, numplayers)
try:
recmess=makerecmess(team, thisteam['Grade'].max(), sport, len(thisteam))
except:
recmess='' # handles empty teams during testing
# Either blank inserted or generic needs more players request (same for whole team)
thisteammess=thisteammess.replace('$RECRUIT','\n'+recmess)
for index, row in thisteam.iterrows():
# Replace first name in e-mail title (default e-mail title is fall $SPORT for $FIRST)
thistitle=thisteamtitle.replace('$FIRST', row.First)
thistitle=thistitle.replace('$SPORT', row.Sport)
# Check for each players CYC card if necessary (also for older transfer teams)
thiscardmess=makecardmess(row, cards)
# custom message for individual player on this team
thismess=thisteammess.replace('$FIRST', row.First)
thismess=thismess.replace('$LAST', row.Last)
# message is blank if on file or not required and
thismess=thismess.replace('$CYCCARD', '\n'+thiscardmess)
recipients=getemailadds(row)
# Create custom email message (can have multiple sports in df)
if choice=='send':
# add From/To/Subject to actual e-mail
                msg=MIMEText(thismess,'plain')
# msg = MIMEMultipart('alternative') # message container
msg['Subject'] = thistitle
msg['From'] = 'Cabrini Sponsors Club <<EMAIL>>'
msg['To'] = 'Cabrini Sports Parents <<EMAIL>>'
msg['Bcc']=','.join(recipients) # single e-mail or list
# Simultaneous send to all in recipient list
smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
print ('Message sent to ', ','.join(recipients))
thisemail='From: Cabrini Sponsors Club <<EMAIL>>\nTo: '
thisemail+=', '.join(recipients)+'\nSubject: '+thistitle+'\n'
thisemail+=thismess
thisemail=thisemail.encode('utf-8')
for i,addr in enumerate(recipients): # Send message to each valid recipient in list
try:
smtpObj.sendmail('<EMAIL>', addr, thisemail)
print ('Message sent to ', addr)
except:
print('Message to ', addr, ' failed.')
if not recipients:
print('No email address for ', row.First, row.Last)
else: # Testing mode ... just write to log w/o e-mail header and such
logfile.write(thistitle+'\n')
logfile.write(thismess+'\n')
# close log file (if testing mode)
if choice!='send':
logfile.close()
else:
pass
# TODO fix this attempted close
# smtpObj.quit() # close SMTP connection
return
def makecardmess(row, cards):
    ''' Determine if card is needed and add generic message to that effect (called by emailparent_tk, notifyfamilies)
row is Series
'''
cmess=("$FIRST $LAST needs a CYC ID card to play on this team and we do not have one in our files."
"If your child already has this ID card, please take a picture of it and e-mail to <EMAIL>."
"If you don't have one, you can get one online at: https://idcards.cycstl.net/ or at uniform night. "
"For this you need: 1) picture of the child 2) child's birth certificate (or birth document) and 3) $5 fee")
if str(row.Plakey) in cards:
return '' # already on file
# Now handle teams that don't need CYC cards (generally K or 1st)
if '-' not in row.Team: # non-CYC level teams and transfer teams
if '#' not in row.Team: # non-CYC cabrini team
return '' # junior team doesn't require card
else: # determine grade level for transfer team
tempstr=row.Team
tempstr=tempstr.split('#')[1][0:1]
tempstr=tempstr.replace('K','0')
try:
grade=int(tempstr)
if grade<2: # judge dowd or junior transfer team
return ''
except:
print("couldn't determine grade for transfer team")
return ''
# all remaining players need a card
cmess=cmess.replace('$FIRST',row.First)
cmess=cmess.replace('$LAST',row.Last)
cmess=textwrap.fill(cmess, width=100)
return cmess
'''TESTING
makerecmess('teamname', 2, 'T-ball', 14)
textwrap.fill(recmess, width=80)
'''
def makerecmess(team, grade, sport, numplayers):
''' Figure out if team is short of players (based on grade level, sport, Cabteam or not)
'''
recmess=('This team could use more players. If you know anyone who is interested,'
'please inform us at <EMAIL>.')
recmess=textwrap.fill(recmess, width=100)
if '#' in team: # no recruiting for transfer teams
return ''
if grade=='K':
grade=0
else:
grade=int(grade)
if sport=='VB': # 8 for all grades
if numplayers<8:
return recmess
if sport=='Soccer':
if grade>=5: # 11v11 so need 16
if numplayers<16:
return recmess
elif grade<=4 and grade>=2: # 8v8 from 2nd to 4th so 12 is OK
if numplayers<13:
return recmess
elif grade==1: # 7v7 so 11 is OK
if numplayers<12:
return recmess
else: # k is 6v6 so 10 is OK
if numplayers<11:
return recmess
if sport=='Basketball': # 5v5 for all grades so 10 is good
if numplayers<11:
return recmess
if sport=='T-ball': # 9v9 ish so 13 is good
if numplayers<14:
return recmess
if sport=='Baseball': # 9v9 ish so 13 is good
if numplayers<14:
return recmess
if sport=='Softball': # 9v9 ish so 13 is good
if numplayers<14:
return recmess
return ''
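# The thresholds in makerecmess can also be read as a table of minimum "full" roster
# sizes; a sketch of the same numbers expressed as data (values copied from the
# branches above), which makes the per-sport rules easier to audit.
def min_full_roster(sport, grade):
    ''' Smallest roster size that does NOT trigger the recruiting message '''
    if sport=='VB':
        return 8
    if sport=='Soccer':
        if grade>=5:
            return 16
        if grade>=2:
            return 13
        if grade==1:
            return 12
        return 11
    if sport=='Basketball':
        return 11
    if sport in ('T-ball','Baseball','Softball'):
        return 14
    return 0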
def emailcoach_tk(teams, coaches, gdrivedict):
''' tk interface for e-mails to team coaches
some required datasets (players, famcontact, mastersignups) are directly loaded depending on choice
message types (mtypes) are:
unis - send summary of missing uniforms to team coaches
contacts - send contacts and current google drive link
bills - send summary of outstanding bills
'''
root = tk.Tk()
root.title('Send e-mail to coaches')
unifilename=tk.StringVar()
try:
unifiles=glob.glob('missingunilist*') # find most recent uniform file name
if len(unifiles)>1:
unifile=findrecentfile(unifiles) # return single most recent file
else:
unifile=unifiles[0]
# find most recent missing uni file name
unifilename.set(unifile)
except: # handle path error
unifilename.set('missingunilist.csv')
billname=tk.StringVar() # file
try:
billfiles=glob.glob('billlist*')
if len(billfiles)>1:
billfile=findrecentfile(billfiles) # return single most recent file
else:
billfile=billfiles[0]
# find most recent billlist file name
billname.set(billfile)
except:
billname.set('billist.csv')
    asstbool=tk.BooleanVar() # optionally also e-mail assistant coaches
emailtitle=tk.StringVar() # e-mail title
mtype=tk.StringVar() # coach message type
messfile=tk.StringVar() # text of e-mail message
choice=tk.StringVar() # test or send -mail
# Functions to enable/disable relevant checkboxes depending on radiobutton choice
def Uniopts():
''' Disable irrelevant checkboxes '''
billentry.config(state=tk.DISABLED)
unientry.config(state=tk.NORMAL)
messfile.set('coach_email_outstanding_unis.txt')
# clear current team selector... this autoloads oldteams
for i, val in enumerate(teamdict):
teamlist[i].set(0)
emailtitle.set('Return of uniforms for your Cabrini team')
def Contactopts():
''' Disable irrelevant checkboxes '''
billentry.config(state=tk.DISABLED)
unientry.config(state=tk.DISABLED)
messfile.set('coach_email_contacts.txt')
emailtitle.set('Contact list for your Cabrini team')
def Billopts():
''' Disable irrelevant checkboxes '''
billentry.config(state=tk.NORMAL)
unientry.config(state=tk.DISABLED)
messfile.set('coach_email_outstanding_bills.txt')
emailtitle.set('Fees still owed by your Cabrini team')
def Otheropts():
''' Display relevant choices for other generic message to parents '''
billentry.config(state=tk.DISABLED)
unientry.config(state=tk.DISABLED)
messfile.set('temp_message.txt')
emailtitle.set('Message from Sponsors Club')
# e-mail title and message file name
rownum=0
tk.Label(root, text='Title for e-mail').grid(row=rownum, column=0)
titleentry=tk.Entry(root, textvariable=emailtitle)
titleentry.config(width=30)
titleentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(root, text='messagefile').grid(row=rownum, column=0)
messentry=tk.Entry(root, textvariable=messfile)
messentry.config(width=30)
messentry.grid(row=rownum, column=1)
rownum+=1
# Choose counts, deriv, both or peaks plot (radio1)
tk.Radiobutton(root, text='Missing uniforms', value='Unis', variable = mtype, command=Uniopts).grid(row=rownum, column=0)
tk.Radiobutton(root, text='Send contact info', value='Contacts', variable = mtype, command=Contactopts).grid(row=rownum, column=1)
tk.Radiobutton(root, text='Send bill info', value='Bills', variable = mtype, command=Billopts).grid(row=rownum, column=2)
tk.Radiobutton(root, text='Other message', value='Other', variable = mtype, command=Otheropts).grid(row=rownum, column=3)
rownum+=1
asstcheck=tk.Checkbutton(root, variable=asstbool, text='Email asst coaches?')
asstcheck.grid(row=rownum, column=0) # can't do immediate grid or nonetype is returned
rownum+=1
tk.Label(root, text='Bill_list file name').grid(row=rownum, column=0)
billentry=tk.Entry(root, textvariable=billname)
billentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(root, text='Missing uni file name').grid(row=rownum, column=0)
unientry=tk.Entry(root, textvariable=unifilename)
unientry.grid(row=rownum, column=1)
rownum+=1
# insert team selector
# Specific team selector section using checkboxes
teamdict=shortnamedict(teams)
teamlist=[] # list of tk bools for each team
# Make set of bool/int variables for each team
for i, val in enumerate(teamdict):
teamlist.append(tk.IntVar())
if '#' not in val:
teamlist[i].set(1) # Cabrini teams checked by default
else:
teamlist[i].set(0) # transfer team
# make checkbuttons for each team
for i, val in enumerate(teamdict):
thisrow=i%5+1+rownum # three column setup
thiscol=i//5
thisname=teamdict.get(val,'')
tk.Checkbutton(root, text=thisname, variable=teamlist[i]).grid(row=thisrow, column=thiscol)
rownum+=math.ceil(len(teamlist)/5)+2
# Decision buttons bottom row
def chooseall(event):
''' Select all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(1)
def clearall(event):
''' deselect all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(0)
def abort(event):
choice.set('abort')
root.destroy()
def test(event):
choice.set('test')
root.destroy()
def KCtest(event):
choice.set('KCtest')
root.destroy()
def send(event):
choice.set('send')
root.destroy()
rownum+=1
d=tk.Button(root, text='All teams')
d.bind('<Button-1>', chooseall)
d.grid(row=rownum, column=0)
d=tk.Button(root, text='Clear teams')
d.bind('<Button-1>', clearall)
d.grid(row=rownum, column=1)
d=tk.Button(root, text='Abort')
d.bind('<Button-1>', abort)
d.grid(row=rownum, column=2)
d=tk.Button(root, text='Test')
d.bind('<Button-1>', test)
d.grid(row=rownum, column=3)
d=tk.Button(root, text='KCtest')
d.bind('<Button-1>', KCtest)
d.grid(row=rownum, column=4)
d=tk.Button(root, text='Send')
d.bind('<Button-1>', send)
d.grid(row=rownum, column=5)
root.mainloop()
if choice.get()!='abort':
kwargs={}
if choice.get()=='KCtest':
kwargs.update({'KCtest':True})
kwargs.update({'choice':'send'})
else:
kwargs.update({'choice':choice.get()}) # send, KCtest (internal) or test (to log file)
if asstbool.get()==True:
kwargs.update({'asst':True}) # Optional send to asst. coaches if set to True
emailtitle=emailtitle.get()
messagefile='messages\\'+messfile.get()
# Handle selection of team subsets
selteams=[]
for i, val in enumerate(teamdict):
if teamlist[i].get()==1:
selteams.append(val)
# Filter teams based on checkbox input
teams=teams[teams['Team'].isin(selteams)]
teams=teams.drop_duplicates(['Team','Sport'])
if mtype.get()=='Contacts':
mtype='contacts'
try:
Mastersignups = pd.read_csv('master_signups.csv', encoding='cp437')
players= pd.read_csv('players.csv', encoding='cp437')
famcontact= pd.read_csv('family_contact.csv', encoding='cp437')
kwargs.update({'SUs':Mastersignups,'players':players,'famcontact':famcontact})
except:
print('Problem loading mastersignups, players, famcontact')
return
elif mtype.get()=='Bills':
mtype='bills'
try:
Mastersignups = pd.read_csv('master_signups.csv', encoding='cp437')
                billlist=pd.read_csv(billname.get(), encoding='cp437')
                kwargs.update({'bills':billlist, 'SUs':Mastersignups})
except:
print('Problem loading billlist, mastersignups')
return
elif mtype.get()=='Unis':
mtype='unis'
try:
missing=pd.read_csv(unifilename.get(), encoding='cp437')
oldteams=pd.read_excel('Teams_coaches.xlsx', sheetname='Oldteams') # loads all old teams in list
kwargs.update({'oldteams':oldteams,'missing':missing})
except:
print('Problem loading missingunis, oldteams')
return
elif mtype.get()=='Other':
# nothing special to load?
pass
emailcoaches(teams, coaches, mtype, emailtitle, messagefile, gdrivedict, **kwargs)
return
def maketextsched(sched,teams, coaches, fields, messagefile, logfile, **kwargs):
''' Concise textable game schedule for cell only people from extracted Cabrini schedule'''
# Convert dates/ times from timestamp to desired string formats for proper output
if type(sched.iloc[0]['Time'])==datetime.time:
sched.Time=sched.Time.apply(lambda x:datetime.time.strftime(x, format='%I:%M %p'))
else:
print('Time format is', type(sched.iloc[0]['Time']))
    if isinstance(sched.iloc[0]['Date'], (pd.Timestamp, datetime.datetime)):
sched['Date']=sched['Date'].dt.strftime(date_format='%d-%b-%y')
else:
print('Date format is', type(sched.iloc[0]['Date']))
if 'div' in kwargs:
div=kwargs.get('div','')
grade=int(div[0])
if div[1].upper()=='G':
gender='f'
elif div[1].upper()=='B':
gender='m'
teams=teams[(teams['Grade']==grade) & (teams['Gender']==gender)]
log=open(logfile,'w', encoding='utf-8')
myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
# Make list of sport/team/school/graderange/coachinfo/playerlist
teamlist=[]
# Open generic message header
with open('messages\\'+messagefile, 'r') as file:
blankmess=file.read()
for index, row in myteams.iterrows():
# get school
if '#' not in myteams.loc[index]['Team']:
school='Cabrini'
try:
coachinfo=myteams.loc[index]['Fname']+' '+ myteams.loc[index]['Lname']+' ('+myteams.loc[index]['Email']+')'
except:
coachinfo=''
else:
school=myteams.loc[index]['Team'].split('#')[0]
coachinfo=''
# Get gender
if row.Gender.lower()=='f':
gender='girls'
elif row.Gender.lower()=='m':
gender='boys'
else:
print('Problem finding team gender')
grrang=str(myteams.loc[index]['Graderange'])
if len(grrang)==2:
grrang=grrang[0]+'-'+grrang[1]
        if grrang.endswith('1'):
            grrang+='st'
        elif grrang.endswith('2'):
            grrang+='nd'
        elif grrang.endswith('3'):
            grrang+='rd'
        else:
            grrang+='th'
grrang=grrang.replace('0','K')
# Get sport, team, graderange, coach info (first/last/e-mail), playerlist
teamlist.append([myteams.loc[index]['Sport'], myteams.loc[index]['Team'], school,
grrang, gender, coachinfo, myteams.loc[index]['Playerlist']])
# get dictionary of teams found/matched in CYC schedule
for i, [sport, team, school, graderange, gender, coachinfo, playerlist] in enumerate(teamlist):
# Either have cabrini only schedule or full CYC schedule
if 'Team' in sched:
thissched=sched[sched['Team']==team].copy()
thissched=thissched[['Date','Time','Day', 'Location']]
else:
print("Couldn't find schedule for", school, str(graderange), sport, team)
continue
if len(thissched)==0:
print('Games not found for ', team)
continue
# TODO construct textable message in log
games=''
for index, row in thissched.iterrows():
# output date, day, time, location
games+=row.Date+' '+row.Day+' '+row.Time+' '+row.Location+'\n'
thismess=blankmess.replace('$SCHEDULE', games)
thismess=thismess.replace('$GRADERANGE', graderange)
thismess=thismess.replace('$GENDER', gender)
thismess=thismess.replace('$SPORT', sport)
# now create/ insert location and address table
thisft=makefieldtable(thissched, fields)
myfields=''
for index, row in thisft.iterrows():
# output date, day, time, location
myfields+=row.Location+' '+row.Address+'\n'
thismess=thismess.replace('$FIELDTABLE', myfields)
log.write(thismess+'\n')
log.close()
return
''' TESTING
teamnamedict=findschteams(sched, teams, coaches)
'''
''' TESTING
sched=pd.read_csv('Cabrini_Bball2018_schedule.csv')
sport, team, school, graderange, gender, coachinfo, playerlist=teamlist[0] i=0
recipients=['<EMAIL>','<EMAIL>']
'''
def sendschedule(teams, sched, fields, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, **kwargs):
    ''' Top level messaging function for e-mailing each selected team its game schedule
    plus a field/address table (one message per team to coaches and team parents)
    currently not including SMS
kwargs:
choice - 'send' or 'test' (defaults to test)
recruit - T or F -- add recruiting statement for short teams
mformat - not really yet used ... just sending as text not html
'''
# convert date- time from extracted schedule to desired str format
# type will generally be string (if reloaded) or timestamp (if direct from prior script)
''' if already string just keep format the same, if timestamp or datetime then convert below
if type(sched.iloc[0]['Time'])==str:
sched.Time=pd.to_datetime(sched.Time, format='%H:%M:%S') # convert string to timestamp
'''
if type(sched.iloc[0]['Time'])!=str:
# Then convert timestamp to datetime to desired string format
sched.Time=sched.Time.apply(lambda x:pd.to_datetime(x).strftime(format='%I:%M %p'))
if type(sched.iloc[0]['Date'])==str:
try:
sched.Date=pd.to_datetime(sched.Date, format='%m/%d/%Y')
except:
try:
sched.Date=pd.to_datetime(sched.Date, format='%Y-%m-%d')
except:
print('Difficulty converting date with format', type(sched.iloc[0]['Date']))
# convert to desired date string format
sched['Date']=sched['Date'].dt.strftime(date_format='%d-%b-%y')
choice=kwargs.get('choice','test')
if choice=='send' or choice=='KCtest':
smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
smtpObj.ehlo() # say hello
smtpObj.starttls() # enable encryption for send
print('Enter password for sponsors club gmail ')
passwd=input()
smtpObj.login('<EMAIL>', passwd)
else: # testing only... open log file
logfile=open(cnf._OUTPUT_DIR+'\\parent_email_log.txt','w', encoding='utf-8')
#%%
# this years signups only (later match for sport and team)
Mastersignups=Mastersignups[Mastersignups['Year']==year]
# Should be only one entry per coach
myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
# Make list of sport/team/school/graderange/coachinfo/playerlist
teamlist=[]
for index, row in myteams.iterrows():
# get school
if '#' not in myteams.loc[index]['Team']:
school='Cabrini'
try:
coachinfo=row.Fname+' '+ row.Lname+' ('+row.Email+')'
except:
coachinfo=''
else:
school=row.Team.split('#')[0]
coachinfo=''
# Get gender
if row.Gender.lower()=='f':
            gender='girls'
elif row.Gender.lower()=='m':
gender='boys'
else:
print('Problem finding team gender')
# Get sport, team, graderange, coach info (first/last/e-mail), playerlist
teamlist.append([row.Sport, row.Team, school, gradetostring(row.Graderange),
gender, coachinfo, row.Playerlist])
# get dictionary of teams found/matched in CYC schedule
teamnamedict=findschteams(sched, teams, coaches)
# TESTING sport, team, school, graderange, gender, coachinfo, playerlist=teamlist[i] i=1
#%%
for i, [sport, team, school, graderange, gender, coachinfo, playerlist] in enumerate(teamlist):
# Either have cabrini only schedule or full CYC schedule
if 'Team' in sched:
thissched=sched[sched['Team']==team].copy()
# shorten team name
thissched['Home']=thissched['Home'].str.split('/').str[0]
thissched['Away']=thissched['Away'].str.split('/').str[0]
thissched['Home']=thissched['Home'].str.strip()
thissched['Away']=thissched['Away'].str.strip()
# Times/dates already reformatted
thissched=thissched[['Date','Time','Day','Home','Away','Location']]
else: # handle if an unsorted CYC schedule (not Cab only)
if team in teamnamedict:
[div,schname]=teamnamedict.get(team,'')
thissched=getgameschedule(div,schname, sched)
thissched=thissched[['Date','Time','Day','Division','Home','Away','Location']]
else:
print("Couldn't find schedule for", school, str(graderange), sport, team)
continue
if len(thissched)==0:
print('Games not found for ', team)
continue
thisteam=Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)]
thisteam=pd.merge(thisteam, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
# Make all team-specific replacements in message body and email title
thisteammess=blankmess
thistitle=emailtitle
# have to use caution due to $TEAMTABLE (common) and $TEAMNAME (rarely used)
for j, col in enumerate(['$SPORT', '$TEAMNAME', '$SCHOOL', '$GRADERANGE', '$GENDER', '$COACH', '$PLAYERLIST']):
            if col!='$SPORT':
val=teamlist[i][j]
else: # lower-case sport name for replace
val=teamlist[i][j].lower()
try:
thisteammess=thisteammess.replace(col, textwrap.fill(val, width=100))
thistitle=thistitle.replace(col, val)
except:
print("Problem with teamname", val)
continue
# Convert thissched to string table and insert into message
thisteammess=thisteammess.replace('$SCHEDULE', thissched.to_string(index=False, justify='left'))
#Make and insert field table
thisft=makefieldtable(thissched, fields)
thisteammess=thisteammess.replace('$FIELDTABLE', thisft.to_string(index=False, justify='left'))
# Get coach emails
recipients=getcoachemails(team, teams, coaches, **{'asst':True})
# Now get all unique team email addresses (single message to coach and team)...drops nan
recipients=getallteamemails(thisteam, recipients)
        if kwargs.get('KCtest', False) or choice=='KCtest': # internal send test
            kwargs['KCtest']=True # keep overriding recipients for every team
            recipients=['<EMAIL>','<EMAIL>']
            choice='send'
# Create custom email message (can have multiple sports in df)
if choice=='send':
try: # single simultaneous e-mail to all recipients
msg=MIMEText(thisteammess,'plain')
msg['Subject'] = thistitle
msg['From'] = 'Cabrini Sponsors Club <<EMAIL>>'
msg['To']=','.join(recipients)
smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
print ('Message sent to ', ','.join(recipients))
except:
print('Message to ', team, 'failed.')
if not recipients:
print('No email addresses for team ', team)
else: # Testing mode ... just write to log w/o e-mail header and such
logfile.write(thistitle+'\n')
logfile.write(thisteammess+'\n')
# close log file (if testing mode)
if choice!='send':
logfile.close()
else:
pass
# TODO fix this attempted close
# smtpObj.quit() # close SMTP connection
#%%
return
# TESTING
#%%
def makegcals(sched, teams, coaches, fields, season, year, duration=1, **kwargs):
''' Turn standard CYC calendar into google calendar
description: 1-2 girls soccer vs opponent
kwargs:
div - get only calendar for given division
school - Cabrini ... drop transfer teams w/ #
splitcals - separate calendar for each team (default True),
'''
#TODO ... Test after alteration of address field
if 'school' in kwargs:
if kwargs.get('school','')=='Cabrini':
# drop transfer teams w/ #
teams=teams[~teams['Team'].str.contains('#')]
if 'div' in kwargs:
div=kwargs.get('div','')
grade=int(div[0])
if div[1].upper()=='G':
gender='f'
elif div[1].upper()=='B':
gender='m'
teams=teams[(teams['Grade']==grade) & (teams['Gender']==gender)]
# ensure correct formats for separate date and time columns
if type(sched.iloc[0]['Date'])==str:
try: # format could be 10/18/2018 0:00
sched.Date=sched.Date.str.split(' ').str[0]
sched.Date=pd.to_datetime(sched.Date, format='%m/%d/%Y')
except:
pass
try:
sched.Date=pd.to_datetime(sched.Date, format='%m/%d/%Y')
except:
pass
try:
sched.Date=pd.to_datetime(sched.Date, format='%Y-%m-%d')
except:
print('Problem converting date format of ', sched.iloc[0]['Date'])
    # gcal wants the date formatted as an %m/%d/%Y string
    sched['Date']=sched['Date'].dt.strftime(date_format='%m/%d/%Y')
''' Reformat of time shouldn't be required i.e. 4:30 PM
if type(sched.iloc[0]['Time'])==str:
try:
sched.Time=pd.to_datetime(sched.Time, format='%H:%M %p')
except:
try:
sched.Time=pd.to_datetime(sched.Time, format='%H:%M:%S') # convert string to timestamp
except:
print('Failed conversion of time column... check format')
'''
# Common reformatting of all gcals
sched=sched.rename(columns={'Date':'Start Date','Time':'Start Time'})
# Calculate end time while still a timestamp
    sched['End Time']=pd.to_datetime(sched['Start Time']) + datetime.timedelta(hours=duration) # use the duration kwarg instead of a hard-coded 1
sched['End Time']=pd.to_datetime(sched['End Time'])
sched['End Time']=sched['End Time'].apply(lambda x:pd.to_datetime(x).strftime('%I:%M %p'))
# Then convert timestamp to datetime to desired string format
    sched['Start Time']=sched['Start Time'].apply(lambda x: pd.to_datetime(x).strftime('%I:%M %p'))
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 14:13:48 2018
@author: <NAME>
Read and assemble corpus annotations (phone, word, speaker, etc.)
into a nice pandas dataframe with one entry per phone in the corpus.
Main function is read.
"""
import numpy as np
import pandas as pd
import io
import os.path as path
import yaml
def fix_word_column(alignment_file, out_file, verbose=False):
"""
There is a bug in the abkhazia code generating the "Word" column of the alignment file.
We fix it post-hoc here.
Input:
- alignment_file: buggy file
- out_file: the path where we'll save the corrected alignment file.
"""
# go through the lines and whenever we find a <noise> label bring
# all the following word column content for the current utterance up by one line.
columns = ['utt_id', 'start', 'stop', 'confidence', 'phone', 'word', 'orig_word']
d = {col : [] for col in columns}
with io.open(alignment_file) as fh:
curr_utt = None
shift = 0
for i, line in enumerate(fh):
if verbose and i % 1000000 == 0:
print("Processed {} phones".format(i))
tokens = line.strip().split()
assert len(tokens) >= 5, tokens
utt_id = tokens[0]
if curr_utt != utt_id:
for _ in range(shift):
d['word'].append(None)
shift = 0
curr_utt = utt_id
            for j, col in enumerate(columns[:-2]):  # j avoids shadowing the line counter i
                d[col].append(tokens[j])
if len(tokens) > 5:
assert len(tokens) == 6, tokens
word = tokens[5]
d['orig_word'].append(word)
if word == '<noise>':
shift+=1
else:
d['word'].append(word)
else:
d['word'].append(None)
d['orig_word'].append(None)
# print corrected version to out_file
with io.open(out_file, 'w') as fh:
for utt, start, stop, conf, phon, word in zip(*[d[col] for col in columns[:-1]]):
if word is None:
fh.write("{} {} {} {} {}\n".format(utt, start, stop, conf, phon))
else:
fh.write("{} {} {} {} {} {}\n".format(utt, start, stop, conf, phon, word))
def load_silences(silence_file):
with io.open(silence_file) as fh:
silences = [line.strip() for line in fh]
return silences
def remove_trailing_silences(alignment_df, silences, verbose=0):
# remove trailing silences/noise at the end of utterances
if verbose:
print("Looking for utterance-final silences in {} utterances".format(len(alignment_df.groupby('utt'))))
trailing_sil_indices = []
for i, (utt, utt_df) in enumerate(alignment_df.groupby('utt')):
if verbose and i % 10000 == 0:
print("Done {} utterances".format(i))
nb_words = np.max(utt_df['word_pos']) + 1
not_last_word_df = utt_df[utt_df['word_pos'] < nb_words-1]
last_word_df = utt_df[utt_df['word_pos'] == nb_words-1]
last_word_df = last_word_df.sort_values('phone_pos', ascending=False)
non_silences = np.where([not(e in silences) for e in last_word_df['phone']])[0]
assert len(non_silences) > 0, (utt_df, last_word_df)
trailing_sil_indices = trailing_sil_indices + list(last_word_df.index[:non_silences[0]])
trailing_sil_indices.sort()
if verbose:
print("Removing {} utterance-final silences".format(len(trailing_sil_indices)))
alignment_df = alignment_df.drop(trailing_sil_indices)
return alignment_df
def load_alignment(alignment_file, silences, verbose=0):
"""Create a DataFrame containing alignment information"""
# utt_position not considered because I'm not sure if the order of the utterances in the alignment file
# make any particular sense in the first place
alignment = {'utt': [], 'start': [], 'stop': [],
'phone': [], 'phone_pos': [],
'word': [], 'word_pos': [],
'prev_phone': [], 'next_phone': [],
'confidence': []}
phone_seq = [] # collect phone sequence with utterance break markers to fill prev-phone and next-phone
with io.open(alignment_file) as fh:
current_utt = None
for i, line in enumerate(fh):
if verbose and i % 1000000 == 0:
print("Processed {} phones".format(i))
tokens = line.strip().split()
assert len(tokens) in [5, 6], tokens
utt, tokens = tokens[0], tokens[1:]
if utt != current_utt:
if alignment['prev_phone']:
del alignment['prev_phone'][-1]
alignment['prev_phone'].append('SIL')
alignment['next_phone'].append('SIL')
add_next_phone = False
current_utt = utt
if len(tokens) == 5:
word_position = 0
else:
word_position = -1 # Silence or noise at utterance beginning
word = None
phone_position = 0
if len(tokens) == 5:
tokens, word = tokens[:-1], tokens[-1]
word_position = word_position + 1
phone_position = 0
else:
phone_position = phone_position + 1
start, stop, confidence_score, phone = tokens
            start, stop, confidence_score = float(start), float(stop), float(confidence_score)
alignment['utt'].append(utt)
alignment['start'].append(start)
alignment['stop'].append(stop)
alignment['phone'].append(phone)
alignment['phone_pos'].append(phone_position)
alignment['word'].append(word)
alignment['word_pos'].append(word_position)
alignment['prev_phone'].append(phone)
if add_next_phone:
alignment['next_phone'].append(phone)
else:
add_next_phone = True
alignment['confidence'].append(confidence_score)
phone_seq.append(phone)
alignment['prev_phone'] = alignment['prev_phone'][:-1]
alignment['next_phone'] = alignment['next_phone'][1:] + ['SIL']
df = pd.DataFrame(alignment)
# drop utterance-initial silences
ind = df.index[[e is None for e in df['word']]]
if verbose:
print("Removing {} utterance-initial silences".format(len(ind)))
df = df.drop(ind)
# drop utterance-final silences
df = remove_trailing_silences(df, silences, verbose)
return df
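# Added usage sketch (not part of the original script): a minimal load of a corrected alignment,
# assuming an abkhazia-style silence inventory file; both paths are placeholders.
def _example_load_alignment():
    silences = load_silences('silence_phones.txt')
    return load_alignment('forced_alignment_fixed.txt', silences, verbose=1)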
def get_utterances(segments_file):
"""get the list of utt_ids"""
with io.open(segments_file) as fh:
utts = [line.strip().split()[0] for line in fh]
return utts
def filter_utts(alignment_df, utts):
# return relevant part of alignment
alignment_df = pd.concat([df for utt, df in alignment_df.groupby('utt') if utt in utts])
return alignment_df
def add_speaker_info(corpus, utt2spk_file):
spk_df = {'utt': [], 'spk': []}
with io.open(utt2spk_file) as fh:
for line in fh:
utt, spk = line.strip().split()
spk_df['utt'].append(utt)
spk_df['spk'].append(spk)
spk_df = pd.DataFrame(spk_df)
    corpus = pd.merge(corpus, spk_df, on='utt')
    return corpus
#!/usr/bin/env python
# coding: utf-8
# I'm going to share YET ANOTHER simple but working tsp solutions optimization technique.
#
# The idea is straightforward: if you have THREE cities in the path that are 'close' (in distance terms) to each other, you can split the path at them and try different permutations of the resulting chunks, hoping that the decrease of the total penalty in this part of the path is greater than the increase in length caused by the permutation itself.
#
# Also, instead of just permuting the chunks of the path, you can try reversing the order of some chunks, but I won't do that in this kernel.
# 1. Import all that we need.
# In[ ]:
import numpy as np
import pandas as pd
import numba
from sympy import isprime, primerange
from math import sqrt
from sklearn.neighbors import KDTree
from tqdm import tqdm
from itertools import combinations, permutations
from functools import lru_cache
# 2. Read input data and define some arrays that we'll need later.
# In[ ]:
cities = pd.read_csv('../input/traveling-santa-2018-prime-paths/cities.csv', index_col=['CityId'])
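# In[ ]:
# Added illustration (not part of the original kernel): a minimal sketch of the chunk-permutation
# idea described above. `path` is a list of city ids, `split_points` are three indices into it, and
# `score` is any callable returning the penalized length of a path - these are assumptions, not the
# kernel's actual data structures.
def try_chunk_permutations(path, split_points, score):
    """Split `path` at three indices and keep the best ordering of the two inner chunks."""
    i, j, k = sorted(split_points)
    head, a, b, tail = path[:i], path[i:j], path[j:k], path[k:]
    best, best_cost = path, score(path)
    for chunks in permutations([a, b]):
        candidate = head + [c for chunk in chunks for c in chunk] + tail
        cost = score(candidate)
        if cost < best_cost:
            best, best_cost = candidate, cost
    return best, best_cost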
from nose.plugins.attrib import attr
import os
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import trackpy
from trackpy import plots
from trackpy.utils import suppress_plotting, fit_powerlaw
from trackpy.tests.common import StrictTestCase
import nose
# Quiet warnings about Axes not being compatible with tight_layout
import warnings
warnings.filterwarnings("ignore", message="This figure includes Axes that are not compatible with tight_layout")
path, _ = os.path.split(os.path.abspath(__file__))
try:
import pims
except ImportError:
PIMS_AVAILABLE = False
else:
PIMS_AVAILABLE = True
def _skip_if_no_pims():
if not PIMS_AVAILABLE:
raise nose.SkipTest('PIMS not installed. Skipping.')
class TestPlots(StrictTestCase):
def setUp(self):
# older matplotlib may raise an invalid error
np.seterr(invalid='ignore')
self.sparse = pd.read_pickle(os.path.join(path, 'data',
'sparse_trajectories.df'))
@attr('slow')
def test_labeling_sparse_trajectories(self):
suppress_plotting()
plots.plot_traj(self.sparse, label=True)
def test_ptraj_empty(self):
suppress_plotting()
f = lambda: plots.plot_traj(DataFrame(columns=self.sparse.columns))
self.assertRaises(ValueError, f)
def test_ptraj_unicode_labels(self):
# smoke test
plots.plot_traj(self.sparse, mpp=0.5)
def test_ptraj_t_column(self):
suppress_plotting()
df = self.sparse.copy()
cols = list(df.columns)
cols[cols.index('frame')] = 'arbitrary name'
df.columns = cols
plots.plot_traj(df, t_column='arbitrary name')
def test_annotate(self):
suppress_plotting()
f = DataFrame({'x': [0, 1], 'y': [0, 1], 'frame': [0, 0],
'mass': [10, 20]})
frame = np.random.randint(0, 255, (5, 5))
# Basic usage
plots.annotate(f, frame)
plots.annotate(f, frame, color='r')
# Coloring by threshold
plots.annotate(f, frame, split_category='mass',
split_thresh=15, color=['r', 'g'])
plots.annotate(f, frame, split_category='mass',
split_thresh=[15], color=['r', 'g'])
plots.annotate(f, frame, split_category='mass',
split_thresh=[15, 25], color=['r', 'g', 'b'])
# Check that bad parameters raise an error.
# Too many colors
bad_call = lambda: plots.annotate(
f, frame, split_category='mass', split_thresh=15, color=['r', 'g', 'b'])
self.assertRaises(ValueError, bad_call)
# Not enough colors
bad_call = lambda: plots.annotate(
f, frame, split_category='mass', split_thresh=15, color=['r'])
self.assertRaises(ValueError, bad_call)
bad_call = lambda: plots.annotate(
f, frame, split_category='mass', split_thresh=15, color='r')
self.assertRaises(ValueError, bad_call)
# Nonexistent column name for split_category
bad_call = lambda: plots.annotate(
f, frame, split_category='not a column', split_thresh=15, color='r')
self.assertRaises(ValueError, bad_call)
# 3D image
bad_call = lambda: plots.annotate(f, frame[np.newaxis, :, :])
self.assertRaises(ValueError, bad_call)
def test_annotate3d(self):
_skip_if_no_pims()
suppress_plotting()
f = DataFrame({'x': [0, 1], 'y': [0, 1], 'z': [0, 1], 'frame': [0, 0],
'mass': [10, 20]})
frame = np.random.randint(0, 255, (5, 5, 5))
plots.annotate3d(f, frame)
plots.annotate3d(f, frame, color='r')
# 2D image
bad_call = lambda: plots.annotate3d(f, frame[0])
self.assertRaises(ValueError, bad_call)
# Rest of the functionality is covered by annotate tests
def test_fit_powerlaw(self):
# smoke test
suppress_plotting()
        em = Series([1, 2, 3], index=[1, 2, 3])
        fit_powerlaw(em)
import json
import os
import random
from random import sample
import numpy as np
import numpy.random
import re
from collections import Counter
import inspect
import pandas as pd
import matplotlib.pyplot as plt
import requests
from IPython.display import HTML
import seaborn as sns
import networkx as nx
from pylab import rcParams
try:
from wordcloud import WordCloud
except ImportError:
print("wordcloud er ikke installert, kan ikke lage ordskyer")
#************** For defining wordbag search
def dict2pd(dictionary):
res = pd.DataFrame.from_dict(dictionary).fillna(0)
s = (res.mean(axis=0))
s = s.rename('snitt')
res = res.append(s)
return res.sort_values(by='snitt', axis=1, ascending=False).transpose()
def def2dict(ddef):
res = dict()
defs = ddef.split(';')
for d in defs:
lex = d.split(':')
if len(lex) == 2:
#print('#'.join(lex))
hyper = lex[0].strip()
occurrences = [x.strip() for x in lex[1].split(',')]
res[hyper] = occurrences
for x in res:
for y in res[x]:
if y.capitalize() not in res[x]:
res[x].append(y.capitalize())
return res
def wordbag_eval(wordbag, urns):
if type(urns) is list:
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
urns = urns
else:
urns = [urns]
param = dict()
param['wordbags'] = wordbag
param['urns'] = urns
r = requests.post("https://api.nb.no/ngram/wordbags", json = param)
return dict2pd(r.json())
def wordbag_eval_para(wordbag, urns):
if type(urns) is list:
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
urns = urns
else:
urns = [urns]
param = dict()
param['wordbags'] = wordbag
param['urns'] = urns
r = requests.post("https://api.nb.no/ngram/wordbags_para", json = param)
return r.json()
def get_paragraphs(urn, paras):
"""Return paragraphs for urn"""
param = dict()
param['paragraphs'] = paras
param['urn'] = urn
r = requests.get("https://api.nb.no/ngram/paragraphs", json=param)
return dict2pd(r.json())
### ******************* wordbag search end
def ner(text = None, dist=False):
    """Analyze text for named entities - set dist = True to also return the four values that go into the decision"""
    if text is None:
        return []
    r = requests.post("https://api.nb.no/ngram/ner", json={'text':text, 'dist':dist})
    return r.json()
#**** names ****
def check_navn(navn, limit=2, remove='Ja Nei Nå Dem De Deres Unnskyld Ikke Ah Hmm <NAME> Jaja Jaha'.split()):
"""Removes all items in navn with frequency below limit and words in all case as well as all words in list 'remove'"""
r = {x:navn[x] for x in navn if navn[x] > limit and x.upper() != x and not x in remove}
return r
def sentences(urns, num=300):
if isinstance(urns[0], list):
urns = [str(x[0]) for x in urns]
params = {'urns':urns,
'num':num}
res = requests.get("https://api.nb.no/ngram/sentences", params=params)
return res.json()
def names(urn, ratio = 0.3, cutoff = 2):
""" Return namens in book with urn. Returns uni- , bi-, tri- and quadgrams """
if type(urn) is list:
urn = urn[0]
r = requests.get('https://api.nb.no/ngram/names', json={'urn':urn, 'ratio':ratio, 'cutoff':cutoff})
x = r.json()
result = (
Counter(x[0][0]),
Counter({tuple(x[1][i][0]):x[1][i][1] for i in range(len(x[1]))}),
Counter({tuple(x[2][i][0]):x[2][i][1] for i in range(len(x[2]))}),
Counter({tuple(x[3][i][0]):x[3][i][1] for i in range(len(x[3]))})
)
return result
def name_graph(name_struct):
m = []
for n in name_struct[0]:
m.append(frozenset([n]))
for n in name_struct[1:]:
m += [frozenset(x) for x in n]
G = []
for x in m:
for y in m:
if x < y:
G.append((' '.join(x), ' '.join(y)))
N = []
for x in m:
N.append(' '.join(x))
Gg = nx.Graph()
Gg.add_nodes_from(N)
Gg.add_edges_from(G)
return Gg
def aggregate_urns(urnlist):
"""Sum up word frequencies across urns"""
if isinstance(urnlist[0], list):
urnlist = [u[0] for u in urnlist]
r = requests.post("https://api.nb.no/ngram/book_aggregates", json={'urns':urnlist})
return r.json()
# Norweigan word bank
def word_variant(word, form):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/variant_form", params={'word':word, 'form':form})
return r.json()
def word_paradigm(word):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/paradigm", params = {'word': word})
return r.json()
def word_form(word):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/word_form", params = {'word': word})
return r.json()
def word_lemma(word):
""" Find lemma form for a given word form """
r = requests.get("https://api.nb.no/ngram/word_lemma", params = {'word': word})
return r.json()
def word_freq(urn, words):
""" Find frequency of words within urn """
params = {'urn':urn, 'words':words}
r = requests.post("https://api.nb.no/ngram/freq", json=params)
return dict(r.json())
def tot_freq(words):
""" Find total frequency of words """
params = {'words':words}
r = requests.post("https://api.nb.no/ngram/word_frequencies", json=params)
return dict(r.json())
def book_count(urns):
params = {'urns':urns}
r = requests.post("https://api.nb.no/ngram/book_count", json=params)
return dict(r.json())
def sttr(urn, chunk=5000):
r = requests.get("https://api.nb.no/ngram/sttr", json = {'urn':urn, 'chunk':chunk})
return r.json()
def totals(top=200):
r = requests.get("https://api.nb.no/ngram/totals", json={'top':top})
return dict(r.json())
def navn(urn):
if type(urn) is list:
urn = urn[0]
r = requests.get('https://api.nb.no/ngram/tingnavn', json={'urn':urn})
return dict(r.json())
def digibokurn_from_text(T):
"""Return URNs as 13 digits (any sequence of 13 digits is counted as an URN)"""
return re.findall("(?<=digibok_)[0-9]{13}", T)
def urn_from_text(T):
"""Return URNs as 13 digits (any sequence of 13 digits is counted as an URN)"""
return re.findall("[0-9]{13}", T)
def metadata(urn=None):
urns = pure_urn(urn)
#print(urns)
r = requests.post("https://api.nb.no/ngram/meta", json={'urn':urns})
return r.json()
def pure_urn(data):
"""Convert URN-lists with extra data into list of serial numbers.
Args:
data: May be a list of URNs, a list of lists with URNs as their
initial element, or a string of raw texts containing URNs
Any pandas dataframe or series. Urns must be in the first column of dataframe.
Returns:
List[str]: A list of URNs. Empty list if input is on the wrong
format or contains no URNs
"""
korpus_def = []
if isinstance(data, list):
if not data: # Empty list
korpus_def = []
if isinstance(data[0], list): # List of lists
try:
korpus_def = [str(x[0]) for x in data]
except IndexError:
korpus_def = []
else: # Assume data is already a list of URNs
korpus_def = [str(int(x)) for x in data]
elif isinstance(data, str):
korpus_def = [str(x) for x in urn_from_text(data)]
elif isinstance(data, (int, np.integer)):
korpus_def = [str(data)]
elif isinstance(data, pd.DataFrame):
col = data.columns[0]
urns = pd.to_numeric(data[col])
korpus_def = [str(int(x)) for x in urns.dropna()]
elif isinstance(data, pd.Series):
korpus_def = [str(int(x)) for x in data.dropna()]
return korpus_def
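# Added usage sketch (not part of the original library): pure_urn accepts several input shapes.
# The 13-digit serial numbers below are made up purely for illustration.
def _example_pure_urn():
    from_list = pure_urn([[2011051108059, 'Author', 'Title', 1999]])       # -> ['2011051108059']
    from_text = pure_urn('see URN:NBN:no-nb_digibok_2011051108059 here')   # -> ['2011051108059']
    from_frame = pure_urn(pd.DataFrame({'urn': [2011051108059]}))          # -> ['2011051108059']
    return from_list, from_text, from_frame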
#### N-Grams from fulltext updated
def unigram(word, period=(1950, 2020), media = 'bok', ddk=None, topic=None, gender=None, publisher=None, lang=None, trans=None):
r = requests.get("https://api.nb.no/ngram/unigrams", params={
'word':word,
'ddk':ddk,
'topic':topic,
'gender':gender,
'publisher':publisher,
'lang':lang,
'trans':trans,
'period0':period[0],
'period1':period[1],
'media':media
})
return frame(dict(r.json()))
def bigram(first,second, period=(1950, 2020), media = 'bok', ddk=None, topic=None, gender=None, publisher=None, lang=None, trans=None):
r = requests.get("https://api.nb.no/ngram/bigrams", params={
'first':first,
'second':second,
'ddk':ddk,
'topic':topic,
'gender':gender,
'publisher':publisher,
'lang':lang,
'trans':trans,
'period0':period[0],
'period1':period[1],
'media':media
})
return frame(dict(r.json()))
def book_counts(period=(1800, 2050)):
r = requests.get("https://api.nb.no/ngram/book_counts", params={
'period0':period[0],
'period1':period[1],
})
return frame(dict(r.json()))
####
def difference(first, second, rf, rs, years=(1980, 2000),smooth=1, corpus='bok'):
"""Compute difference of difference (first/second)/(rf/rs)"""
try:
a_first = nb_ngram(first, years=years, smooth=smooth, corpus=corpus)
a_second = nb_ngram(second, years=years, smooth=smooth, corpus=corpus)
a = a_first.join(a_second)
b_first = nb_ngram(rf, years=years, smooth=smooth, corpus=corpus)
b_second = nb_ngram(rs, years=years, smooth=smooth, corpus=corpus)
if rf == rs:
b_second.columns = [rs + '2']
b = b_first.join(b_second)
s_a = a.mean()
s_b = b.mean()
f1 = s_a[a.columns[0]]/s_a[a.columns[1]]
f2 = s_b[b.columns[0]]/s_b[b.columns[1]]
res = f1/f2
except:
res = 'Mangler noen data - har bare for: ' + ', '.join([x for x in a.columns.append(b.columns)])
return res
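# Added usage sketch (not part of the original library): the call below queries the live
# api.nb.no n-gram service, so the chosen words and period are only an example.
def _example_difference():
    return difference('bil', 'bilen', 'båt', 'båten', years=(1980, 2000), corpus='bok')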
def df_combine(array_df):
    """Combine one-column dataframes into a single dataframe"""
    import pandas as pd
    cols = []
    for i in range(len(array_df)):
        #print(i)
        if array_df[i].columns[0] in cols:
            array_df[i].columns = [array_df[i].columns[0] + '_' + str(i)]
        cols.append(array_df[i].columns[0])
    return pd.concat(array_df, axis=1, sort=True)
def col_agg(df, col='sum'):
c = df.sum(axis=0)
c = pd.DataFrame(c)
c.columns = [col]
return c
def row_agg(df, col='sum'):
c = df.sum(axis=1)
c = pd.DataFrame(c)
c.columns = [col]
return c
def get_freq(urn, top=50, cutoff=3):
"""Get frequency list for urn"""
if isinstance(urn, list):
urn = urn[0]
r = requests.get("https://api.nb.no/ngram/urnfreq", json={'urn':urn, 'top':top, 'cutoff':cutoff})
return Counter(dict(r.json()))
####=============== GET URNS ==================##########
def book_corpus(words = None, author = None,
title = None, subtitle = None, ddk = None, subject = None,
period=(1100, 2020),
gender=None,
lang = None,
trans= None,
limit=20 ):
return frame(book_urn(words, author, title, subtitle, ddk, subject, period, gender, lang, trans, limit),
"urn author title year".split())
def book_urn(words = None, author = None,
title = None, subtitle = None, ddk = None, subject = None,
period=(1100, 2020),
gender=None,
lang = None,
trans= None,
limit=20 ):
"""Get URNs for books with metadata"""
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
return get_urn(query)
def unique_urns(korpus, newest=True):
author_title = {(c[1],c[2]) for c in korpus}
corpus = {(c[0], c[1]):[d for d in korpus if c[0] == d[1] and c[1]==d[2]] for c in author_title }
for c in corpus:
corpus[c].sort(key=lambda c: c[3])
if newest == True:
res = [corpus[c][-1] for c in corpus]
else:
res = [corpus[c][0] for c in corpus]
return res
def refine_book_urn(urns = None, words = None, author = None,
title = None, ddk = None, subject = None, period=(1100, 2020), gender=None, lang = None, trans= None, limit=20 ):
"""Refine URNs for books with metadata"""
# if empty urns nothing to refine
if urns is None or urns == []:
return []
# check if urns is a metadata list, and pick out first elements if that is the case
if isinstance(urns[0], list):
urns = [x[0] for x in urns]
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period' and i != 'urns'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
#print(query)
return refine_urn(urns, query)
def best_book_urn(word = None, author = None,
title = None, ddk = None, subject = None, period=(1100, 2020), gender=None, lang = None, trans= None, limit=20 ):
"""Get URNs for books with metadata"""
if word is None:
return []
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period' and i != 'word'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
return get_best_urn(word, query)
def get_urn(metadata=None):
"""Get urns from metadata"""
if metadata is None:
metadata = {}
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 100
if not 'year' in metadata:
metadata['year'] = 1900
r = requests.get('https://api.nb.no/ngram/urn', json=metadata)
return r.json()
def refine_urn(urns, metadata=None):
"""Refine a list urns using extra information"""
if metadata is None:
metadata = {}
metadata['urns'] = urns
if not ('words' in metadata):
metadata['words'] = []
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 520
if not 'year' in metadata:
metadata['year'] = 1500
r = requests.post('https://api.nb.no/ngram/refineurn', json=metadata)
return r.json()
def get_best_urn(word, metadata=None):
"""Get the best urns from metadata containing a specific word"""
metadata['word'] = word
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 600
if not 'year' in metadata:
metadata['year'] = 1500
r = requests.get('https://api.nb.no/ngram/best_urn', json=metadata)
return r.json()
def get_papers(top=5, cutoff=5, navn='%', yearfrom=1800, yearto=2020, samplesize=100):
"""Get newspapers"""
div = lambda x, y: (int(x/y), x % y)
chunks = 20
# split samplesize into chunks, go through the chunks and then the remainder
(first, second) = div(samplesize, chunks)
r = []
# collect chunkwise
for i in range(first):
r += requests.get("https://api.nb.no/ngram/avisfreq", json={'navn':navn, 'top':top, 'cutoff':cutoff,
'yearfrom':yearfrom, 'yearto':yearto,'samplesize':chunks}
).json()
# collect the remainder
r += requests.get("https://api.nb.no/ngram/avisfreq", json={'navn':navn, 'top':top, 'cutoff':cutoff,
'yearfrom':yearfrom, 'yearto':yearto,'samplesize':second}
).json()
return [dict(x) for x in r]
def urn_coll(word, urns=[], after=5, before=5, limit=1000):
"""Find collocations for word in a set of book URNs. Only books at the moment"""
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
r = requests.post("https://api.nb.no/ngram/urncoll", json={'word':word, 'urns':urns,
'after':after, 'before':before, 'limit':limit})
res = pd.DataFrame.from_dict(r.json(), orient='index')
if not res.empty:
res = res.sort_values(by=res.columns[0], ascending = False)
return res
def urn_coll_words(words, urns=None, after=5, before=5, limit=1000):
"""Find collocations for a group of words within a set of books given by a list of URNs. Only books at the moment"""
coll = pd.DataFrame()
if urns != None:
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
colls = Counter()
if isinstance(words, str):
words = words.split()
res = Counter()
for word in words:
try:
res += Counter(
requests.post(
"https://api.nb.no/ngram/urncoll",
json={
'word':word,
'urns':urns,
'after':after,
'before':before,
'limit':limit}
).json()
)
except:
True
coll = pd.DataFrame.from_dict(res, orient='index')
if not coll.empty:
coll = coll.sort_values(by=coll.columns[0], ascending = False)
return coll
def get_aggregated_corpus(urns, top=0, cutoff=0):
res = Counter()
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
for u in urns:
#print(u)
res += get_freq(u, top = top, cutoff = cutoff)
return pd.DataFrame.from_dict(res, orient='index').sort_values(by=0, ascending = False)
def compare_word_bags(bag_of_words, another_bag_of_words, first_freq = 0, another_freq = 1, top=100, first_col = 0, another_col= 0):
"""Compare two columns taken from two or one frame. Parameters x_freq are frequency limits used to cut down candidate words
from the bag of words. Compare along the columns where first_col and another_col are column numbers. Typical situation is that
bag_of_words is a one column frame and another_bag_of_words is another one column frame. When the columns are all from one frame,
just change column numbers to match the columns"""
diff = bag_of_words[bag_of_words > first_freq][bag_of_words.columns[first_col]]/another_bag_of_words[another_bag_of_words > another_freq][another_bag_of_words.columns[another_col]]
return frame(diff, 'diff').sort_values(by='diff', ascending=False)[:top]
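# Added usage sketch (not part of the original library): compare two one-column frequency
# frames; the counts below are made-up numbers.
def _example_compare_word_bags():
    target = pd.DataFrame({'target': {'fjord': 12.0, 'hav': 5.0, 'by': 1.0}})
    reference = pd.DataFrame({'reference': {'fjord': 2.0, 'hav': 6.0, 'by': 9.0}})
    return compare_word_bags(target, reference, top=3)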
def collocation(
word,
yearfrom=2010,
yearto=2018,
before=3,
after=3,
limit=1000,
corpus='avis',
lang='nob',
title='%',
ddk='%',
subtitle='%'):
"""Defined collects frequencies for a given word"""
data = requests.get(
"https://api.nb.no/ngram/collocation",
params={
'word':word,
'corpus':corpus,
'yearfrom':yearfrom,
'before':before,
'after':after,
'limit':limit,
'yearto':yearto,
'title':title,
'ddk':ddk,
'subtitle':subtitle}).json()
return pd.DataFrame.from_dict(data['freq'], orient='index')
def collocation_data(words, yearfrom = 2000, yearto = 2005, limit = 1000, before = 5, after = 5, title = '%', corpus='bok'):
"""Collocation for a set of words sum up all the collocations words is a list of words or a blank separated string of words"""
import sys
a = dict()
if isinstance(words, str):
words = words.split()
for word in words:
print(word)
try:
a[word] = collocation(
word,
yearfrom = yearfrom, yearto = yearto, limit = limit,
corpus = corpus, before = before,
after = after, title = title
)
a[word].columns = [word]
except:
print(word, ' feilsituasjon', sys.exc_info())
result = pd.DataFrame()
for w in a:
result = result.join(a[w], how='outer')
return pd.DataFrame(result.sum(axis=1)).sort_values(by=0, ascending=False)
class CollocationCorpus:
from random import sample
def __init__(self, corpus = None, name='', maximum_texts = 500):
urns = pure_urn(corpus)
if len(urns) > maximum_texts:
            selection = sample(urns, maximum_texts)
else:
selection = urns
self.corpus_def = selection
self.corpus = get_aggregated_corpus(self.corpus_def, top=0, cutoff=0)
def summary(self, head=10):
info = {
'corpus_definition':self.corpus[:head],
'number_of_words':len(self.corpus)
}
return info
def collocation_old(word, yearfrom=2010, yearto=2018, before=3, after=3, limit=1000, corpus='avis'):
data = requests.get(
"https://api.nb.no/ngram/collocation",
params={
'word':word,
'corpus':corpus,
'yearfrom':yearfrom,
'before':before,
'after':after,
'limit':limit,
'yearto':yearto}).json()
return pd.DataFrame.from_dict(data['freq'], orient='index')
def heatmap(df, color='green'):
return df.fillna(0).style.background_gradient(cmap=sns.light_palette(color, as_cmap=True))
def get_corpus_text(urns, top = 0, cutoff=0):
k = dict()
if isinstance(urns, list):
# a list of urns, or a korpus with urns as first element
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
# assume it is a single urn, text or number
urns = [urns]
for u in urns:
#print(u)
k[u] = get_freq(u, top = top, cutoff = cutoff)
df = pd.DataFrame(k)
res = df.sort_values(by=df.columns[0], ascending=False)
return res
def normalize_corpus_dataframe(df):
colsums = df.sum()
for x in colsums.index:
#print(x)
df[x] = df[x].fillna(0)/colsums[x]
return True
def show_korpus(korpus, start=0, size=4, vstart=0, vsize=20, sortby = ''):
"""Show corpus as a panda dataframe
start = 0 indicates which dokument to show first, dataframe is sorted according to this
size = 4 how many documents (or columns) are shown
top = 20 how many words (or rows) are shown"""
if sortby != '':
val = sortby
else:
val = korpus.columns[start]
return korpus[korpus.columns[start:start+size]].sort_values(by=val, ascending=False)[vstart:vstart + vsize]
def aggregate(korpus):
"""Make an aggregated sum of all documents across the corpus, here we use average"""
return pd.DataFrame(korpus.fillna(0).mean(axis=1))
def convert_list_of_freqs_to_dataframe(referanse):
"""The function get_papers() returns a list of frequencies - convert it"""
res = []
for x in referanse:
res.append( dict(x))
result = pd.DataFrame(res).transpose()
normalize_corpus_dataframe(result)
return result
def get_corpus(top=0, cutoff=0, navn='%', corpus='avis', yearfrom=1800, yearto=2020, samplesize=10):
if corpus == 'avis':
result = get_papers(top=top, cutoff=cutoff, navn=navn, yearfrom=yearfrom, yearto=yearto, samplesize=samplesize)
res = convert_list_of_freqs_to_dataframe(result)
else:
urns = get_urn({'author':navn, 'year':yearfrom, 'neste':yearto-yearfrom, 'limit':samplesize})
res = get_corpus_text([x[0] for x in urns], top=top, cutoff=cutoff)
return res
class Cluster:
def __init__(self, word = '', filename = '', period = (1950,1960) , before = 5, after = 5, corpus='avis', reference = 200,
word_samples=1000):
if word != '':
self.collocates = collocation(word, yearfrom=period[0], yearto = period[1], before=before, after=after,
corpus=corpus, limit=word_samples)
self.collocates.columns = [word]
if type(reference) is pd.core.frame.DataFrame:
reference = reference
elif type(reference) is int:
reference = get_corpus(yearfrom=period[0], yearto=period[1], corpus=corpus, samplesize=reference)
else:
reference = get_corpus(yearfrom=period[0], yearto=period[1], corpus=corpus, samplesize=int(reference))
self.reference = aggregate(reference)
self.reference.columns = ['reference_corpus']
self.word = word
self.period = period
self.corpus = corpus
else:
if filename != '':
self.load(filename)
def cluster_set(self, exponent=1.1, top = 200, aslist=True):
combo_corp = self.reference.join(self.collocates, how='outer')
normalize_corpus_dataframe(combo_corp)
korpus = compute_assoc(combo_corp, self.word, exponent)
korpus.columns = [self.word]
if top <= 0:
res = korpus.sort_values(by=self.word, ascending=False)
else:
res = korpus.sort_values(by=self.word, ascending=False).iloc[:top]
if aslist == True:
res = HTML(', '.join(list(res.index)))
return res
def add_reference(self, number=20):
ref = get_corpus(yearfrom=self.period[0], yearto=self.period[1], samplesize=number)
ref = aggregate(ref)
ref.columns = ['add_ref']
normalize_corpus_dataframe(ref)
self.reference = aggregate(self.reference.join(ref, how='outer'))
return True
def save(self, filename=''):
if filename == '':
filename = "{w}_{p}-{q}.json".format(w=self.word,p=self.period[0], q = self.period[1])
model = {
'word':self.word,
'period':self.period,
'reference':self.reference.to_dict(),
'collocates':self.collocates.to_dict(),
'corpus':self.corpus
}
with open(filename, 'w', encoding = 'utf-8') as outfile:
print('lagrer til:', filename)
outfile.write(json.dumps(model))
return True
def load(self, filename):
with open(filename, 'r') as infile:
try:
model = json.loads(infile.read())
#print(model['word'])
self.word = model['word']
self.period = model['period']
self.corpus = model['corpus']
self.reference = pd.DataFrame(model['reference'])
self.collocates = pd.DataFrame(model['collocates'])
except:
print('noe gikk galt')
return True
def search_words(self, words, exponent=1.1):
if type(words) is str:
words = [w.strip() for w in words.split()]
df = self.cluster_set(exponent=exponent, top=0, aslist=False)
sub= [w for w in words if w in df.index]
res = df.transpose()[sub].transpose().sort_values(by=df.columns[0], ascending=False)
return res
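# Added usage sketch (not part of the original library): building a Cluster downloads data from
# the live api.nb.no services, so the word, period and sample sizes are just an example.
def _example_cluster():
    cl = Cluster('frihet', period=(1950, 1959), corpus='avis',
                 before=5, after=5, reference=50, word_samples=500)
    return cl.cluster_set(top=20, aslist=False)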
def wildcardsearch(params=None):
if params is None:
params = {'word': '', 'freq_lim': 50, 'limit': 50, 'factor': 2}
res = requests.get('https://api.nb.no/ngram/wildcards', params=params)
if res.status_code == 200:
result = res.json()
else:
result = {'status':'feil'}
resultat = pd.DataFrame.from_dict(result, orient='index')
if not(resultat.empty):
resultat.columns = [params['word']]
return resultat
def sorted_wildcardsearch(params):
res = wildcardsearch(params)
if not res.empty:
res = res.sort_values(by=params['word'], ascending=False)
return res
def make_newspaper_network(key, wordbag, titel='%', yearfrom='1980', yearto='1990', limit=500):
if type(wordbag) is str:
wordbag = wordbag.split()
r = requests.post("https://api.nb.no/ngram/avisgraph", json={
'key':key,
'words':wordbag,
'yearto':yearto,
'yearfrom':yearfrom,
'limit':limit})
G = nx.Graph()
if r.status_code == 200:
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > 0 and x != y])
else:
print(r.text)
return G
def make_network(urn, wordbag, cutoff=0):
if type(urn) is list:
urn = urn[0]
if type(wordbag) is str:
wordbag = wordbag.split()
G = make_network_graph(urn, wordbag, cutoff)
return G
def make_network_graph(urn, wordbag, cutoff=0):
r = requests.post("https://api.nb.no/ngram/graph", json={'urn':urn, 'words':wordbag})
G = nx.Graph()
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > cutoff and x != y])
return G
def make_network_name_graph(urn, tokens, tokenmap=None, cutoff=2):
if isinstance(urn, list):
urn = urn[0]
# tokens should be a list of list of tokens. If it is list of dicts pull out the keys (= tokens)
if isinstance(tokens[0], dict):
tokens = [list(x.keys()) for x in tokens]
r = requests.post("https://api.nb.no/ngram/word_graph", json={'urn':urn, 'tokens':tokens, 'tokenmap':tokenmap})
#print(r.text)
G = nx.Graph()
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > cutoff and x != y])
return G
def token_convert_back(tokens, sep='_'):
""" convert a list of tokens to string representation"""
res = [tokens[0]]
for y in tokens:
res.append([tuple(x.split(sep)) for x in y])
l = len(res)
for x in range(1, 4-l):
res.append([])
return res
def token_convert(tokens, sep='_'):
""" convert back to tuples """
tokens = [list(x.keys()) for x in tokens]
tokens = [[(x,) for x in tokens[0]], tokens[1], tokens[2], tokens[3]]
conversion = []
for x in tokens:
conversion.append([sep.join(t) for t in x])
return conversion
def token_map_to_tuples(tokens_as_strings, sep='_', arrow='==>'):
tuples = []
for x in tokens_as_strings:
token = x.split(arrow)[0].strip()
mapsto = x.split(arrow)[1].strip()
tuples.append((tuple(token.split(sep)), tuple(mapsto.split(sep))))
#tuples = [(tuple(x.split(arrow).strip()[0].split(sep)), tuple(x.split(arrow)[1].strip().split(sep))) for x in tokens_as_strings]
return tuples
def token_map(tokens, strings=False, sep='_', arrow= '==>'):
""" tokens as from nb.names()"""
if isinstance(tokens[0], dict):
# get the keys(), otherwise it is already just a list of tokens up to length 4
tokens = [list(x.keys()) for x in tokens]
# convert tokens to tuples and put them all in one list
tokens = [(x,) for x in tokens[0]] + tokens[1] + tokens[2] + tokens[3]
tm = []
#print(tokens)
for token in tokens:
if isinstance(token, str):
trep = (token,)
elif isinstance(token, list):
trep = tuple(token)
token = tuple(token)
else:
trep = token
n = len(trep)
#print(trep)
if trep[-1].endswith('s'):
cp = list(trep[:n-1])
cp.append(trep[-1][:-1])
cp = tuple(cp)
#print('copy', cp, trep)
if cp in tokens:
#print(trep, cp)
trep = cp
larger = [ts for ts in tokens if set(ts) >= set(trep)]
#print(trep, ' => ', larger)
larger.sort(key=lambda x: len(x), reverse=True)
tm.append((token,larger[0]))
res = tm
if strings == True:
res = [sep.join(x[0]) + ' ' + arrow + ' ' + sep.join(x[1]) for x in tm]
return res
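# Added usage sketch (not part of the original library): a tiny, hand-made name structure in the
# shape returned by names() (uni-, bi-, tri- and quadgram Counters).
def _example_token_map():
    name_struct = [Counter({'Jens': 3, 'Stoltenberg': 2}),
                   Counter({('Jens', 'Stoltenberg'): 2}),
                   Counter(),
                   Counter()]
    # Each shorter token should map to the longest name containing it, e.g. 'Jens ==> Jens_Stoltenberg'.
    return token_map(name_struct, strings=True)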
def draw_graph_centrality(G, h=15, v=10, fontsize=20, k=0.2, arrows=False, font_color='black', threshold=0.01):
node_dict = nx.degree_centrality(G)
subnodes = dict({x:node_dict[x] for x in node_dict if node_dict[x] >= threshold})
x, y = rcParams['figure.figsize']
rcParams['figure.figsize'] = h, v
pos =nx.spring_layout(G, k=k)
ax = plt.subplot()
ax.set_xticks([])
ax.set_yticks([])
G = G.subgraph(subnodes)
nx.draw_networkx_labels(G, pos, font_size=fontsize, font_color=font_color)
nx.draw_networkx_nodes(G, pos, alpha=0.5, nodelist=subnodes.keys(), node_size=[v * 1000 for v in subnodes.values()])
nx.draw_networkx_edges(G, pos, alpha=0.7, arrows=arrows, edge_color='lightblue', width=1)
rcParams['figure.figsize'] = x, y
return True
def combine(clusters):
"""Make new collocation analyses from data in clusters"""
colls = []
collocates = clusters[0].collocates
for c in clusters[1:]:
collocates = collocates.join(c.collocates, rsuffix='-' + str(c.period[0]))
return collocates
def cluster_join(cluster):
clusters = [cluster[i] for i in cluster]
clst = clusters[0].cluster_set(aslist=False)
for c in clusters[1:]:
clst = clst.join(c.cluster_set(aslist=False), rsuffix = '_'+str(c.period[0]))
return clst
def serie_cluster(word, startår, sluttår, inkrement, before=5, after=5, reference=150, word_samples=500):
tidscluster = dict()
for i in range(startår, sluttår, inkrement):
tidscluster[i] = Cluster(
word,
corpus='avis',
period=(i, i + inkrement - 1),
before=after,
after=after,
reference=reference,
word_samples=word_samples)
print(i, i+inkrement - 1)
return tidscluster
def save_serie_cluster(tidscluster):
for i in tidscluster:
tidscluster[i].save()
return 'OK'
def les_serie_cluster(word, startår, sluttår, inkrement):
tcluster = dict()
for i in range(startår, sluttår, inkrement):
print(i, i+inkrement - 1)
tcluster[i] = Cluster(filename='{w}_{f}-{t}.json'.format(w=word, f=i,t=i+inkrement - 1))
return tcluster
def make_cloud(json_text, top=100, background='white', stretch=lambda x: 2**(10*x), width=500, height=500, font_path=None):
pairs0 = Counter(json_text).most_common(top)
pairs = {x[0]:stretch(x[1]) for x in pairs0}
wc = WordCloud(
font_path=font_path,
background_color=background,
width=width,
#color_func=my_colorfunc,
ranks_only=True,
height=height).generate_from_frequencies(pairs)
return wc
def draw_cloud(sky, width=20, height=20, fil=''):
plt.figure(figsize=(width,height))
plt.imshow(sky, interpolation='bilinear')
figplot = plt.gcf()
if fil != '':
figplot.savefig(fil, format='png')
return
def cloud(pd, column='', top=200, width=1000, height=1000, background='black', file='', stretch=10, font_path=None):
if column == '':
column = pd.columns[0]
data = json.loads(pd[column].to_json())
a_cloud = make_cloud(data, top=top,
background=background, font_path=font_path,
stretch=lambda x: 2**(stretch*x), width=width, height=height)
draw_cloud(a_cloud, fil=file)
return
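# Added usage sketch (not part of the original library): render a tiny, made-up frequency frame
# as a word cloud; requires the optional wordcloud dependency.
def _example_cloud():
    freqs = pd.DataFrame({'freq': {'fjord': 0.5, 'fjell': 0.3, 'skog': 0.2}})
    cloud(freqs, column='freq', top=3, background='white', stretch=5)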
def make_a_collocation(word, period=(1990, 2000), before=5, after=5, corpus='avis', samplesize=100, limit=2000):
collocates = collocation(word, yearfrom=period[0], yearto=period[1], before=before, after=after,
corpus=corpus, limit=limit)
collocates.columns = [word]
reference = get_corpus(yearfrom=period[0], yearto=period[1], samplesize=samplesize)
ref_agg = aggregate(reference)
ref_agg.columns = ['reference_corpus']
return ref_agg
def compute_assoc(coll_frame, column, exponent=1.1, refcolumn = 'reference_corpus'):
return pd.DataFrame(coll_frame[column]**exponent/coll_frame.mean(axis=1))
class Corpus:
def __init__(self, filename = '', target_urns = None, reference_urns = None, period = (1950,1960), author='%',
title='%', ddk='%', gender='%', subject='%', reference = 100, max_books=100):
params = {
'year':period[0],
'next': period[1]-period[0],
'subject':subject,
'ddk':ddk,
'author':author,
#'gender':gender, ser ikke ut til å virke for get_urn - sjekk opp APIet
'title':title,
'limit':max_books,
'reference':reference
}
self.params = params
self.coll = dict()
self.coll_graph = dict()
if filename == '':
if target_urns != None:
målkorpus_def = target_urns
else:
målkorpus_def = get_urn(params)
#print("Antall bøker i målkorpus ", len(målkorpus_def))
if isinstance(målkorpus_def[0], list):
målkorpus_urn = [str(x[0]) for x in målkorpus_def]
#print(målkorpus_urn)
else:
målkorpus_urn = målkorpus_def
if len(målkorpus_urn) > max_books and max_books > 0:
target_urn = list(numpy.random.choice(målkorpus_urn, max_books))
else:
target_urn = målkorpus_urn
if reference_urns != None:
referansekorpus_def = reference_urns
else:
            # select from the period; usually used only if the target is given by metadata
referansekorpus_def = get_urn({'year':period[0], 'next':period[1]-period[0], 'limit':reference})
#print("<NAME> i referanse: ", len(referansekorpus_def))
        # the reference corpus should be distinct from the target corpus
referanse_urn = [str(x[0]) for x in referansekorpus_def]
self.reference_urn = referanse_urn
self.target_urn = target_urn
# make sure there is no overlap between target and reference
#
referanse_urn = list(set(referanse_urn) - set(target_urn))
målkorpus_txt = get_corpus_text(target_urn)
normalize_corpus_dataframe(målkorpus_txt)
if referanse_urn != []:
referanse_txt = get_corpus_text(referanse_urn)
normalize_corpus_dataframe(referanse_txt)
combo = målkorpus_txt.join(referanse_txt)
else:
referanse_txt = målkorpus_txt
combo = målkorpus_txt
self.combo = combo
self.reference = referanse_txt
self.target = målkorpus_txt
self.reference = aggregate(self.reference)
self.reference.columns = ['reference_corpus']
        ## document frequencies
mål_docf = pd.DataFrame(pd.DataFrame(målkorpus_txt/målkorpus_txt).sum(axis=1))
combo_docf = pd.DataFrame(pd.DataFrame(combo/combo).sum(axis=1))
ref_docf = pd.DataFrame(pd.DataFrame(referanse_txt/referanse_txt).sum(axis=1))
        ### Normalize the document frequencies
normalize_corpus_dataframe(mål_docf)
normalize_corpus_dataframe(combo_docf)
normalize_corpus_dataframe(ref_docf)
self.målkorpus_tot = aggregate(målkorpus_txt)
self.combo_tot = aggregate(combo)
self.mål_docf = mål_docf
self.combo_docf = combo_docf
self.lowest = self.combo_tot.sort_values(by=0)[0][0]
else:
self.load(filename)
return
def difference(self, freq_exp=1.1, doc_exp=1.1, top = 200, aslist=True):
res = pd.DataFrame(
(self.målkorpus_tot**freq_exp/self.combo_tot)*(self.mål_docf**doc_exp/self.combo_docf)
)
res.columns = ['diff']
if top > 0:
res = res.sort_values(by=res.columns[0], ascending=False).iloc[:top]
else:
res = res.sort_values(by=res.columns[0], ascending=False)
if aslist == True:
res = HTML(', '.join(list(res.index)))
return res
def save(self, filename):
model = {
'params':self.params,
'target': self.målkorpus_tot.to_json(),
'combo': self.combo_tot.to_json(),
'target_df': self.mål_docf.to_json(),
'combo_df': self.combo_docf.to_json()
}
with open(filename, 'w', encoding = 'utf-8') as outfile:
outfile.write(json.dumps(model))
return True
def load(self, filename):
with open(filename, 'r') as infile:
try:
model = json.loads(infile.read())
#print(model['word'])
self.params = model['params']
#print(self.params)
self.målkorpus_tot = pd.read_json(model['target'])
#print(self.målkorpus_tot[:10])
self.combo_tot = pd.read_json(model['combo'])
self.mål_docf = pd.read_json(model['target_df'])
self.combo_docf = pd.read_json(model['combo_df'])
except:
print('noe gikk galt')
return True
def collocations(self, word, after=5, before=5, limit=1000):
"""Find collocations for word in a set of book URNs. Only books at the moment"""
r = requests.post(
"https://api.nb.no/ngram/urncoll",
json={
'word': word,
'urns': self.target_urn,
'after': after,
'before': before,
'limit': limit
}
)
temp = pd.DataFrame.from_dict(r.json(), orient='index')
normalize_corpus_dataframe(temp)
self.coll[word] = temp.sort_values(by = temp.columns[0], ascending = False)
return True
def conc(self, word, before=8, after=8, size=10, combo=0):
if combo == 0:
urns = self.target_urn + self.reference_urn
elif combo == 1:
urns = self.target_urn
else:
urns = self.reference_urn
if len(urns) > 300:
urns = list(numpy.random.choice(urns, 300, replace=False))
return get_urnkonk(word, {'urns':urns, 'before':before, 'after':after, 'limit':size})
def sort_collocations(self, word, comparison = None, exp = 1.0, above = None):
if comparison == None:
comparison = self.combo_tot[0]
try:
res = pd.DataFrame(self.coll[word][0]**exp/comparison)
except KeyError:
print('Constructing a collocation for {w} with default parameters.'.format(w=word))
self.collocations(word)
res = pd.DataFrame(self.coll[word][0]**exp/comparison)
if above == None:
above = self.lowest
res = res[self.combo_tot > above]
return res.sort_values(by = 0, ascending = False)
def search_collocations(self, word, words, comparison = None, exp = 1.0):
if comparison == None:
comparison = self.combo_tot[0]
try:
res = pd.DataFrame(self.coll[word][0]**exp/comparison)
except KeyError:
print('Constructing a collocation for {w} with default parameters.'.format(w=word))
self.collocations(word)
res = pd.DataFrame(self.coll[word][0]**exp/comparison)
search_items = list(set(res.index) & set(words))
return res.transpose()[search_items].transpose().sort_values(by = 0, ascending = False)
def summary(self, head=10):
info = {
'parameters':self.params,
'target_urn':self.target_urn[:head],
'reference urn':self.reference_urn[:head],
}
return info
def search_words(self, words, freq_exp=1.1, doc_exp=1.1):
if type(words) is str:
words = [w.strip() for w in words.split()]
df = self.difference(freq_exp = freq_exp, doc_exp=doc_exp,top=0, aslist=False)
sub = [w for w in words if w in df.index]
res = df.transpose()[sub].transpose().sort_values(by=df.columns[0], ascending=False)
return res
def make_collocation_graph(self, target_word, top = 15, before = 4, after = 4, limit = 1000, exp=1):
"""Make a cascaded network of collocations"""
self.collocations(target_word, before=before, after=after, limit=limit)
coll = self.sort_collocations(target_word, exp = exp)
target_graf = dict()
edges = []
for word in coll[:top].index:
edges.append((target_word, word))
if word.isalpha():
self.collocations(word, before=before, after=after, limit=limit)
for w in self.sort_collocations(word, exp = exp)[:top].index:
if w.isalpha():
edges.append((word, w))
target_graph = nx.Graph()
target_graph.add_edges_from(edges)
self.coll_graph[target_word] = target_graph
return target_graph
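# Added usage sketch (not part of the original library): building a Corpus downloads data from
# api.nb.no, so the metadata filters and sizes below are only an example.
def _example_corpus():
    korpus = Corpus(period=(1950, 1960), ddk='8%', max_books=20, reference=20)
    return korpus.difference(top=20, aslist=False)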
def vekstdiagram(urn, params=None):
if params is None:
params = {}
# if urn is the value of get_urn() it is a list
# otherwise it just passes
if type(urn) is list:
urn = urn[0]
para = params
para['urn']= urn
r = requests.post('https://api.nb.no/ngram/vekstdiagram', json = para)
return pd.DataFrame(r.json())
def plot_book_wordbags(urn, wordbags, window=5000, pr = 100):
"""Generate a diagram of wordbags in book """
return plot_sammen_vekst(urn, wordbags, window=window, pr=pr)
def plot_sammen_vekst(urn, ordlister, window=5000, pr = 100):
"""Plott alle seriene sammen"""
rammer = []
c = dict()
if isinstance(ordlister, list):
if isinstance(ordlister[0], list):
for l in ordlister:
if l != []:
c[l[0]] = l
else:
c[ordlister[0]] = ordlister
else:
c = ordlister
for key in c:
vekst = vekstdiagram(urn, params = {'words': c[key], 'window':window, 'pr': pr} )
vekst.columns = [key]
rammer.append(vekst)
    return pd.concat(rammer)
import matplotlib.pyplot as plt
from cycler import cycler
from itertools import product
#
import seaborn as sns
import pandas as pd
import numpy as np
import vg
import pdb
import os
import dill as pickle
from scipy import constants, ndimage
##
from electropy.charge import Charge
from electropy.volume import *
with open('./paddle_solution.pickle', 'rb') as f:
paddleSolution = pickle.load(f)
solnPerElectrode = paddleSolution['solnPerElectrode']
activatingKernel = paddleSolution['activatingKernel']
dummySoln = solnPerElectrode[list(solnPerElectrode.keys())[0]]
def solnForWaveform(eesWvf):
data = potentialSolution(
[],
x_range=[dummySoln.xi[0], dummySoln.xi[-1]],
y_range=[dummySoln.yi[0], dummySoln.yi[-1]],
z_range=[0, dummySoln.zi[-1]],
h=dummySoln.h, verbose=1)
for rowIdx in eesWvf.index:
elecName = rowIdx[0]
if eesWvf[rowIdx] != 0:
data.hessianPhi += eesWvf[rowIdx] * solnPerElectrode[elecName].hessianPhi
data.phi += eesWvf[rowIdx] * solnPerElectrode[elecName].phi
return data
inputPath = 'E:\\Neural Recordings\\scratch\\202009231400-Peep\\default\\stim\\_emg_XS_export.h5'
with pd.HDFStore(inputPath, 'r') as store:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 18:54:29 2019
@author: suvodeepmajumder
"""
import sys
sys.path.append("..")
from pygit2 import clone_repository
from pygit2 import GIT_SORT_TOPOLOGICAL, GIT_SORT_REVERSE,GIT_MERGE_ANALYSIS_UP_TO_DATE,GIT_MERGE_ANALYSIS_FASTFORWARD,GIT_MERGE_ANALYSIS_NORMAL,GIT_RESET_HARD
from pygit2 import Repository
import shutil,os
import pygit2
from git_log import git2repo
import os
import re
import shlex
import numpy as np
import pandas as pd
from glob2 import glob, iglob
import subprocess as sp
import understand as und
from pathlib import Path
from pdb import set_trace
import sys
from collections import defaultdict
from utils.utils import utils
import platform
from os.path import dirname as up
from multiprocessing import Pool, cpu_count
import threading
from multiprocessing import Queue
from threading import Thread
import random
import string
#from main.utils.utils.utils import printProgressBar
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs)
self._return = None
def run(self):
#print(type(self._target))
if self._target is not None:
self._return = self._target(*self._args,
**self._kwargs)
def join(self, *args):
Thread.join(self, *args)
return self._return
class MetricsGetter(object):
"""
Generate class, file, function, object oriented metrics for a project.
Parameters
----------
sources_path: str or pathlib.PosixPath
Notes
-----
The class is designed to run in conjunction with a context manager.
"""
def __init__(self,repo_url,repo_name,repo_lang,code_path):
self.repo_url = repo_url
self.repo_name = repo_name
self.repo_lang = repo_lang
#self.repo_obj = git2repo.git2repo(self.repo_url,self.repo_name)
self.root_dir = code_path
print("root:",self.root_dir)
if platform.system() == 'Darwin' or platform.system() == 'Linux':
self.repo_path = self.root_dir+ '/commit_guru/ingester/CASRepos/git/' + self.repo_name
self.file_path = up(self.root_dir) + '/data/commit_guru/' + self.repo_name + '.csv'
#self.committed_file = up(os.getcwd()) + '/data/committed_files/' + self.repo_name + '_committed_file.pkl'
self.und_file = up(self.root_dir) + '/data/understand_files/' + self.repo_name + '_understand.csv'
else:
self.repo_path = up(os.getcwd()) + '\\temp_repo\\' + self.repo_name
self.file_path = up(os.getcwd()) + '\\data\\commit_guru\\' + self.repo_name + '.pkl'
#self.committed_file = up(os.getcwd()) + '\\data\\committed_files\\' + self.repo_name + '_committed_file.pkl'
self.buggy_clean_pairs = self.read_commits()
#self.buggy_clean_pairs = self.buggy_clean_pairs[0:5]
# Reference current directory, so we can go back after we are done.
self.cwd = Path('/tmp/smajumd3/')
#self.repo = self.clone_repo()
# Generate path to store udb files
#self.udb_path = self.cwd.joinpath(".temp", "udb")
self.udb_path = self.cwd.joinpath("temp", "udb/"+self.repo_name)
# Create a folder to hold the udb files
if not self.udb_path.is_dir():
os.makedirs(self.udb_path)
def read_commits(self):
df = pd.read_csv(self.file_path)
# print(df)
df = df[df['contains_bug'] == True]
        df = df.reset_index(drop=True)
self.commits = []
commits = []
for i in range(df.shape[0]):
try:
committed_files = []
if df.loc[i,'parent_hashes'] == None:
continue
bug_fixing_commit = df.loc[i,'parent_hashes']
bug_existing_commit = df.loc[i,'commit_hash']
files_changed = df.loc[i,'fileschanged']
#print(files_changed)
files_changed = files_changed.split(',')
files_changed = list(filter(('CAS_DELIMITER').__ne__, files_changed))
self.commits.append(bug_existing_commit)
#language = "Python"
language = self.repo_lang
if bug_fixing_commit == None:
print(df.iloc[i,0])
continue
for row in files_changed:
if language == "Java" or language == "C++" or language == "C":
if len(row.split('src/')) == 1:
continue
committed_files.append(row.split('src/')[1].replace('/','.').rsplit('.',1)[0])
elif language == "Python" :
committed_files.append(row['file_path'].replace('/', '.').rsplit('.', 1)[0])
elif language == "Fortran" :
committed_files.append(row['file_path'].replace('/', '.').rsplit('.', 1)[0])
else:
print("Language under construction")
commits.append([bug_existing_commit,bug_fixing_commit,committed_files])
except Exception as e:
print(e)
continue
return commits
def get_defective_pair_metrics(self):
"""
Use the understand tool's API to generate metrics
Notes
-----
        + For every clean and buggy pair of hashes, do the following:
            1. Get the diff of the files changed
2. Checkout the snapshot at the buggy commit
3. Compute the metrics of the files in that commit.
4. Next, checkout the snapshot at the clean commit.
5. Compute the metrics of the files in that commit.
"""
metrics_dataframe = pd.DataFrame()
print(len(self.buggy_clean_pairs))
for i in range(len(self.buggy_clean_pairs)):
try:
buggy_hash = self.buggy_clean_pairs[i][0]
clean_hash = self.buggy_clean_pairs[i][1]
files_changed = self.buggy_clean_pairs[i][2]
# if len((files_changed)) == 0:
# continue
print(i,self.repo_name,(buggy_hash, clean_hash))
# Go the the cloned project path
buggy_und_file = self.udb_path.joinpath("{}_{}.udb".format(self.repo_name+buggy_hash, "buggy"))
#print(self.buggy_und_file)
db_buggy = und.open(str(buggy_und_file))
#continue
print((db_buggy.metrics()))
metrics = db_buggy.metric(db_buggy.metrics())
print(metrics)
                #break  # debugging short-circuit; left commented out so all pairs are processed
#print("Files",set(files_changed))
for file in db_buggy.ents("Class"):
# print directory name
# print(file,file.longname(), file.kind())
#language = "Python"
language = self.repo_lang
if language == "Java" or language == "C++" or language == "C":
r = re.compile(str(file.longname()))
newlist = list(filter(r.search, list(set(files_changed))))
elif language == "Python" :
if file.library() == "Standard":
continue
temp_str = file.longname().split(".")[-2]
r = re.compile(str(temp_str))
newlist = list(filter(r.search, list(set(files_changed))))
elif language == "Fortran" :
if file.library() == "Standard":
continue
t3 = file.longname()
t7 = file.refs()
t8 = file.ref()
comp = str(file).split(".")[0]
# print("-------Here is the library : ",file.library())
# r = re.compile(str(file.longname()))
# temp_str = file.longname().split(".")[-2]
r = re.compile(comp)
newlist = list(filter(r.search, list(set(files_changed))))
else:
newlist = []
print("Language under construction")
if len(newlist) > 0:
metrics = file.metric(file.metrics())
print(len(file.metrics()))
metrics["commit_hash"] = buggy_hash
metrics["Name"] = file.longname()
metrics["Bugs"] = 1
metrics_dataframe = metrics_dataframe.append(
pd.Series(metrics), ignore_index=True)
else:
metrics = file.metric(file.metrics())
print(len(file.metrics()))
metrics["commit_hash"] = buggy_hash
metrics["Name"] = file.longname()
metrics["Bugs"] = 0
metrics_dataframe = metrics_dataframe.append(
| pd.Series(metrics) | pandas.Series |
from scipy.sparse import csc_matrix,csr_matrix,coo_matrix
from scipy.sparse.linalg import lsqr
from numpy import linalg as LA
import datetime
import csv
import numpy as np
import os
import matplotlib.pyplot as plt
dir_path = os.path.dirname(os.path.realpath(__file__))
import time
import pandas as pd
from os import listdir
from os.path import isfile, join
from os import walk
########################
### Plotting results ###
########################
# LSQR-CUDA data
outpath = "./output/"
outputspath = "../results/2021109/output"
now = datetime.datetime.now()
now = now.strftime('%Y-%m-%dT%H%M')
inpath = "../results/2021109/input"
inputs = listdir(inpath)
inputs.sort()
mats = [m for m in inputs if ".mat" in m]
vecs = [v for v in inputs if ".vec" in v]
# Sparse plotting
LSQRCUDA = pd.read_csv(outputspath+"/2021-11-09T2101_LSQR-CUDA.csv")
DEVICE = pd.read_csv(outputspath+"/deviceProps.csv")
# python data
scipylsqr = pd.read_csv(outpath + "/2021-11-10T0956_LSQR_python.csv")
# Load and parse LSQR-CUDA data
name = DEVICE['DEVICE_NAME'][0]
implementations = ['Cpp-DENSE','CUDA-DENSE','CUDA-SPARSE','CUBLAS-DENSE','CUSPARSE-SPARSE']
dfs = []
for i in implementations:
temp = LSQRCUDA[LSQRCUDA['IMPLEMENTATION']==i].drop(columns='IMPLEMENTATION').drop(columns='A_COLUMNS').drop(columns='SPARSITY')
temp = temp.rename(columns={"TIME(ms)":i})
temp[i] = temp[i].mul(1/1000)
dfs.append(temp)
BASELINE = scipylsqr[scipylsqr['IMPLEMENTATION']=='scipy-lsqr'].drop(columns='IMPLEMENTATION').drop(columns='A_COLUMNS').drop(columns='SPARSITY')
BASELINE = BASELINE.rename(columns={"TIME(ms)":"scipy-lsqr"})
BASELINE["scipy-lsqr"] = BASELINE["scipy-lsqr"].mul(1/1000)
CPPDENSE = dfs[0]
CUDADENSE = dfs[1]
CUDASPARSE = dfs[2]
CUBLASDENSE = dfs[3]
CUSPARSE = dfs[4]
all = pd.merge(CUBLASDENSE,CUSPARSE,on="A_ROWS")
all = pd.merge(CUDASPARSE,all,on="A_ROWS")
all = pd.merge(CUDADENSE,all,on="A_ROWS")
all = pd.merge(CPPDENSE,all,how='right',on="A_ROWS")
all = pd.merge(BASELINE,all,how='right',on="A_ROWS")
#all.to_csv("../results/"+now+"_RUNTIMES.csv",index=False,float_format='%.5f')
# Sparse implementation plots
sparse = pd.merge(CUSPARSE,CUDASPARSE,on="A_ROWS")
sparse = pd.merge(BASELINE,sparse,on="A_ROWS")
fig = sparse.plot(x='A_ROWS', xlabel="# of Rows in Square Matrix A", ylabel="RUNTIME (s)",title=name+" - Sparse Inputs",grid=True).get_figure()
#fig.savefig("../results/"+now+"_1000-8000_SPARSE-INPUTS.png")
#csvtimes = "../results/"+now+"_SPARSETIMES.csv"
#sparse.to_csv(csvtimes,index=False,float_format='%.8f')
# Sparse speedups
CUDASPARSE_SPEEDUP = (sparse["scipy-lsqr"]/sparse["CUDA-SPARSE"])
CUDASPARSE_SPEEDUP = CUDASPARSE_SPEEDUP.rename("CUDA-SPARSE")
CUSPARSE_SPEEDUP = (sparse["scipy-lsqr"]/sparse["CUSPARSE-SPARSE"])
CUSPARSE_SPEEDUP = CUSPARSE_SPEEDUP.rename("CUSPARSE-SPARSE")
ROWS_SPEEDUP = sparse["A_ROWS"]
ROWS_SPEEDUP = ROWS_SPEEDUP.rename("A_ROWS")
csvsparsespeedups = "../results/"+now+"_SPARSE-SPEEDUPS.csv"
SPEEDUPS = pd.concat([ROWS_SPEEDUP,CUDASPARSE_SPEEDUP,CUSPARSE_SPEEDUP], axis=1)
#SPEEDUPS.to_csv(csvsparsespeedups,index=False,float_format='%.8f')
## Dense implementation plots
dense = pd.merge(CUDADENSE,CUBLASDENSE,on="A_ROWS")
dense = pd.merge(CPPDENSE,dense,on="A_ROWS")
fig = dense.plot(x='A_ROWS', xlabel="# of Rows in Square Matrix A", ylabel="RUNTIME (s)",title=name+" - Dense Inputs",grid=True).get_figure()
#fig.savefig("../results/"+now+"_1000-8000_DENSE-INPUTS.png")
#####################################################
### Calculation of root mean squared error (rmse) ###
#####################################################
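# For every matrix size, each LSQR-CUDA solution vector l is compared against the
# scipy baseline vector p via the root mean squared error:
#   rmse = sqrt(mean((l - p)**2))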
# Baseline data
PYTHONOUTS = []
for (dirpath, dirnames, filenames) in walk(outpath):
PYTHONOUTS.extend(filenames)
break
PYTHONOUTS.sort()
PYTHONOUTS = [v for v in PYTHONOUTS if ".vec" in v]
LSQROUTS = []
for (dirpath, dirnames, filenames) in walk(outputspath):
LSQROUTS.extend(filenames)
break
LSQROUTS.sort()
LSQROUTS = [v for v in LSQROUTS if ".vec" in v]
csvrsmepath = "../results/" + now + "_RMSE.csv"
f = open(csvrsmepath, 'w')
writer = csv.writer(f)
writer.writerow(['IMPLEMENTATION','A_ROWS','RMSE'])
f.close()
maxRmse=0
for pyout in PYTHONOUTS:
rows = int(float(pyout.split("_")[0]))
rowsStr = str(rows)
outs = [v for v in LSQROUTS if rowsStr in v]
p = np.loadtxt(outpath+"/"+pyout,dtype=np.double)
for o in outs:
l = np.loadtxt(outputspath+"/"+o,dtype=np.double)
rmse = np.sqrt(np.mean((l-p)**2))
if (rmse>maxRmse):
maxRmse=rmse
# write to csv
implementation = o.split(".")[0].split("_")[3]
rowrsmecsv = [implementation,rowsStr,f'{rmse:.8f}']
with open(csvrsmepath, 'a') as fd:
writer=csv.writer(fd)
writer.writerow(rowrsmecsv)
errors = pd.read_csv(csvrsmepath)
I0 = errors[errors['IMPLEMENTATION']=='Cpp-DENSE'].drop(columns='IMPLEMENTATION')
I0=I0.rename(columns={"RMSE":"Cpp-DENSE"})
I1 = errors[errors['IMPLEMENTATION']=='CUDA-DENSE'].drop(columns='IMPLEMENTATION')
I1=I1.rename(columns={"RMSE":"CUDA-DENSE"})
I2 = errors[errors['IMPLEMENTATION']=='CUDA-SPARSE-80'].drop(columns='IMPLEMENTATION')
I2=I2.rename(columns={"RMSE":"CUDA-SPARSE"})
I3 = errors[errors['IMPLEMENTATION']=='CUBLAS-DENSE'].drop(columns='IMPLEMENTATION')
I3=I3.rename(columns={"RMSE":"CUBLAS-DENSE"})
I4 = errors[errors['IMPLEMENTATION']=='CUSPARSE-SPARSE-80'].drop(columns='IMPLEMENTATION')
I4=I4.rename(columns={"RMSE":"CUSPARSE-SPARSE"})
I = pd.merge(I3,I4,on="A_ROWS")
I = pd.merge(I2,I,on="A_ROWS")
I = pd.merge(I1,I,on="A_ROWS")
I = | pd.merge(I0,I,how='right',on="A_ROWS") | pandas.merge |
import pickle
from os import path, makedirs
import time
import pandas as pd
import numpy as np
def printx(output):
print(output, flush = True)
def save_to_pickle(var, filename='temp', root_path='./'):
    '''
    Save a variable to a pickle file; `filename` is used as the full path
    (the `root_path` argument is currently unused)
    '''
#fn = path.join(root_path, filename)
fn = filename
    if not path.exists(path.split(fn)[0]):
makedirs(path.split(fn)[0])
#printx("saving {} into {}".format(filename, root_path))
printx("saving into {}".format(filename))
f = open(fn, 'wb')
pickle.dump(var, f)
f.close()
return var
def load_from_pickle(filename='temp', root_path='./'):
    '''
    Load a variable from a pickle file; `filename` is used as the full path
    (the `root_path` argument is currently unused)
    '''
#printx("loading {} from {}".format(filename, root_path))
#f = open(path.join(root_path, filename), 'rb')
printx("loading {}".format(filename))
f = open(filename, 'rb')
var = pickle.load(f)
f.close()
return var
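# Example round trip using the two helpers above (the path is a placeholder):
#   save_to_pickle({'a': 1}, filename='./cache/example.pkl')
#   restored = load_from_pickle(filename='./cache/example.pkl')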
def readData(file_path1, file_path2):
# Read file and combine
data1 = | pd.read_csv(file_path1, header=None, sep=' ', low_memory=False) | pandas.read_csv |
from requests import get, exceptions
from bs4 import BeautifulSoup
from datetime import datetime
from pandas import DataFrame, read_excel
from time import sleep
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
def get_label(soup):
artist = soup.find("span", attrs={"itemprop": "byArtist"}).a.contents[0].strip()
releaser = soup.find("p", attrs={"id": "band-name-location"}).find("span", attrs={"class": "title"}).contents[0].strip()
label_tag = soup.find("span", attrs={"class": "back-to-label-name"})
if label_tag:
return label_tag.contents[0].strip()
else:
return releaser if artist.lower() != releaser.lower() else None
def get_tags(soup):
tags = []
for tag in soup.findAll("a", attrs={"class": "tag"}):
tags.append(tag.contents[0])
return tags
def get_soup(url):
try:
release_request = get(url)
return BeautifulSoup(release_request.text, "html.parser")
except exceptions.ConnectionError:
sleep(5.0)
return get_soup(url)
def parse_release(url):
soup = get_soup(url)
if soup.find("h2", attrs={"class": "trackTitle"}):
title = soup.find("h2", attrs={"class": "trackTitle"}).contents[0].strip()
artist = soup.find("span", attrs={"itemprop": "byArtist"}).a.contents[0].strip()
releasedate_str = soup.find("meta", attrs={"itemprop": "datePublished"})["content"]
releasedate = datetime(int(releasedate_str[0:4]), int(releasedate_str[4:6]), int(releasedate_str[6:8])).date()
formats_raw = soup.findAll("li", attrs={"class": "buyItem"})
label = get_label(soup)
tags = get_tags(soup)
        location = None
        if len(soup.find("span", attrs={"class": "location"}).contents) > 0:
            location = soup.find("span", attrs={"class": "location"}).contents[0].strip()
formats = []
for format_raw in formats_raw:
if format_raw.h3.button:
secondary_text = format_raw.h3.find("div", attrs={"class": "merchtype secondaryText"})
format = secondary_text.contents[0].strip() if secondary_text else format_raw.h3.button.span.contents[0]
formats.append(format)
return {
"title": title,
"artist": artist,
"date": releasedate,
"url": url,
"formats": formats,
"tags": tags,
"location": location,
"label": label
}
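# Example (hypothetical URL): parse_release returns one dict per release page, e.g.
#   parse_release("https://someartist.bandcamp.com/album/some-album")
#   -> {"title": ..., "artist": ..., "date": datetime.date(...), "url": ...,
#       "formats": [...], "tags": [...], "location": ..., "label": ...}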
url = "https://bandcamp.com/tag/{0}?page={1}&sort_field=date"
with open("cities.txt", "r") as f:
cities = [city.lower() for city in f.read().split("\n")]
start_urls = [url.format(city.lower(), i) for i in range(1, 11, 1) for city in cities]
data = | read_excel("data.xlsx") | pandas.read_excel |
import pandas as pd
import pandas.testing as pdt
import numpy as np
import qiime2
from qiime2.plugin.testing import TestPluginBase
from q2_types.feature_data import DNAFASTAFormat
from genome_sampler.sample_neighbors import (
sample_neighbors, _clusters_from_vsearch_out, _sample_cluster,
_generate_weights)
class TestSubsampleNeighbors(TestPluginBase):
package = 'genome_sampler.tests'
_N_TEST_ITERATIONS = 50
def setUp(self):
super().setUp()
focal_seqs1 = self.get_data_path('focal-seqs-1.fasta')
self.focal_seqs1 = DNAFASTAFormat(focal_seqs1, 'r')
context_seqs1 = self.get_data_path('context-seqs-1.fasta')
self.context_seqs1 = DNAFASTAFormat(context_seqs1, 'r')
context_md1 = self.get_data_path('context-metadata-1.tsv')
self.context_md1 = qiime2.Metadata.load(context_md1)
focal_seqs2 = self.get_data_path('focal-seqs-2.fasta')
self.focal_seqs2 = DNAFASTAFormat(focal_seqs2, 'r')
context_seqs2 = self.get_data_path('context-seqs-2.fasta')
self.context_seqs2 = DNAFASTAFormat(context_seqs2, 'r')
context_md2 = self.get_data_path('context-metadata-2.tsv')
self.context_md2 = qiime2.Metadata.load(context_md2)
def test_sample_neighbors_no_locale(self):
sel = sample_neighbors(self.focal_seqs1,
self.context_seqs1,
percent_id=0.98,
samples_per_cluster=2)
exp_inclusion = pd.Series([True, True, False, False, True, False],
index=['c1', 'c2', 'c3', 'c4', 'c5', 'c6'],
name='inclusion')
exp_metadata = pd.DataFrame(index=['c1', 'c2', 'c3', 'c4', 'c5', 'c6'])
exp_metadata.index.name = 'id'
exp_metadata = qiime2.Metadata(exp_metadata)
| pdt.assert_series_equal(sel.inclusion, exp_inclusion) | pandas.testing.assert_series_equal |
from pythologist.measurements import Measurement
import pandas as pd
import numpy as np
import math, sys
class Cartesian(Measurement):
@staticmethod
def _preprocess_dataframe(cdf,subsets,step_pixels,max_distance_pixels,*args,**kwargs):
def _hex_coords(frame_shape,step_pixels):
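            # Rows are spaced 0.85*step_pixels apart vertically and every other
            # row is shifted right by half a step, yielding an approximately
            # hexagonal sampling grid over the frame.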
halfstep = int(step_pixels/2)
vstep = int(step_pixels*0.85)
coordinates = []
for i,y in enumerate(range(0,frame_shape[0]+step_pixels,vstep)):
#iterate over the x coords
for x in range(halfstep if i%2==1 else 0,frame_shape[1]+step_pixels,step_pixels):
coordinates.append((x,y))
return pd.DataFrame(coordinates,columns=['frame_x','frame_y'])
frames = cdf.groupby(cdf.frame_columns).first()[['frame_shape']]
frames['frame_coords'] = frames['frame_shape'].apply(lambda shape: _hex_coords(shape,step_pixels))
allcoords = []
for i,r in frames.iterrows():
#if 'verbose' in kwargs and kwargs['verbose']: sys.stderr.write("Reading frame\n"+str(r)+"\n\n")
idf = pd.DataFrame([i],columns=frames.index.names)
idf['_key'] = 1
coords = r['frame_coords']
coords['_key'] = 1
coords = idf.merge(coords,on='_key').drop(columns='_key')
coords['frame_shape'] = 0
coords['frame_shape'] = coords['frame_shape'].apply(lambda x: r['frame_shape'])
allcoords.append(coords)
allcoords = pd.concat(allcoords)
allcoords['step_pixels'] = step_pixels
allcoords = allcoords.reset_index(drop=True)
allcoords.index.name = 'coord_id'
### Capture distances
full = []
for frame_id in allcoords['frame_id'].unique():
fcdf = cdf.loc[cdf['frame_id']==frame_id]
fcdf = fcdf.dropna(subset=['phenotype_label'])
primary = fcdf.copy()
phenotypes = cdf.phenotypes
# get the frame's CellDataFrame
if subsets is not None:
phenotypes = []
subs = []
for subset_logic in subsets:
sub = fcdf.subset(subset_logic,update=True)
subs.append(sub)
phenotypes.append(subset_logic.label)
fcdf = pd.concat(subs)
# get the frame's hex coordinates
coords = allcoords.loc[allcoords['frame_id']==frame_id].copy().reset_index()
counts = _get_proximal_points(fcdf,coords,
fcdf.frame_columns,
phenotypes,
max_distance_pixels)
totals = _get_proximal_points(primary,coords,
primary.frame_columns,
cdf.phenotypes,
max_distance_pixels)
totals = totals.groupby(cdf.frame_columns+['coord_id','frame_x','frame_y']).sum()[['count']].rename(columns={'count':'total'}).\
reset_index()
counts = counts.merge(totals,on=cdf.frame_columns+['coord_id','frame_x','frame_y'])
counts['count'] = counts['count'].astype(int)
counts['total'] = counts['total'].astype(int)
counts['fraction'] = counts.apply(lambda x: np.nan if x['total']==0 else x['count']/x['total'],1)
full.append(counts)
full = pd.concat(full).reset_index(drop=True)
full['max_distance_pixels'] = max_distance_pixels
return full
def rgb_dataframe(self,max_quantile_color=0.95,red=None,green=None,blue=None):
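        # Maps each phenotype's per-coordinate fraction to a 0-255 channel
        # intensity, clipping at the `max_quantile_color` quantile (95th
        # percentile by default) so a few extreme coordinates do not compress
        # the scale, then packs up to three phenotypes into an RGB hex color.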
df2 = self.copy()
d1 = df2.groupby(['frame_id','phenotype_label']).\
min()[['fraction']].reset_index().rename(columns={'fraction':'minimum'})
d1['minimum'] = 0
d2 = df2.groupby(['frame_id','phenotype_label']).\
max()[['fraction']].reset_index().rename(columns={'fraction':'maximum'})
d3 = df2.groupby(['frame_id','phenotype_label']).\
apply(lambda x: x['fraction'].quantile(max_quantile_color)).reset_index().\
rename(columns={0:'p95'})
df2 = d1.merge(d2,on=['frame_id','phenotype_label'],how='outer').merge(df2,on=['frame_id','phenotype_label'],how='outer').\
merge(d3,on=['frame_id','phenotype_label'],how='outer')
df2 = df2.fillna(0)
df2['maximum'] = df2['p95']
df2['fraction'] = df2.apply(lambda x: x['fraction'] if x['fraction'] < x['p95'] else x['p95'],1)
df2['range'] = df2['maximum'].subtract(df2['minimum'])
df2.loc[df2['range']<=0,'range'] =1
df2['standardized'] = (df2['fraction'].subtract(df2['minimum'])).divide(df2['range']).multiply(255).astype(int)
rangetop = df2[self.cdf.frame_columns+['phenotype_label','p95']].drop_duplicates().\
rename(columns={'p95':'range_top'})
df3 = df2.set_index(self.cdf.frame_columns+['coord_id','frame_x','frame_y','frame_shape','step_pixels'])[['phenotype_label','standardized']].\
pivot(columns='phenotype_label')
df3.columns = df3.columns.droplevel(0)
df3 = df3.reset_index()
df3['zero'] = 0
#return df3
if red is None: red = 'zero'
if green is None: green = 'zero'
if blue is None: blue = 'zero'
df3['color'] = df3.apply(lambda x: (x[red],x[green],x[blue]),1)
df3['color_str'] = df3.apply(lambda x: '#%02x%02x%02x' % x['color'],1).astype(str)
df3 = df3.sort_values(['frame_id','frame_y','frame_x']).reset_index(drop=True)
dcols = df3[['color','color_str']].drop_duplicates()
df3['color_str'] = | pd.Categorical(df3['color_str'],categories=dcols['color_str']) | pandas.Categorical |
"""
Created on Oct 7, 2013
@author: mmendez
"""
import pandas as pd
import os
def group(config_file, group_and_comparisons, group_id):
header = []
genes_counts = []
group = [group
for group in group_and_comparisons['group_definitions']
if group_id == group['id']][0]
for dataset in config_file['datasets']:
df = | pd.read_csv(dataset['summary'], sep='\t') | pandas.read_csv |
import numpy as np
import pandas as pd
from numpy import inf, nan
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pandas import DataFrame, Series, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry.point import Point
from pymove import MoveDataFrame
from pymove.utils import integration
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
list_random_banks = [
[39.984094, 116.319236, 1, 'bank'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bancos_postos'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'bancos_PAE'],
[39.984710, 116.319865, 6, 'bancos_postos'],
[39.984674, 116.319810, 7, 'bancos_agencias'],
[39.984623, 116.319773, 8, 'bancos_filiais'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
]
list_random_bus_station = [
[39.984094, 116.319236, 1, 'transit_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'transit_station'],
[39.984211, 116.319389, 4, 'pontos_de_onibus'],
[39.984217, 116.319422, 5, 'transit_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_random_bar_restaurant = [
[39.984094, 116.319236, 1, 'restaurant'],
[39.984198, 116.319322, 2, 'restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar'],
[39.984217, 116.319422, 5, 'bar'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
]
list_random_parks = [
[39.984094, 116.319236, 1, 'pracas_e_parques'],
[39.984198, 116.319322, 2, 'park'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'park'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'pracas_e_parques'],
]
list_random_police = [
[39.984094, 116.319236, 1, 'distritos_policiais'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'distritos_policiais'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
list_pois = [
[39.984094, 116.319236, 1, 'policia', 'distrito_pol_1'],
[39.991013, 116.326384, 2, 'policia', 'policia_federal'],
[40.01, 116.312615, 3, 'comercio', 'supermercado_aroldo'],
[40.013821, 116.306531, 4, 'show', 'forro_tropykalia'],
[40.008099, 116.31771100000002, 5, 'risca-faca',
'rinha_de_galo_world_cup'],
[39.985704, 116.326877, 6, 'evento', 'adocao_de_animais'],
[39.979393, 116.3119, 7, 'show', 'dia_do_municipio']
]
# Tests for the union functions
def test_union_poi_bank():
pois_df = DataFrame(
data=list_random_banks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'banks'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'banks'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'banks'],
[39.984710, 116.319865, 6, 'banks'],
[39.984674, 116.319810, 7, 'banks'],
[39.984623, 116.319773, 8, 'banks'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
integration.union_poi_bank(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bus_station():
pois_df = DataFrame(
data=list_random_bus_station,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bus_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bus_station'],
[39.984211, 116.319389, 4, 'bus_station'],
[39.984217, 116.319422, 5, 'bus_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bus_station(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bar_restaurant():
pois_df = DataFrame(
data=list_random_bar_restaurant,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bar-restaurant'],
[39.984198, 116.319322, 2, 'bar-restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar-restaurant'],
[39.984217, 116.319422, 5, 'bar-restaurant'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bar_restaurant(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_parks():
pois_df = DataFrame(
data=list_random_parks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'parks'],
[39.984198, 116.319322, 2, 'parks'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'parks'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'parks'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_parks(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_police():
pois_df = DataFrame(
data=list_random_police,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'police'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'police'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_police(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_join_colletive_areas():
move_df = MoveDataFrame(
data=list_move,
)
move_df['geometry'] = move_df.apply(lambda x: Point(x['lon'], x['lat']), axis=1)
expected = move_df.copy()
indexes_ac = np.linspace(0, move_df.shape[0], 5, dtype=int)
area_c = move_df[move_df.index.isin(indexes_ac)].copy()
integration.join_collective_areas(move_df, area_c, inplace=True)
expected[VIOLATING] = [True, False, True, False, True, False, True, False, False]
assert_frame_equal(move_df, expected)
def test__reset_and_creates_id_and_lat_lon():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, True
)
)
id_expected = np.full(9, '', dtype='object_')
tag_expected = np.full(9, '', dtype='object_')
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
lat_expected = np.full(7, np.Infinity, dtype=np.float64)
lon_expected = np.full(7, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, True
)
)
lat_expected = np.full(9, np.Infinity, dtype=np.float64)
lon_expected = np.full(9, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
def test__reset_set_window__and_creates_event_id_type():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-22T17:23:05.000000000', '2008-10-22T22:07:26.000000000',
'2008-10-22T22:20:16.000000000', '2008-10-22T22:33:06.000000000',
'2008-10-22T23:28:33.000000000', '2008-10-23T11:20:45.000000000',
'2008-10-23T11:32:14.000000000', '2008-10-23T11:52:01.000000000',
'2008-10-23T13:27:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T18:23:05.000000000', '2008-10-23T23:07:26.000000000',
'2008-10-23T23:20:16.000000000', '2008-10-23T23:33:06.000000000',
'2008-10-24T00:28:33.000000000', '2008-10-24T12:20:45.000000000',
'2008-10-24T12:32:14.000000000', '2008-10-24T12:52:01.000000000',
'2008-10-24T14:27:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
type_expected = np.full(9, '', dtype='object_')
id_expected = np.full(9, '', dtype='object_')
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window__and_creates_event_id_type(
move_df, pois, 45000, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
assert_series_equal(window_ends, win_end_expected)
assert_array_almost_equal(current_distances, dist_expected)
assert_array_equal(event_id, id_expected)
assert_array_equal(event_type, type_expected)
def test_reset_set_window_and_creates_event_id_type_all():
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, | Timestamp('2008-10-23 10:37:26') | pandas.Timestamp |
import pandas as pd
import networkx as nx
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
# functions
def degree(G,f):
"""
Adds a column to the dataframe f with the degree of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
degree_dic = nx.degree_centrality(G)
degree_df = pd.DataFrame(data = {'name': list(degree_dic.keys()), 'degree': list(degree_dic.values()) })
f = pd.merge(f, degree_df, on='name')
return f
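# Minimal usage sketch for the feature helpers in this module (hypothetical data);
# `f` needs one row per node with the node id in a `name` column:
#   G = nx.karate_club_graph()
#   f = pd.DataFrame({'name': list(G.nodes())})
#   f = degree(G, f)        # adds a 'degree' column
#   f = betweenness(G, f)   # adds a 'betweenness' column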
def centrality(G,f):
"""
Adds a column to the dataframe f with the centrality of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
centrality_dic = nx.degree_centrality(G)
centrality_df = pd.DataFrame(data = {'name': list(centrality_dic.keys()), 'centrality': list(centrality_dic.values()) })
f = pd.merge(f, centrality_df, on='name')
return f
def betweenness(G,f):
"""
Adds a column to the dataframe f with the betweenness of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
betweenness_dic = nx.betweenness_centrality(G)
betweenness_df = pd.DataFrame(data = {'name': list(betweenness_dic.keys()), 'betweenness': list(betweenness_dic.values()) })
f = pd.merge(f, betweenness_df, on='name')
return f
def pagerank(G,f):
"""
Adds a column to the dataframe f with the pagerank of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
pagerank_dic = nx.pagerank(G)
pagerank_df = pd.DataFrame(data = {'name': list(pagerank_dic.keys()), 'pagerank': list(pagerank_dic.values()) })
f = pd.merge(f, pagerank_df, on='name')
return f
def clustering(G,f):
"""
Adds a column to the dataframe f with the clustering coeficient of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
clustering_dic = nx.clustering(G)
clustering_df = pd.DataFrame(data = {'name': list(clustering_dic.keys()), 'clustering': list(clustering_dic.values()) })
f = pd.merge(f, clustering_df, on='name')
return f
def communities_greedy_modularity(G,f):
"""
Adds a column to the dataframe f with the community of each node.
The communitys are detected using greedy modularity.
G: a networkx graph.
f: a pandas dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
communities_dic = nx.algorithms.community.greedy_modularity_communities(G)
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_greedy_modularity': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = | pd.merge(f, communities_df, on='name') | pandas.merge |
"""
author: <NAME>, <EMAIL>
date: 3/2021
This script takes in a polygon GIS feature class of 1 to X features and uses a bunch of
pre-generated GIS data describing various things that are important to determining
solar site suitability and spits out a table of all those things for each site, one line per
input site. Those variables include: distance to transmission lines of various capacities,
slope on the site, and landuse on the site.
this must be run with ArcGIS python 2 build system from ESRI as installed with ArcGIS 10.8
"""
import arcpy
from arcpy import env
from arcpy.sa import *
import pandas as pan
arcpy.CheckOutExtension("spatial")
# environments and paths
ws = "D:\\workspace\\solarSiteAnalysis"
arcpy.env.workspace = ws
arcpy.env.overwriteOutput = "True"
# OUPUT
outputTables = ws + "\\Python\\Output"
targetStyle = pan.read_csv(ws + "\\Python\\SiteProfileOutputTemplateTwo.csv")
tempDF = pan.DataFrame(targetStyle)
temp = ws + "\\Python\\temp.gdb"
# Processing variables
eData = "D:\\workspace\\gis_data\\DEM_1to3"
stateElev = eData + "DEM_Mosaic_WV_Statewide_1to3m_UTM17_p2020.tif"
regionSlope = eData + "\\StatewideElev.gdb"
counties = ws + "\\RESOURCES\\county_24k_utm83_NEW.shp"
owner = ''
county = ''
OriginalOID = 0
regionList = {
eData + "\\WV_central_region_northernPiece.shp": regionSlope + "\\CentNorthSlope",
eData + "\\WV_central_region_southernPiece.shp": regionSlope + "\\CentSouthSlope",
eData + "\\WV_east_region.shp": regionSlope + "\\EastSlope",
eData + "\\WV_south_region_northern.shp": regionSlope + "\\SouthNorthSlope",
eData + "\\WV_south_region_southern.shp": regionSlope + "\\SouthSouthSlope"
}
currentRegion = temp + "\\currentRegion"
lulc = ws + "\\RESOURCES\\WV_LULC_NAIP.tif"
lulcGood = ws + "\\RESOURCES\\WV_LULC_NAIP_2016_reclassedToGoodForSolar_correct.tif"
floodMath = ws + "\\RESOURCES\\DFIRM_FloodZones_AAE_AsNoDataAndOne.tif"
minePermits = ws + "\\RESOURCES\\mining_reclamation_permit_boundary.shp"
# Processing storage and other references
slopeAreasAllLULC = {
"0-5 percent": 0,
"5-10 percent": 0,
"10-15 percent": 0,
"15-20 percent": 0,
"Over 20 percent": 0
}
slopeAreasGoodLULC = {
"0-5 percent": 0,
"5-10 percent": 0,
"10-15 percent": 0,
"15-20 percent": 0,
"Over 20 percent": 0
}
FieldNameToSlopeCat = {
"0-5 percent": "VALUE_1",
"5-10 percent": "VALUE_2",
"10-15 percent": "VALUE_3",
"15-20 percent": "VALUE_4",
"Over 20 percent": "VALUE_5"
}
reclassSlope = [[0, 5, 1], [5, 10, 2], [10, 15, 3], [15, 20, 4], [20, 25, 5], [25, ]]
transmission = {
ws + "\\RESOURCES\\TransmissionLinesWV.gdb\\TransDist_Under100KV": "Under 100 kV",
ws + "\\RESOURCES\\TransmissionLinesWV.gdb\\TransDist_UnknownKV": "Unknown kV",
ws + "\\RESOURCES\\TransmissionLinesWV.gdb\\TransDist_735kvAndUp": "735kV and Up",
ws + "\\RESOURCES\\TransmissionLinesWV.gdb\\TransDist_500kv": "500 kV",
ws + "\\RESOURCES\\TransmissionLinesWV.gdb\\TransDist_345kv_2": "345 kV",
ws + "\\RESOURCES\\TransmissionLinesWV.gdb\\TransDist_100to161kv": "100 to 161 kV"
}
transmissionDistances = {
"Unknown kV": 0,
"Under 100 kV": 0,
"100 to 161 kV": 0,
"345 kV": 0,
"500 kV": 0,
"735kV and Up": 0
}
whereClause = ""
# would be good to have an actual name field in future inputs rather than just a number
def analysis():
outputDF = | pan.DataFrame(data=None, columns=tempDF.columns) | pandas.DataFrame |
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize(
"values, dtype",
[
([1, 2, 3], "int64"),
([1.0, 2.0, 3.0], "float64"),
(["a", "b", "c"], "object"),
(["a", "b", "c"], "string"),
([1, 2, 3], "datetime64[ns]"),
([1, 2, 3], "datetime64[ns, CET]"),
([1, 2, 3], "timedelta64[ns]"),
(["2000", "2001", "2002"], "Period[D]"),
([1, 0, 3], "Sparse"),
([pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(3, 4)], "interval"),
],
)
@pytest.mark.parametrize(
"mask", [[True, False, False], [True, True, True], [False, False, False]]
)
@pytest.mark.parametrize("box_mask", [True, False])
@pytest.mark.parametrize("frame", [True, False])
def test_series_mask_boolean(values, dtype, mask, box_mask, frame):
ser = pd.Series(values, dtype=dtype, index=["a", "b", "c"])
if frame:
ser = ser.to_frame()
mask = pd.array(mask, dtype="boolean")
if box_mask:
mask = pd.Series(mask, index=ser.index)
expected = ser[mask.astype("bool")]
result = ser[mask]
tm.assert_equal(result, expected)
if not box_mask:
# Series.iloc[Series[bool]] isn't allowed
result = ser.iloc[mask]
tm.assert_equal(result, expected)
result = ser.loc[mask]
tm.assert_equal(result, expected)
# empty
mask = mask[:0]
ser = ser.iloc[:0]
expected = ser[mask.astype("bool")]
result = ser[mask]
tm.assert_equal(result, expected)
if not box_mask:
# Series.iloc[Series[bool]] isn't allowed
result = ser.iloc[mask]
tm.assert_equal(result, expected)
result = ser.loc[mask]
tm.assert_equal(result, expected)
@pytest.mark.parametrize("frame", [True, False])
def test_na_treated_as_false(frame):
# https://github.com/pandas-dev/pandas/issues/31503
s = | pd.Series([1, 2, 3], name="name") | pandas.Series |
##### Works by typing recomb_to_cM.py <recomb> <mareymap> #####
import sys
import pandas as pd
### Actual program ###
### Tries to read the needed columns of your recomb and mareymap tables
def inputs():
try:
recomb = pd.read_table(sys.argv[1], header=None, skiprows=3, sep = " ", comment="#", engine="c")
except:
        print("{0} cannot be read.".format(sys.argv[1]))
        raise
try:
mareymap = pd.read_table(sys.argv[2], header=0, skiprows=1, sep = " ", comment="#", engine="c")
except:
        print("{0} cannot be read.".format(sys.argv[2]))
        raise
#print(mareymap)
mareymap = mareymap[mareymap["map"] == sys.argv[3]]
mareymap_gen_max = max(mareymap["gen"])
mareymap.sort_values(by=["phys"], axis=0, inplace=True)
mareymap = mareymap["phys"].tolist()
return recomb, mareymap, mareymap_gen_max
### Produces a gene map from recombination estimates of 1/Mb
def points(recomb, mareymap, mareymap_gen_max):
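    # For each physical position on the marey map, find the recombination
    # window [start, end) that contains it and record that window's rate.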
tuples = []
for pos in mareymap:
pos_recomb = recomb.loc[((pos < recomb[1]) & (pos >= recomb[0])), 2].values[0]
print(pos_recomb)
tuples.append([pos, pos_recomb])
results = | pd.DataFrame(tuples) | pandas.DataFrame |
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, timedelta
import warnings
import itertools
import numpy as np
import pandas.core.common as com
from pandas.compat import lzip, map, zip, raise_with_traceback, string_types
from pandas.core.api import DataFrame
from pandas.core.base import PandasObject
from pandas.tseries.tools import to_datetime
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
#------------------------------------------------------------------------------
# Helper functions
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
args += list(params)
return args
def _safe_col_name(col_name):
#TODO: probably want to forbid database reserved names, such as "database"
return col_name.strip().replace(' ', '_')
def _handle_date_column(col, format=None):
if isinstance(format, dict):
return to_datetime(col, **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, coerce=True, unit=format)
elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, coerce=True, unit=format)
else:
return to_datetime(col, coerce=True, format=format)
def _parse_date_columns(data_frame, parse_dates):
""" Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
def execute(sql, con, cur=None, params=None, flavor='sqlite'):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, a supported SQL flavor must also be provided
    cur : deprecated, cursor is obtained from connection
params : list or tuple, optional
List of parameters to pass to execute method.
flavor : string "sqlite", "mysql"
Specifies the flavor of SQL to use.
Ignored when using SQLAlchemy engine. Required when using DBAPI2 connection.
Returns
-------
Results Iterable
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
def tquery(sql, con, cur=None, params=None, flavor='sqlite'):
"""
Returns list of tuples corresponding to each row in given sql
query.
If only one column selected, then plain list is returned.
Parameters
----------
sql: string
SQL query to be executed
con: SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
    cur: deprecated, cursor is obtained from connection
params: list or tuple, optional
List of parameters to pass to execute method.
flavor : string "sqlite", "mysql"
Specifies the flavor of SQL to use.
Ignored when using SQLAlchemy engine. Required when using DBAPI2
connection.
Returns
-------
Results Iterable
"""
warnings.warn(
"tquery is depreciated, and will be removed in future versions",
DeprecationWarning)
pandas_sql = pandasSQL_builder(con, flavor=flavor)
args = _convert_params(sql, params)
return pandas_sql.tquery(*args)
def uquery(sql, con, cur=None, params=None, engine=None, flavor='sqlite'):
"""
Does the same thing as tquery, but instead of returning results, it
returns the number of rows affected. Good for update queries.
Parameters
----------
sql: string
SQL query to be executed
con: SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
cur: depreciated, cursor is obtained from connection
params: list or tuple, optional
List of parameters to pass to execute method.
flavor : string "sqlite", "mysql"
Specifies the flavor of SQL to use.
Ignored when using SQLAlchemy engine. Required when using DBAPI2
connection.
Returns
-------
Number of affected rows
"""
warnings.warn(
"uquery is depreciated, and will be removed in future versions",
DeprecationWarning)
pandas_sql = pandasSQL_builder(con, flavor=flavor)
args = _convert_params(sql, params)
return pandas_sql.uquery(*args)
#------------------------------------------------------------------------------
# Read and write to DataFrames
def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True,
params=None, parse_dates=None):
"""
Returns a DataFrame corresponding to the result set of the query
string.
Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string
SQL query to be executed
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
index_col : string, optional
column name to use for the returned DataFrame object.
flavor : string, {'sqlite', 'mysql'}
The flavor of SQL to use. Ignored when using
SQLAlchemy engine. Required when using DBAPI2 connection.
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
    cur : deprecated, cursor is obtained from connection
params : list or tuple, optional
List of parameters to pass to execute method.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
Returns
-------
DataFrame
See also
--------
read_table
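    Examples
    --------
    A minimal sketch (the table and column names below are placeholders)::
        from sqlalchemy import create_engine
        engine = create_engine('sqlite:///:memory:')
        df = read_sql('SELECT * FROM my_table;', engine,
                      index_col='id', parse_dates=['created_at'])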
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
return pandas_sql.read_sql(sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates)
def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
Required when using DBAPI2 connection.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index)
def has_table(table_name, con, meta=None, flavor='sqlite'):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor name must also be provided
flavor: {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
Required when using DBAPI2 connection.
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
return pandas_sql.has_table(table_name)
def read_table(table_name, con, meta=None, index_col=None, coerce_float=True,
parse_dates=None, columns=None):
"""Given a table name and SQLAlchemy engine, return a DataFrame.
    Type conversions will be done automatically.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy engine
Legacy mode not supported
meta : SQLAlchemy meta, optional
If omitted MetaData is reflected from engine
index_col : string, optional
Column to set as index
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table
Returns
-------
DataFrame
See also
--------
read_sql
"""
pandas_sql = PandasSQLAlchemy(con, meta=meta)
table = pandas_sql.read_table(table_name,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def pandasSQL_builder(con, flavor=None, meta=None):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
try:
import sqlalchemy
if isinstance(con, sqlalchemy.engine.Engine):
return PandasSQLAlchemy(con, meta=meta)
else:
warnings.warn(
"""Not an SQLAlchemy engine,
attempting to use as legacy DBAPI connection""")
if flavor is None:
raise ValueError(
"""PandasSQL must be created with an SQLAlchemy engine
                    or a DBAPI2 connection and SQL flavor""")
else:
return PandasSQLLegacy(con, flavor)
except ImportError:
warnings.warn("SQLAlchemy not installed, using legacy mode")
if flavor is None:
raise SQLAlchemyRequired
else:
return PandasSQLLegacy(con, flavor)
class PandasSQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses fact that table is reflected by SQLAlchemy to
    do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas'):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index)
if frame is not None:
# We want to write a frame
if self.pd_sql.has_table(self.name):
if if_exists == 'fail':
raise ValueError("Table '%s' already exists." % name)
elif if_exists == 'replace':
self.pd_sql.drop_table(self.name)
self.table = self._create_table_statement()
self.create()
elif if_exists == 'append':
self.table = self.pd_sql.get_table(self.name)
if self.table is None:
self.table = self._create_table_statement()
else:
self.table = self._create_table_statement()
self.create()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name)
def sql_schema(self):
return str(self.table.compile())
def create(self):
self.table.create()
def insert_statement(self):
return self.table.insert()
def maybe_asscalar(self, i):
try:
return np.asscalar(i)
except AttributeError:
return i
def insert(self):
ins = self.insert_statement()
data_list = []
# to avoid if check for every row
keys = self.frame.columns
if self.index is not None:
for t in self.frame.itertuples():
data = dict((k, self.maybe_asscalar(v))
for k, v in zip(keys, t[1:]))
data[self.index] = self.maybe_asscalar(t[0])
data_list.append(data)
else:
for t in self.frame.itertuples():
data = dict((k, self.maybe_asscalar(v))
for k, v in zip(keys, t[1:]))
data_list.append(data)
self.pd_sql.execute(ins, data_list)
def read(self, coerce_float=True, parse_dates=None, columns=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
cols.insert(0, self.table.c[self.index])
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
data = result.fetchall()
column_names = result.keys()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
            # Assume that if the index is in prefix_index format, we gave it a
            # name and should return it nameless
if self.index == self.prefix + '_index':
self.frame.index.name = None
return self.frame
def _index_name(self, index):
if index is True:
if self.frame.index.name is not None:
return _safe_col_name(self.frame.index.name)
else:
return self.prefix + '_index'
elif isinstance(index, string_types):
return index
else:
return None
def _create_table_statement(self):
from sqlalchemy import Table, Column
safe_columns = map(_safe_col_name, self.frame.dtypes.index)
column_types = map(self._sqlalchemy_type, self.frame.dtypes)
columns = [Column(name, typ)
for name, typ in zip(safe_columns, column_types)]
if self.index is not None:
columns.insert(0, Column(self.index,
self._sqlalchemy_type(
self.frame.index),
index=True))
return Table(self.name, self.pd_sql.meta, *columns)
def _harmonize_columns(self, parse_dates=None):
""" Make a data_frame's column type align with an sql_table
column types
Need to work around limited NA value support.
Floats are always fine, ints must always
be floats if there are Null values.
Booleans are hard because converting bool column with None replaces
all Nones with false. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted
to np.datetime if supported, but here we also force conversion
if required
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._numpy_type(sql_col.type)
if col_type is datetime or col_type is date:
if not issubclass(df_col.dtype.type, np.datetime64):
self.frame[col_name] = _handle_date_column(df_col)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name].astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is int or col_type is bool:
self.frame[col_name].astype(col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _sqlalchemy_type(self, arr_or_dtype):
from sqlalchemy.types import Integer, Float, Text, Boolean, DateTime, Date, Interval
if arr_or_dtype is date:
return Date
if com.is_datetime64_dtype(arr_or_dtype):
try:
tz = arr_or_dtype.tzinfo
return DateTime(timezone=True)
except:
return DateTime
if com.is_timedelta64_dtype(arr_or_dtype):
return Interval
elif com.is_float_dtype(arr_or_dtype):
return Float
elif com.is_integer_dtype(arr_or_dtype):
# TODO: Refine integer size.
return Integer
elif | com.is_bool(arr_or_dtype) | pandas.core.common.is_bool |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
import numpy as np
import seaborn as sn
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
from tqdm import tqdm
def get_model_torch(in_shape, out_shape):
from models import model_torch
return model_torch(in_shape, out_shape)
def to_categorical(y, num_classes):
""" 1-hot encodes a tensor """
return np.eye(num_classes, dtype='uint8')[y]
def visualize_conf_matrix(matrix, class_list):
df_cm = pd.DataFrame(matrix, index = [i for i in class_list],
columns = [i for i in class_list])
plt.figure(figsize = (13,7))
sn.set(font_scale=1.8)
sn.heatmap(df_cm, annot=True, cmap='Greys', fmt='g', annot_kws={"size": 20})
plt.show(block=False);
folder = './sound_event_detection/figures/'
if not os.path.exists(folder):
os.makedirs(folder)
plt.savefig(folder + 'confusion_matrix' + '.png', bbox_inches='tight')
def get_conf_matrix(y_pred, y_test):
y_pred_max = []
y_test_max = []
for j in y_pred:
y_pred_max.append(np.argmax(j))
for j in y_test:
y_test_max.append(np.argmax(j))
return confusion_matrix(y_test_max, y_pred_max)
def get_metrics(conf_matrix):
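    # Micro-averaged counts over the whole confusion matrix: true positives are
    # the diagonal entries, false positives the off-diagonal column sums, and
    # false negatives the off-diagonal row sums; epsilon guards the divisions.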
tn = 0.0
fp = 0.0
tp = 0.0
fn = 0.0
epsilon = 0.01
for it1 in range(conf_matrix.shape[0]):
tp += conf_matrix[it1][it1]
for it2 in range(conf_matrix.shape[1]):
if it2 != it1:
fp += conf_matrix[it2][it1]
fn += conf_matrix[it1][it2]
precision = tp / (tp + fp + epsilon)
recall = tp / (tp + fn + epsilon)
f1 = 2 * (precision*recall) / (precision + recall + epsilon)
return tp, precision, recall, f1
print('#'*40, "\n\t\tTesting\n")
token = ''
test_reader = | pd.read_table('./sound_event_detection/src/test'+token+'.csv', sep='\t', encoding='utf-8') | pandas.read_table |
import datetime
import numpy as np
import pandas as pd
from sqlalchemy import sql
def get_and_adjust_data(db_engine, station_id, start, end):
"""
Get data from the database in both the bike count format and the outage
format, between the passed dates. If bike count data and outage data is
available for the same time, bike count data takes precedence.
If no data is available for a subset of the passed period of time, it will
be left out of the returned dataset.
"""
data_list = []
# Create empty DateTimeIndex with frequency of five minutes, and assign it
# to an empty series.
# "5T" is five minutes.
dti = pd.date_range(0, -1, freq="5T")
data = pd.Series(None, index=dti)
# Add data in the bike count format.
bike_counts = pd.read_sql_query(
"SELECT ts, bikes, spaces FROM bike_count "
+ "WHERE station_id = %(station_id)s AND "
+ "ts >= %(start)s AND ts <= %(end)s;",
db_engine, params={
"station_id": station_id, "start": start, "end": end})
# bike_count[0] is the index, [1..3] are the columns in the order
# selected in the above query
for bike_count in bike_counts.itertuples():
# Do not insert counts with no bikes or spaces (inactive stations).
if not (bike_count[2] == 0 and bike_count[3] == 0):
ts = pd.to_datetime(bike_count[1], infer_datetime_format=True)
# Round the timestamp to the nearest five minute mark.
ts += datetime.timedelta(seconds=150)
ts = ts.replace(
minute=(ts.minute - (ts.minute % 5)), second=0, microsecond=0)
# A status of np.nan means the station is neither full nor empty.
status = np.nan
if bike_count[2] == 0:
status = "empty"
elif bike_count[3] == 0:
status = "full"
# Create index with only one entry, ts.
index = pd.date_range(ts, ts, freq="5T")
data_list.append(pd.Series(status, index=index))
if len(data_list) > 0:
data = pd.concat(data_list)
try:
data_list = []
# Add data in the outage format.
outages = pd.read_sql_query(
"SELECT outage_type, outage_start, outage_end FROM outage "
+ "WHERE station_id = %(station_id)s AND "
+ "outage_start >= %(start)s AND outage_end <= %(end)s;",
db_engine, params={
"station_id": station_id, "start": start, "end": end})
# Merge each outage into dataframe.
for outage in outages.itertuples():
            ostart = pd.to_datetime(outage[2], infer_datetime_format=True)
import mock
import os
import pandas as pd
import pytest
from datetime import datetime
from flexmock import flexmock
from sportsreference import utils
from sportsreference.ncaab.roster import Player, Roster
from sportsreference.ncaab.teams import Team
YEAR = 2018
def read_file(filename):
filepath = os.path.join(os.path.dirname(__file__), 'ncaab', filename)
return open('%s.html' % filepath, 'r', encoding='utf8').read()
def mock_pyquery(url):
class MockPQ:
def __init__(self, html_contents, status=200):
self.url = url
self.reason = 'Bad URL' # Used when throwing HTTPErrors
self.headers = {} # Used when throwing HTTPErrors
self.status_code = status
self.html_contents = html_contents
self.text = html_contents
if 'purdue' in url:
return MockPQ(read_file('2018'))
if 'isaac-haas-1' in url:
return MockPQ(read_file('isaac-haas-1'))
if 'vince-edwards-2' in url:
return MockPQ(read_file('vince-edwards-2'))
if 'bad' in url:
return MockPQ(None, 404)
return MockPQ(read_file('carsen-edwards-1'))
def mock_request(url):
class MockRequest:
def __init__(self, html_contents, status_code=200):
self.status_code = status_code
self.html_contents = html_contents
self.text = html_contents
if str(YEAR) in url:
return MockRequest('good')
else:
return MockRequest('bad', status_code=404)
class TestNCAABPlayer:
@mock.patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
self.results_career = {
'assist_percentage': 17.3,
'assists': 166,
'block_percentage': 0.6,
'blocks': 11,
'box_plus_minus': 6.1,
'conference': '',
'defensive_box_plus_minus': 1.9,
'defensive_rebound_percentage': 11.8,
'defensive_rebounds': 206,
'defensive_win_shares': 3.5,
'effective_field_goal_percentage': 0.515,
'field_goal_attempts': 835,
'field_goal_percentage': 0.428,
'field_goals': 357,
'free_throw_attempt_rate': 0.279,
'free_throw_attempts': 233,
'free_throw_percentage': 0.798,
'free_throws': 186,
'games_played': 72,
'games_started': 58,
'height': '6-1',
'minutes_played': 1905,
'name': '<NAME>',
'offensive_box_plus_minus': 4.2,
'offensive_rebound_percentage': 1.8,
'offensive_rebounds': 27,
'offensive_win_shares': 4.4,
'personal_fouls': 133,
'player_efficiency_rating': 19.9,
'points': 1046,
'points_produced': 961,
'position': 'Guard',
'season': 'Career',
'steal_percentage': 2.4,
'steals': 78,
'team_abbreviation': 'purdue',
'three_point_attempt_rate': 0.459,
'three_point_attempts': 383,
'three_point_percentage': 0.381,
'three_pointers': 146,
'total_rebound_percentage': 7.2,
'total_rebounds': 233,
'true_shooting_percentage': 0.553,
'turnover_percentage': 11.9,
'turnovers': 128,
'two_point_attempts': 452,
'two_point_percentage': 0.467,
'two_pointers': 211,
'usage_percentage': 28.9,
'weight': 190,
'win_shares': 8.0,
'win_shares_per_40_minutes': 0.167
}
self.results_2018 = {
'assist_percentage': 19.5,
'assists': 104,
'block_percentage': 0.8,
'blocks': 8,
'box_plus_minus': 9.0,
'conference': 'big-ten',
'defensive_box_plus_minus': 1.5,
'defensive_rebound_percentage': 12.9,
'defensive_rebounds': 129,
'defensive_win_shares': 2.0,
'effective_field_goal_percentage': 0.555,
'field_goal_attempts': 500,
'field_goal_percentage': 0.458,
'field_goals': 229,
'free_throw_attempt_rate': 0.318,
'free_throw_attempts': 159,
'free_throw_percentage': 0.824,
'free_throws': 131,
'games_played': 37,
'games_started': 37,
'height': '6-1',
'minutes_played': 1092,
'name': '<NAME>',
'offensive_box_plus_minus': 7.6,
'offensive_rebound_percentage': 1.5,
'offensive_rebounds': 13,
'offensive_win_shares': 4.0,
'personal_fouls': 65,
'player_efficiency_rating': 25.4,
'points': 686,
'points_produced': 626,
'position': 'Guard',
'season': '2017-18',
'steal_percentage': 2.3,
'steals': 42,
'team_abbreviation': 'purdue',
'three_point_attempt_rate': 0.478,
'three_point_attempts': 239,
'three_point_percentage': 0.406,
'three_pointers': 97,
'total_rebound_percentage': 7.7,
'total_rebounds': 142,
'true_shooting_percentage': 0.596,
'turnover_percentage': 10.0,
'turnovers': 64,
'two_point_attempts': 261,
'two_point_percentage': 0.506,
'two_pointers': 132,
'usage_percentage': 30.5,
'weight': 190,
'win_shares': 6.1,
'win_shares_per_40_minutes': 0.223
}
self.player = Player('carsen-edwards-1')
def test_ncaab_player_returns_requested_player_career_stats(self):
# Request the career stats
player = self.player('')
for attribute, value in self.results_career.items():
assert getattr(player, attribute) == value
def test_ncaab_player_returns_requested_player_season_stats(self):
# Request the 2017-18 stats
player = self.player('2017-18')
for attribute, value in self.results_2018.items():
assert getattr(player, attribute) == value
def test_correct_initial_index_found(self):
seasons = ['2017-18', 'Career', '2016-17']
mock_season = mock.PropertyMock(return_value=seasons)
player = Player('carsen-edwards-1')
type(player)._season = mock_season
result = player._find_initial_index()
assert player._index == 1
def test_dataframe_returns_dataframe(self):
dataframe = [
{'assist_percentage': 17.3,
'assists': 166,
'block_percentage': 0.6,
'blocks': 11,
'box_plus_minus': 6.1,
'conference': '',
'defensive_box_plus_minus': 1.9,
'defensive_rebound_percentage': 11.8,
'defensive_rebounds': 206,
'defensive_win_shares': 3.5,
'effective_field_goal_percentage': 0.515,
'field_goal_attempts': 835,
'field_goal_percentage': 0.428,
'field_goals': 357,
'free_throw_attempt_rate': 0.279,
'free_throw_attempts': 233,
'free_throw_percentage': 0.798,
'free_throws': 186,
'games_played': 72,
'games_started': 58,
'height': '6-1',
'minutes_played': 1905,
'offensive_box_plus_minus': 4.2,
'offensive_rebound_percentage': 1.8,
'offensive_rebounds': 27,
'offensive_win_shares': 4.4,
'personal_fouls': 133,
'player_efficiency_rating': 19.9,
'player_id': 'carsen-edwards-1',
'points': 1046,
'points_produced': 961,
'position': 'Guard',
'steal_percentage': 2.4,
'steals': 78,
'team_abbreviation': 'purdue',
'three_point_attempt_rate': 0.459,
'three_point_attempts': 383,
'three_point_percentage': 0.381,
'three_pointers': 146,
'total_rebound_percentage': 7.2,
'total_rebounds': 233,
'true_shooting_percentage': 0.553,
'turnover_percentage': 11.9,
'turnovers': 128,
'two_point_attempts': 452,
'two_point_percentage': 0.467,
'two_pointers': 211,
'usage_percentage': 28.9,
'weight': 190,
'win_shares': 8.0,
'win_shares_per_40_minutes': 0.167},
{'assist_percentage': 14.3,
'assists': 62,
'block_percentage': 0.4,
'blocks': 3,
'box_plus_minus': 2.1,
'conference': 'big-ten',
'defensive_box_plus_minus': 2.4,
'defensive_rebound_percentage': 10.4,
'defensive_rebounds': 77,
'defensive_win_shares': 1.5,
'effective_field_goal_percentage': 0.455,
'field_goal_attempts': 335,
'field_goal_percentage': 0.382,
'field_goals': 128,
'free_throw_attempt_rate': 0.221,
'free_throw_attempts': 74,
'free_throw_percentage': 0.743,
'free_throws': 55,
'games_played': 35,
'games_started': 21,
'height': '6-1',
'minutes_played': 813,
'offensive_box_plus_minus': -0.3,
'offensive_rebound_percentage': 2.2,
'offensive_rebounds': 14,
'offensive_win_shares': 0.4,
'personal_fouls': 68,
'player_efficiency_rating': 12.5,
'player_id': 'carsen-edwards-1',
'points': 360,
'points_produced': 335,
'position': 'Guard',
'steal_percentage': 2.5,
'steals': 36,
'team_abbreviation': 'purdue',
'three_point_attempt_rate': 0.43,
'three_point_attempts': 144,
'three_point_percentage': 0.34,
'three_pointers': 49,
'total_rebound_percentage': 6.6,
'total_rebounds': 91,
'true_shooting_percentage': 0.486,
'turnover_percentage': 14.7,
'turnovers': 64,
'two_point_attempts': 191,
'two_point_percentage': 0.414,
'two_pointers': 79,
'usage_percentage': 26.8,
'weight': 190,
'win_shares': 1.9,
'win_shares_per_40_minutes': 0.092},
{'assist_percentage': 19.5,
'assists': 104,
'block_percentage': 0.8,
'blocks': 8,
'box_plus_minus': 9.0,
'conference': 'big-ten',
'defensive_box_plus_minus': 1.5,
'defensive_rebound_percentage': 12.9,
'defensive_rebounds': 129,
'defensive_win_shares': 2.0,
'effective_field_goal_percentage': 0.555,
'field_goal_attempts': 500,
'field_goal_percentage': 0.458,
'field_goals': 229,
'free_throw_attempt_rate': 0.318,
'free_throw_attempts': 159,
'free_throw_percentage': 0.824,
'free_throws': 131,
'games_played': 37,
'games_started': 37,
'height': '6-1',
'minutes_played': 1092,
'offensive_box_plus_minus': 7.6,
'offensive_rebound_percentage': 1.5,
'offensive_rebounds': 13,
'offensive_win_shares': 4.0,
'personal_fouls': 65,
'player_efficiency_rating': 25.4,
'player_id': 'carsen-edwards-1',
'points': 686,
'points_produced': 626,
'position': 'Guard',
'steal_percentage': 2.3,
'steals': 42,
'team_abbreviation': 'purdue',
'three_point_attempt_rate': 0.478,
'three_point_attempts': 239,
'three_point_percentage': 0.406,
'three_pointers': 97,
'total_rebound_percentage': 7.7,
'total_rebounds': 142,
'true_shooting_percentage': 0.596,
'turnover_percentage': 10.0,
'turnovers': 64,
'two_point_attempts': 261,
'two_point_percentage': 0.506,
'two_pointers': 132,
'usage_percentage': 30.5,
'weight': 190,
'win_shares': 6.1,
'win_shares_per_40_minutes': 0.223}
]
indices = ['Career', '2016-17', '2017-18']
df = pd.DataFrame(dataframe, index=indices)
player = self.player('')
# Pandas doesn't natively allow comparisons of DataFrames.
# Concatenating the two DataFrames (the one generated during the test
# and the expected one above) and dropping duplicate rows leaves only
# the rows that are unique between the two frames. This allows a quick
# check of the DataFrame to see if it is empty - if so, all rows are
# duplicates, and they are equal.
frames = [df, player.dataframe]
        df1 = pd.concat(frames).drop_duplicates(keep=False)
        # All rows should be duplicates across the two frames, so nothing is
        # left after dropping them.
        assert df1.empty
"""
Dataset classes.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import json
import warnings
from glob import glob
import pandas as pd
from torch.utils.data import Dataset, ConcatDataset
def _create_description(description):
if description is not None:
if (not isinstance(description, pd.Series) and
not isinstance(description, dict)):
raise ValueError(f"'{description}' has to be either a "
f"pandas.Series or a dict.")
if isinstance(description, dict):
description = pd.Series(description)
return description
class BaseDataset(Dataset):
"""Returns samples from an mne.io.Raw object along with a target.
Dataset which serves samples from an mne.io.Raw object along with a target.
The target is unique for the dataset, and is obtained through the
`description` attribute.
Parameters
----------
raw: mne.io.Raw
Continuous data.
description: dict | pandas.Series | None
Holds additional description about the continuous signal / subject.
target_name: str | None
Name of the index in `description` that should be used to provide the
target (e.g., to be used in a prediction task later on).
transform : callable | None
On-the-fly transform applied to the example before it is returned.
"""
def __init__(self, raw, description=None, target_name=None,
transform=None):
self.raw = raw
self.description = _create_description(description)
self.transform = transform
# save target name for load/save later
self.target_name = target_name
if target_name is None:
self.target = None
elif target_name in self.description:
self.target = self.description[target_name]
else:
raise ValueError(f"'{target_name}' not in description.")
def __getitem__(self, index):
X, y = self.raw[:, index][0], self.target
if self.transform is not None:
X = self.transform(X)
return X, y
def __len__(self):
return len(self.raw)
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
if value is not None and not callable(value):
raise ValueError('Transform needs to be a callable.')
self._transform = value
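# Hedged usage sketch (not part of braindecode; channel names, sampling rate
# and the description fields below are made-up example values).
def _example_base_dataset():
    import numpy as np
    import mne
    info = mne.create_info(ch_names=['C3', 'C4'], sfreq=100.)
    raw = mne.io.RawArray(np.random.randn(2, 1000), info)
    desc = {'subject': 1, 'target': 0}
    ds = BaseDataset(raw, description=desc, target_name='target')
    X, y = ds[0]  # one time point of the continuous signal and its target
    return X, y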
class WindowsDataset(BaseDataset):
"""Returns windows from an mne.Epochs object along with a target.
Dataset which serves windows from an mne.Epochs object along with their
target and additional information. The `metadata` attribute of the Epochs
object must contain a column called `target`, which will be used to return
the target that corresponds to a window. Additional columns
`i_window_in_trial`, `i_start_in_trial`, `i_stop_in_trial` are also
required to serve information about the windowing (e.g., useful for cropped
training).
See `braindecode.datautil.windowers` to directly create a `WindowsDataset`
from a `BaseDataset` object.
Parameters
----------
windows : mne.Epochs
Windows obtained through the application of a windower to a BaseDataset
(see `braindecode.datautil.windowers`).
description : dict | pandas.Series | None
Holds additional info about the windows.
transform : callable | None
On-the-fly transform applied to a window before it is returned.
"""
def __init__(self, windows, description=None, transform=None):
self.windows = windows
self.description = _create_description(description)
self.transform = transform
self.y = self.windows.metadata.loc[:, 'target'].to_numpy()
self.crop_inds = self.windows.metadata.loc[
:, ['i_window_in_trial', 'i_start_in_trial',
'i_stop_in_trial']].to_numpy()
def __getitem__(self, index):
X = self.windows.get_data(item=index)[0].astype('float32')
if self.transform is not None:
X = self.transform(X)
y = self.y[index]
# necessary to cast as list to get list of three tensors from batch,
# otherwise get single 2d-tensor...
crop_inds = self.crop_inds[index].tolist()
return X, y, crop_inds
def __len__(self):
return len(self.windows.events)
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
if value is not None and not callable(value):
raise ValueError('Transform needs to be a callable.')
self._transform = value
class BaseConcatDataset(ConcatDataset):
"""A base class for concatenated datasets. Holds either mne.Raw or
mne.Epoch in self.datasets and has a pandas DataFrame with additional
description.
Parameters
----------
list_of_ds: list
list of BaseDataset, BaseConcatDataset or WindowsDataset
"""
def __init__(self, list_of_ds):
# if we get a list of BaseConcatDataset, get all the individual datasets
if list_of_ds and isinstance(list_of_ds[0], BaseConcatDataset):
list_of_ds = [d for ds in list_of_ds for d in ds.datasets]
super().__init__(list_of_ds)
        self.description = pd.DataFrame([ds.description for ds in list_of_ds])
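# Hedged usage sketch (not part of braindecode; reuses the synthetic-raw idea
# from the BaseDataset example above with made-up description fields).
def _example_base_concat_dataset():
    import numpy as np
    import mne
    datasets = []
    for subject in (1, 2):
        info = mne.create_info(ch_names=['C3', 'C4'], sfreq=100.)
        raw = mne.io.RawArray(np.random.randn(2, 1000), info)
        datasets.append(BaseDataset(raw, description={'subject': subject}))
    concat = BaseConcatDataset(datasets)
    # concat.description is a pandas DataFrame with one row per dataset
    return concat.description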
import inspect
import json
import os
import re
from urllib.parse import quote
from urllib.request import urlopen
import pandas as pd
import param
from .configuration import DEFAULTS
class TutorialData(param.Parameterized):
label = param.String(allow_None=True)
raw = param.Boolean()
verbose = param.Boolean()
return_meta = param.Boolean()
use_cache = param.Boolean()
_source = None
_base_url = None
_data_url = None
_description = None
def __init__(self, **kwds):
super().__init__(**kwds)
self._cache_dir = DEFAULTS["cache_kwds"]["directory"]
self._remove_href = re.compile(r"<(a|/a).*?>")
os.makedirs(self._cache_dir, exist_ok=True)
self._init_owid()
@property
def _cache_path(self):
cache_file = f"{self.label}.pkl"
return os.path.join(self._cache_dir, cache_file)
@property
def _dataset_options(self):
options = set([])
for method in dir(self):
if method.startswith("_load_") and "owid" not in method:
options.add(method.replace("_load_", ""))
return list(options) + list(self._owid_labels_df.columns)
@staticmethod
def _specify_cache(cache_path, **kwds):
if kwds:
cache_ext = "_".join(
f"{key}={val}".replace(os.sep, "") for key, val in kwds.items()
)
cache_path = f"{os.path.splitext(cache_path)[0]}_{cache_ext}.pkl"
return cache_path
def _cache_dataset(self, df, cache_path=None, **kwds):
if cache_path is None:
cache_path = self._cache_path
cache_path = self._specify_cache(cache_path, **kwds)
df.to_pickle(cache_path)
def _read_cache(self, cache_path=None, **kwds):
if not self.use_cache:
return None
if cache_path is None:
cache_path = self._cache_path
cache_path = self._specify_cache(cache_path, **kwds)
try:
return pd.read_pickle(cache_path)
except Exception:
if os.path.exists(cache_path):
os.remove(cache_path)
return None
@staticmethod
def _snake_urlify(s):
# Replace all hyphens with underscore
s = s.replace(" - ", "_").replace("-", "_")
# Remove all non-word characters (everything except numbers and letters)
s = re.sub(r"[^\w\s]", "", s)
# Replace all runs of whitespace with a underscore
s = re.sub(r"\s+", "_", s)
return s.lower()
def _init_owid(self):
cache_path = os.path.join(self._cache_dir, "owid_labels.pkl")
self._owid_labels_df = self._read_cache(cache_path=cache_path)
if self._owid_labels_df is not None:
return
owid_api_url = (
"https://api.github.com/"
"repos/owid/owid-datasets/"
"git/trees/master?recursive=1"
)
with urlopen(owid_api_url) as f:
sources = json.loads(f.read().decode("utf-8"))
owid_labels = {}
owid_raw_url = "https://raw.githubusercontent.com/owid/owid-datasets/master/"
for source_tree in sources["tree"]:
path = source_tree["path"]
if ".csv" not in path and ".json" not in path:
continue
label = "owid_" + self._snake_urlify(path.split("/")[-2].strip())
if label not in owid_labels:
owid_labels[label] = {}
url = f"{owid_raw_url}/{quote(path)}"
if ".csv" in path:
owid_labels[label]["data"] = url
elif ".json" in path:
owid_labels[label]["meta"] = url
self._owid_labels_df = pd.DataFrame(owid_labels)
self._cache_dataset(self._owid_labels_df, cache_path=cache_path)
def _load_owid(self, **kwds):
self._data_url = self._owid_labels_df[self.label]["data"]
meta_url = self._owid_labels_df[self.label]["meta"]
with urlopen(meta_url) as response:
meta = json.loads(response.read().decode())
self.label = meta["title"]
self._source = (
" & ".join(source["dataPublishedBy"] for source in meta["sources"])
+ " curated by Our World in Data (OWID)"
)
self._base_url = (
" & ".join(source["link"] for source in meta["sources"])
+ " through https://github.com/owid/owid-datasets"
)
self._description = re.sub(self._remove_href, "", meta["description"])
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df.columns = [self._snake_urlify(col) for col in df.columns]
return df
def _load_annual_co2(self, **kwds):
self._source = "NOAA ESRL"
self._base_url = "https://www.esrl.noaa.gov/"
self._data_url = (
"https://www.esrl.noaa.gov/"
"gmd/webdata/ccgg/trends/co2/co2_annmean_mlo.txt"
)
self._description = (
"The carbon dioxide data on Mauna Loa constitute the longest record "
"of direct measurements of CO2 in the atmosphere. They were started "
"by <NAME> of the Scripps Institution of Oceanography in "
"March of 1958 at a facility of the National Oceanic and Atmospheric "
"Administration [Keeling, 1976]. NOAA started its own CO2 measurements "
"in May of 1974, and they have run in parallel with those made by "
"Scripps since then [Thoning, 1989]."
)
df = self._read_cache(**kwds)
if df is None:
base_kwds = dict(
header=None,
comment="#",
sep="\s+", # noqa
names=["year", "co2_ppm", "uncertainty"],
)
base_kwds.update(kwds)
            df = pd.read_csv(self._data_url, **base_kwds)
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import zipfile
import os
import geopy.distance
import random
import pandas as pd
import numpy as np
import csv
from enum import Enum
from yaml import safe_load
from maro.cli.data_pipeline.utils import download_file, StaticParameter
from maro.utils.logger import CliLogger
from maro.cli.data_pipeline.base import DataPipeline, DataTopology
logger = CliLogger(name=__name__)
class CitiBikePipeline(DataPipeline):
_download_file_name = "trips.zip"
_station_info_file_name = "full_station.json"
_clean_file_name = "trips.csv"
_build_file_name = "trips.bin"
_station_meta_file_name = "station_meta.csv"
_distance_file_name = "distance_adj.csv"
_meta_file_name = "trips.yml"
def __init__(self, topology: str, source: str, station_info: str, is_temp: bool = False):
"""
Generate citi_bike data bin and other necessary files for the specified topology from specified source.
They will be generated in ~/.maro/data/citi_bike/[topology]/_build.
Folder structure:
~/.maro
/data/citi_bike/[topology]
/_build bin data file and other necessary files
/source
/_download original data files
/_clean cleaned data files
/temp download temp files
Args:
topology(str): topology name of the data files
source(str): source url of original data file
station_info(str): source url of station info file
is_temp(bool): (optional) if the data file is temporary
"""
super().__init__("citi_bike", topology, source, is_temp)
self._station_info = station_info
self._station_info_file = os.path.join(self._download_folder, self._station_info_file_name)
self._distance_file = os.path.join(self._build_folder, self._distance_file_name)
self._station_meta_file = os.path.join(self._build_folder, self._station_meta_file_name)
self._common_data = {}
def download(self, is_force: bool = False):
"""download the zip file"""
super().download(is_force)
self._new_file_list.append(self._station_info_file)
if (not is_force) and os.path.exists(self._station_info_file):
logger.info_green("File already exists, skipping download.")
else:
logger.info_green(f"Downloading trip data from {self._station_info} to {self._station_info_file}")
download_file(source=self._station_info, destination=self._station_info_file)
def clean(self):
"""unzip the csv file and process it for building binary file"""
super().clean()
logger.info_green("Cleaning trip data")
if os.path.exists(self._download_file):
# unzip
logger.info_green("Unzip start")
with zipfile.ZipFile(self._download_file, "r") as zip_ref:
for filename in zip_ref.namelist():
# Only one csv file is expected.
if (
filename.endswith(".csv") and
(not (filename.startswith("__MACOSX") or filename.startswith(".")))
):
logger.info_green(f"Unzip {filename} from {self._download_file}")
zip_ref.extractall(self._clean_folder, [filename])
unzip_file = os.path.join(self._clean_folder, filename)
self._new_file_list.append(unzip_file)
self._preprocess(unzipped_file=unzip_file)
break
else:
logger.warning(f"Not found downloaded trip data: {self._download_file}")
def _read_common_data(self):
"""read and full init data and existed stations"""
full_stations = None
with open(self._station_info_file, mode="r", encoding="utf-8") as station_file:
# read station to station file
raw_station_data = pd.DataFrame.from_dict(pd.read_json(station_file)["data"]["stations"])
station_data = raw_station_data.rename(columns={
"lon": "station_longitude",
"lat": "station_latitude",
"region_id": "region"})
# group by station to generate station init info
full_stations = station_data[
["station_id", "capacity", "station_longitude", "station_latitude"]
].reset_index(drop=True)
# generate station id by index
full_stations["station_id"] = pd.to_numeric(full_stations["station_id"], downcast="integer")
full_stations["capacity"] = pd.to_numeric(full_stations["capacity"], downcast="integer")
full_stations["station_longitude"] = pd.to_numeric(full_stations["station_longitude"], downcast="float")
full_stations["station_latitude"] = pd.to_numeric(full_stations["station_latitude"], downcast="float")
full_stations.drop(full_stations[full_stations["capacity"] == 0].index, axis=0, inplace=True)
full_stations.dropna(
subset=["station_id", "capacity", "station_longitude", "station_latitude"], inplace=True
)
self._common_data["full_stations"] = full_stations
self._common_data["full_station_num"] = len(self._common_data["full_stations"])
self._common_data["full_dock_num"] = self._common_data["full_stations"]["capacity"].sum()
def _read_src_file(self, file: str):
"""read and return processed rows"""
ret = []
if os.path.exists(file):
# For ignoring the unimportant issues in the source file.
with open(file, "r", encoding="utf-8", errors="ignore") as fp:
ret = pd.read_csv(fp)
ret = ret[[
"tripduration", "starttime", "start station id", "end station id", "start station latitude",
"start station longitude", "end station latitude", "end station longitude", "gender", "usertype",
"bikeid"
]]
ret["tripduration"] = pd.to_numeric(
pd.to_numeric(ret["tripduration"], downcast="integer") / 60, downcast="integer"
)
ret["starttime"] = pd.to_datetime(ret["starttime"])
ret["start station id"] = pd.to_numeric(ret["start station id"], errors="coerce", downcast="integer")
ret["end station id"] = pd.to_numeric(ret["end station id"], errors="coerce", downcast="integer")
ret["start station latitude"] = pd.to_numeric(ret["start station latitude"], downcast="float")
ret["start station longitude"] = pd.to_numeric(ret["start station longitude"], downcast="float")
ret["end station latitude"] = pd.to_numeric(ret["end station latitude"], downcast="float")
ret["end station longitude"] = | pd.to_numeric(ret["end station longitude"], downcast="float") | pandas.to_numeric |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 18 08:32:19 2021
revi take: plot time series of deal amounts for SEI/P2015 clusters (5 or 10)
at the settlement level, with a map of choropleths of the mean/median value
on the right. For this, the muni shapefile needs to be prepared with RC in it.
@author: shlomi
"""
from MA_paths import work_david
from shapely.geometry import *
nadlan_path = work_david / 'Nadlan_deals'
apts = ['דירה', 'דירה בבית קומות']
muni_path = work_david/'gis/muni_il'
dis_dict = {}
dis_dict['ירושלים'] = 1
dis_dict['הצפון'] = 2
dis_dict['חיפה'] = 3
dis_dict['המרכז'] = 4
dis_dict['תל אביב'] = 5
dis_dict['הדרום'] = 6
dis_dict['יו"ש'] = 7
dis_en = {1: 'Jerusalem', 2: 'North', 3: 'Haifa',
4: 'Center', 5: 'Tel-Aviv', 6: 'South',
7: 'J&S'}
P2015_2_dict = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3,
6: 4,
7: 4,
8: 5,
9: 5,
10: 5}
P2015_2_name = {1: 'Very Peripheral',
2: 'Peripheral',
3: 'In Between',
4: 'Centralized',
5: 'Very Centralized'}
SEI2_dict = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3,
6: 3,
7: 4,
8: 4,
9: 5,
10: 5}
def convert_df_variable_names(df, path=work_david, drop=True):
import pandas as pd
vlist = pd.read_csv(path/'nadlan_database_variable_list.csv', header=None)
vlist.columns = ['old', 'new', 'final']
vlist['final'] = vlist['final'].fillna(vlist['new'])
vlist.set_index('old', inplace=True)
di = vlist['final'].to_dict()
df = df.rename(di, axis=1)
df = df[[x for x in df.columns if 'to_drop' not in x]]
return df
def extract_JS_settelments_from_stat_areas(path=work_david, muni_path=muni_path):
from cbs_procedures import read_statistical_areas_gis_file
from cbs_procedures import read_bycode_city_data
import pandas as pd
import geopandas as gpd
st = read_statistical_areas_gis_file(path)
print('extrcting JS big settelments...')
# J&S city codes from nadlan database:
js_cc = [3780, 3616, 3730, 3797, 3760, 3570, 3769, 3640, 3720,
3778]
# js_st = st[st['city_code'].isin(js_cc)]
ccs = st[st['city_code'].isin(js_cc)]['city_code'].unique()
js = st[st['city_code'].isin(ccs)]
sers = []
for cc in ccs:
cols = js[js['city_code']==cc].loc[:, ['city_code', 'NameHe', 'NameEn']]
ser = gpd.GeoSeries(js[js['city_code']==cc]['geometry'].unary_union)
ser['city_code'] = cols['city_code'].unique()[0]
ser['NameHe'] = cols['NameHe'].unique()[0]
ser['NameEn'] = cols['NameEn'].unique()[0]
ser = ser.rename({0: 'geometry'})
sers.append(ser)
gdf = gpd.GeoDataFrame(sers, geometry='geometry')
geos_to_complete = [1, 2, 4, 8, 9]
city_codes_to_complete = [ccs[x] for x in geos_to_complete]
bycode = read_bycode_city_data(path)
names = [bycode.loc[x]['NameHe'] for x in city_codes_to_complete]
js = gpd.read_file(muni_path/'JS_plans.shp')
geos = [js[js['P_NAME']==x].geometry.unary_union for x in names]
sers = []
for i, geo in zip(geos_to_complete, geos):
ser = gpd.GeoSeries(geo)
ser['city_code'] = gdf.iloc[i]['city_code']
ser['NameHe'] = gdf.iloc[i]['NameHe']
ser['NameEn'] = gdf.iloc[i]['NameEn']
ser = ser.rename({0: 'geometry'})
sers.append(ser)
gdf = gdf.drop(geos_to_complete, axis=0)
gdf1 = gpd.GeoDataFrame(sers, geometry='geometry')
gdf = pd.concat([gdf, gdf1], axis=0)
gdf['district'] = 'יו"ש'
return gdf
def prepare_just_city_codes_gis_areas(path=work_david, muni_path=muni_path):
import geopandas as gpd
import pandas as pd
from cbs_procedures import read_statistical_areas_gis_file
js = extract_JS_settelments_from_stat_areas(path, muni_path)
js = js.drop('district', axis=1)
js_ccs = js['city_code'].unique()
st = read_statistical_areas_gis_file(path)
ccs = st['city_code'].unique()
sers = []
ccs = [x for x in ccs if x not in js_ccs]
for cc in ccs:
geo = st[st['city_code'] == cc]['geometry'].unary_union
geo = remove_third_dimension(geo)
ser = gpd.GeoSeries(geo)
ser['city_code'] = cc
ser['NameHe'] = st[st['city_code'] == cc]['NameHe'].unique()[0]
ser['NameEn'] = st[st['city_code'] == cc]['NameEn'].unique()[0]
ser = ser.rename({0: 'geometry'})
sers.append(ser)
gdf_cc = gpd.GeoDataFrame(sers, geometry='geometry')
gdf = pd.concat([gdf_cc, js], axis=0)
gdf = gdf.set_index('city_code')
filename = 'Municipal+J&S+city_code_level.shp'
gdf.to_file(muni_path/filename, encoding='cp1255', index=True, na_rep='None')
print('{} was saved to {}.'.format(filename, muni_path))
return gdf
def prepare_municiapal_level_and_RC_gis_areas(path=work_david, muni_path=muni_path):
import geopandas as gpd
import pandas as pd
js = extract_JS_settelments_from_stat_areas(path, muni_path)
muni = gpd.read_file(path/'gis/muni_il/muni_il.shp')
muni['city_code'] = pd.to_numeric(muni['CR_LAMAS'])
muni['Machoz'] = muni['Machoz'].str.replace('צפון', 'הצפון')
muni['Machoz'] = muni['Machoz'].str.replace('דרום', 'הדרום')
muni['Machoz'] = muni['Machoz'].str.replace('מרכז', 'המרכז')
muni_type_dict = {}
muni_type_dict['עירייה'] = 'City'
muni_type_dict['מועצה מקומית'] = 'LC'
muni_type_dict['מועצה אזורית'] = 'RC'
muni_type_dict['ללא שיפוט'] = 'NA'
muni_type_dict['מועצה מקומית תעשייתית'] = 'ILC'
muni['muni_type'] = muni['Sug_Muni'].map(muni_type_dict)
muni['rc_code'] = muni[muni['muni_type'] ==
'RC']['CR_PNIM'].str[2:4].astype(int)
print('aggragating polygons to city/rc level...')
rc = muni[muni['muni_type'] == 'RC']
non_rc = muni[muni['muni_type'] != 'RC']
sers = []
for nrc in rc['rc_code'].unique():
geo = rc[rc['rc_code'] == nrc]['geometry'].unary_union
geo = remove_third_dimension(geo)
ser = gpd.GeoSeries(geo)
ser['rc_code'] = nrc
ser['NameHe'] = rc[rc['rc_code'] == nrc]['Muni_Heb'].unique()[0]
ser['NameEn'] = rc[rc['rc_code'] == nrc]['Muni_Eng'].unique()[0]
ser['district'] = rc[rc['rc_code'] == nrc]['Machoz'].unique()[0]
ser = ser.rename({0: 'geometry'})
sers.append(ser)
gdf_rc = gpd.GeoDataFrame(sers, geometry='geometry')
sers = []
ccs = non_rc[~non_rc['city_code'].isnull()]['city_code'].unique()
for cc in ccs:
# print(cc)
geo = non_rc[non_rc['city_code'] == cc]['geometry'].unary_union
geo = remove_third_dimension(geo)
ser = gpd.GeoSeries(geo)
ser['city_code'] = cc
ser['NameHe'] = non_rc[non_rc['city_code'] == cc]['Muni_Heb'].unique()[
0]
ser['NameEn'] = non_rc[non_rc['city_code'] == cc]['Muni_Eng'].unique()[
0]
ser['district'] = non_rc[non_rc['city_code'] == cc]['Machoz'].unique()[
0]
ser = ser.rename({0: 'geometry'})
sers.append(ser)
gdf_nonrc = gpd.GeoDataFrame(sers, geometry='geometry')
gdf = pd.concat([gdf_rc, gdf_nonrc, js], axis=0)
gdf = gdf.reset_index(drop=True)
filename = 'Municipal+J&S+Regional.shp'
gdf.to_file(muni_path/filename, encoding='cp1255')
print('{} was saved to {}.'.format(filename, muni_path))
return gdf
def remove_third_dimension(geom):
if geom.is_empty:
return geom
if isinstance(geom, Polygon):
exterior = geom.exterior
new_exterior = remove_third_dimension(exterior)
interiors = geom.interiors
new_interiors = []
for int in interiors:
new_interiors.append(remove_third_dimension(int))
return Polygon(new_exterior, new_interiors)
elif isinstance(geom, LinearRing):
return LinearRing([xy[0:2] for xy in list(geom.coords)])
elif isinstance(geom, LineString):
return LineString([xy[0:2] for xy in list(geom.coords)])
elif isinstance(geom, Point):
return Point([xy[0:2] for xy in list(geom.coords)])
elif isinstance(geom, MultiPoint):
points = list(geom.geoms)
new_points = []
for point in points:
new_points.append(remove_third_dimension(point))
return MultiPoint(new_points)
elif isinstance(geom, MultiLineString):
lines = list(geom.geoms)
new_lines = []
for line in lines:
new_lines.append(remove_third_dimension(line))
return MultiLineString(new_lines)
elif isinstance(geom, MultiPolygon):
pols = list(geom.geoms)
new_pols = []
for pol in pols:
new_pols.append(remove_third_dimension(pol))
return MultiPolygon(new_pols)
elif isinstance(geom, GeometryCollection):
geoms = list(geom.geoms)
new_geoms = []
for geom in geoms:
new_geoms.append(remove_third_dimension(geom))
return GeometryCollection(new_geoms)
else:
raise RuntimeError("Currently this type of geometry is not supported: {}".format(type(geom)))
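# Hedged usage sketch (illustration only): strip the Z coordinate from a
# simple 3D polygon so it can be combined with the 2D municipal geometries.
def _example_remove_third_dimension():
    poly_3d = Polygon([(0, 0, 5.0), (1, 0, 5.0), (1, 1, 5.0)])
    poly_2d = remove_third_dimension(poly_3d)
    # poly_2d.has_z is False; the coordinates are now (x, y) pairs only
    return poly_2d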
def create_israel_districts(path=muni_path):
import geopandas as gpd
import pandas as pd
from shapely.geometry import MultiPolygon, Polygon, LineString
from shapely.ops import cascaded_union
muni = gpd.read_file(path/'muni_il.shp')
muni['Machoz'] = muni['Machoz'].str.replace('צפון', 'הצפון')
muni['Machoz'] = muni['Machoz'].str.replace('דרום', 'הדרום')
muni['Machoz'] = muni['Machoz'].str.replace('מרכז', 'המרכז')
dists = muni['Machoz'].unique()
sers = []
for dis in dists:
print(dis)
# print(dis)
geo = muni[muni['Machoz'] == dis].geometry.unary_union
if isinstance(geo, MultiPolygon):
eps = 0.01
omega = cascaded_union([
Polygon(component.exterior).buffer(eps).buffer(-eps) for component in geo
])
geo = omega[0]
geo = remove_third_dimension(geo)
ser = gpd.GeoSeries(geo)
ser = ser.rename({0: 'geometry'})
# print(type(ser))
ser['district'] = dis
ser['district_EN'] = dis_en[dis_dict[dis]]
ser['district_code'] = dis_dict[dis]
bound = ser['geometry'].boundary
if not isinstance(bound, LineString):
ser['geometry'] = Polygon(bound[0])
# ser['geometry'] = ser['geometry'].simplify(0.1)
# ser.crs = muni.crs
sers.append(ser)
# now add J&S:
js = gpd.read_file(path/'J&S_matakim.geojson')
js = js.to_crs(2039)
js1 = gpd.GeoSeries(js.geometry.unary_union)
js1 = js1.rename({0: 'geometry'})
js1['district'] = 'יו"ש'
js1['district_EN'] = 'J&S'
js1['district_code'] = 7
js1 = gpd.GeoDataFrame([js1])
b = js1.geometry.boundary.values[0]
js1['geometry'] = Polygon(b[0])
js1.index = [6]
# sers.append(js1)
dgf = gpd.GeoDataFrame(sers, geometry='geometry', crs=muni.crs)
dgf = pd.concat([dgf, js1], axis=0)
dgf = dgf.rename(
{'district': 'NameHe', 'district_EN': 'NameEn', 'district_code': 'Code'}, axis=1)
dgf.geometry = dgf.geometry.simplify(10)
filename = 'Israel_districts_incl_J&S.shp'
dgf.to_file(path/filename)
print('{} was saved to {}.'.format(filename, path))
return dgf
def create_higher_group_category(df, existing_col='SEI_cluster', n_groups=2,
new_col='SEI2_cluster', names=None):
import pandas as pd
lower_group = sorted(df[existing_col].dropna().unique())
new_group = [lower_group[i:i+n_groups+1]
for i in range(0, len(lower_group), n_groups+1)]
new_dict = {}
if names is not None:
assert len(names) == len(new_group)
for i, item in enumerate(new_group):
if names is not None:
new_dict[names[i]] = new_group[i]
else:
new_dict[i+1] = new_group[i]
m = pd.Series(new_dict).explode().sort_values()
d = {x: y for (x, y) in zip(m.values, m.index)}
df[new_col] = df[existing_col].map(d)
return df
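# Hedged usage sketch (illustration only, synthetic cluster values): collapse
# the ten SEI clusters into five higher-level groups, two clusters per group.
def _example_create_higher_group_category():
    import pandas as pd
    df = pd.DataFrame({'SEI_cluster': list(range(1, 11))})
    df = create_higher_group_category(df, existing_col='SEI_cluster',
                                      n_groups=1, new_col='SEI2_cluster')
    # SEI2_cluster is 1 for clusters 1-2, 2 for 3-4, ..., 5 for 9-10
    return df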
# def geolocate_nadlan_deals_within_city_or_rc(df, muni_path=muni_path,
# savepath=work_david):
# import geopandas as gpd
# import pandas as pd
# # run load_nadlan_combined_deal with return_XY and without add_geo_layers:
# gdf = gpd.read_file(muni_path/'Municipal+J&S+Regional.shp')
# print('geolocating nadlan deals within city or RC...')
# total = gdf.index.size
# keys = []
# for i, row in gdf.iterrows():
# print('index: {} / {}'.format(i, total))
# within = df.geometry.within(row['geometry'])
# if within.sum() == 0:
# print('no deals found in {}'.format(row['NameHe']))
# continue
# inds = df.loc[within].index
# dff = pd.DataFrame(df.loc[inds, 'KEYVALUE'])
# dff['muni_gdf_index'] = i
# keys.append(dff)
# filename = 'Muni_gdf_KEYVALUE_index.csv'
# dff = pd.concat(keys, axis=0)
# dff.to_csv(savepath/filename, na_rep='None')
# print('Done!')
# return dff
def load_nadlan_combined_deal(path=work_david, times=['1998Q1', '2021Q1'],
dealamount_iqr=2, return_XY=False, add_bgr=None,
add_geo_layers=False, add_mean_salaries=False,
rename_vars=True, agg_rooms_345=True):
import pandas as pd
from Migration_main import path_glob
import geopandas as gpd
from cbs_procedures import read_statistical_areas_gis_file
import numpy as np
from cbs_procedures import read_mean_salary
def add_bgr_func(grp, bgr, rooms='Total'):
import numpy as np
cc_as_str = str(grp['city_code'].unique()[0])
try:
gr = bgr.loc[cc_as_str][rooms]
except KeyError:
gr = np.nan
grp['Building_Growth_Rate'] = gr
return grp
def add_stat_area_func(grp, stat_gdf):
city_code11 = grp['city_stat_code'].unique()[0]
geo = stat_gdf[stat_gdf['city_stat11']==city_code11].geometry.item()
grp['stat_geo'] = [geo]*len(grp)
return grp
def add_district_area_func(grp, dis_df):
district_code = grp['district_code'].unique()[0]
geo = dis_df[dis_df['Code']==district_code].geometry.item()
grp['district_geo'] = [geo]*len(grp)
return grp
def add_mean_salary_func(grp, sal):
year = grp['year'].unique()[0]
salary = sal[sal['year']==year]['mean_salary'].item()
grp['mean_salary'] = [salary]*len(grp)
return grp
file = path_glob(
path, 'Nadlan_deals_neighborhood_combined_processed_*.csv')[0]
print(file)
dtypes = {'FULLADRESS': 'object', 'Street': 'object', 'FLOORNO': float,
'NEWPROJECTTEXT': bool, 'PROJECTNAME': 'object', 'DEALAMOUNT': float}
df = pd.read_csv(file, na_values='None', parse_dates=['DEALDATETIME'],
dtype=dtypes)
# filter nans:
# df = df[~df['district'].isnull()]
if times is not None:
print('Slicing to times {} to {}.'.format(*times))
# df = df[df['year'].isin(np.arange(years[0], years[1] + 1))]
df = df.set_index('DEALDATETIME')
df = df.loc[times[0]:times[1]]
df = df.reset_index()
if dealamount_iqr is not None:
print('Filtering DEALAMOUNT with IQR of {}.'.format(dealamount_iqr))
df = df[~df.groupby('year')['DEALAMOUNT'].apply(
is_outlier, method='iqr', k=dealamount_iqr)]
df = df.reset_index(drop=True)
# print('loading gdf muni index...')
df['P2015_cluster2'] = df['P2015_cluster'].map(P2015_2_dict)
if return_XY:
inds = df[df['X'] == 0].index
df.loc[inds, 'X'] = np.nan
inds = df[df['Y'] == 0].index
df.loc[inds, 'Y'] = np.nan
df = gpd.GeoDataFrame(
df, geometry=gpd.points_from_xy(df['X'], df['Y']))
if add_mean_salaries:
print('adding mean salaries.')
sal = read_mean_salary()
df = df.groupby('year').apply(add_mean_salary_func, sal)
df['MSAL_per_ASSET'] = (df['DEALAMOUNT'] / df['mean_salary']).round()
if add_bgr is not None:
print('Adding Building Growth rate.')
file = path_glob(path, 'Building_*_growth_rate_*.csv')[0]
bgr = pd.read_csv(file, na_values='None', index_col='ID')
df = df.groupby('city_code').apply(add_bgr_func, bgr, rooms=add_bgr)
df.loc[df['Building_Growth_Rate'] == 0] = np.nan
if add_geo_layers:
print('adding statistical area geometry')
stat_gdf = read_statistical_areas_gis_file(path)
df = df.groupby('city_stat_code').apply(add_stat_area_func, stat_gdf)
print('adding district area geometry')
dis_df = gpd.read_file(path/'gis/muni_il/Israel_districts_incl_J&S.shp')
df['district_code'] = df['district'].map(dis_dict)
df = df.groupby('district_code').apply(add_district_area_func, dis_df)
if agg_rooms_345:
inds = df.loc[(df['ASSETROOMNUM']>=3) & (df['ASSETROOMNUM']<4)].index
df.loc[inds, 'Rooms_345'] = 3
inds = df.loc[(df['ASSETROOMNUM']>=4) & (df['ASSETROOMNUM']<5)].index
df.loc[inds, 'Rooms_345'] = 4
inds = df.loc[(df['ASSETROOMNUM']>=5) & (df['ASSETROOMNUM']<6)].index
df.loc[inds, 'Rooms_345'] = 5
df['Rooms_345'] = df['Rooms_345'].astype(pd.Int64Dtype()).astype('category')
if rename_vars:
print('renaming vars.')
var_names = pd.read_excel(path/'nadlan_database_variable_list.xls', header=None)
var_di = dict(zip(var_names[0], var_names[1]))
df = df.rename(var_di, axis=1)
return df
def load_nadlan_deals(path=work_david, csv=True,
times=['1998Q1', '2021Q1'], dealamount_iqr=2,
fix_new_status=True, add_SEI2_cluster=True,
add_peripheri_data=True, add_bycode_data=True
):
import pandas as pd
import numpy as np
from Migration_main import path_glob
from cbs_procedures import read_periphery_index
from cbs_procedures import read_bycode_city_data
if csv:
file = path_glob(path, 'Nadlan_deals_processed_*.csv')
dtypes = {'FULLADRESS': 'object', 'Street': 'object', 'FLOORNO': 'object',
'NEWPROJECTTEXT': 'object', 'PROJECTNAME': 'object', 'DEALAMOUNT': float}
df = pd.read_csv(file[0], na_values='None', parse_dates=['DEALDATETIME'],
dtype=dtypes)
else:
file = path_glob(path, 'Nadlan_deals_processed_*.hdf')
df = pd.read_hdf(file)
df['year'] = df['DEALDATETIME'].dt.year
df['month'] = df['DEALDATETIME'].dt.month
df['quarter'] = df['DEALDATETIME'].dt.quarter
df['YQ'] = df['year'].astype(str) + 'Q' + df['quarter'].astype(str)
if times is not None:
print('Slicing to times {} to {}.'.format(*times))
# df = df[df['year'].isin(np.arange(years[0], years[1] + 1))]
df = df.set_index('DEALDATETIME')
df = df.loc[times[0]:times[1]]
df = df.reset_index()
if dealamount_iqr is not None:
print('Filtering DEALAMOUNT with IQR of {}.'.format(dealamount_iqr))
df = df[~df.groupby('year')['DEALAMOUNT'].apply(
is_outlier, method='iqr', k=dealamount_iqr)]
if fix_new_status:
inds = df.loc[(df['Age'] < 0) & (df['Age'] > -5)].index
df.loc[inds, 'New'] = True
df['NEWPROJECTTEXT'] = pd.to_numeric(df['NEWPROJECTTEXT']).fillna(0)
df['NEWPROJECTTEXT'] = df['NEWPROJECTTEXT'].astype(bool)
if add_SEI2_cluster:
SEI_cluster = [x+1 for x in range(10)]
new = [SEI_cluster[i:i+2] for i in range(0, len(SEI_cluster), 2)]
SEI2 = {}
for i, item in enumerate(new):
SEI2[i+1] = new[i]
m = pd.Series(SEI2).explode().sort_values()
d = {x: y for (x, y) in zip(m.values, m.index)}
df['SEI2_cluster'] = df['SEI_cluster'].map(d)
if add_peripheri_data:
pdf = read_periphery_index()
cols = ['TLV_proximity_value', 'TLV_proximity_rank', 'PAI_value',
'PAI_rank', 'P2015_value', 'P2015_rank', 'P2015_cluster']
dicts = [pdf[x].to_dict() for x in cols]
series = [df['city_code'].map(x) for x in dicts]
pdf1 = pd.concat(series, axis=1)
pdf1.columns = cols
df = pd.concat([df, pdf1], axis=1)
if add_bycode_data:
bdf = read_bycode_city_data()
cols = ['district', 'district_EN', 'region', 'natural_area']
dicts = [bdf[x].to_dict() for x in cols]
series = [df['city_code'].map(x) for x in dicts]
bdf1 = pd.concat(series, axis=1)
bdf1.columns = cols
df = pd.concat([df, bdf1], axis=1)
return df
def prepare_periphery_sei_index_map(path=work_david, muni_path=muni_path):
from cbs_procedures import read_periphery_index
from cbs_procedures import read_social_economic_index
import geopandas as gpd
df = read_periphery_index(path)
sei = read_social_economic_index(path, return_stat=False)
muni_gdf = gpd.read_file(muni_path/'Municipal+J&S+Regional.shp')
# first put RC P2015 cluster, rank and value:
rc = df[df['Type'] == 'RC']
sei_rc = sei[sei['Type'] == 'RC']
for rc_n in rc['municipal_status'].unique():
ind = muni_gdf[muni_gdf['rc_code'] == rc_n].index
muni_gdf.loc[ind, 'P2015_value'] = rc[rc['municipal_status']
== rc_n]['P2015_value'].mean()
muni_gdf.loc[ind, 'P2015_cluster'] = rc[rc['municipal_status']
== rc_n]['RC_P2015_cluster'].unique()[0]
muni_gdf.loc[ind, 'P2015_cluster2'] = rc[rc['municipal_status']
== rc_n]['RC_P2_cluster'].unique()[0]
muni_gdf.loc[ind, 'SEI_value'] = sei_rc[sei_rc['muni_state']
== rc_n]['index2017'].mean()
muni_gdf.loc[ind, 'SEI_cluster'] = sei_rc[sei_rc['muni_state']
== rc_n]['RC_cluster2017'].unique()[0]
muni_gdf.loc[ind, 'SEI2_cluster'] = sei_rc[sei_rc['muni_state']
== rc_n]['RC_SEI2_cluster'].unique()[0]
city = df[df['Type'] == 'City/LC']
sei_city = sei[sei['Type'] == 'City/LC']
for cc in city.index:
ind = muni_gdf[muni_gdf['city_code'] == cc].index
muni_gdf.loc[ind, 'P2015_value'] = city.loc[cc, 'P2015_value']
muni_gdf.loc[ind, 'P2015_cluster'] = city.loc[cc, 'P2015_cluster']
muni_gdf.loc[ind, 'P2015_cluster2'] = city.loc[cc, 'P2_cluster']
muni_gdf.loc[ind, 'SEI_value'] = sei_city.loc[cc, 'index2017']
muni_gdf.loc[ind, 'SEI_cluster'] = sei_city.loc[cc, 'cluster2017']
muni_gdf.loc[ind, 'SEI2_cluster'] = sei_city.loc[cc, 'SEI2_cluster']
return muni_gdf
def plot_mean_salary_per_asset(df, year=2000, rooms=[3, 4]):
import geopandas as gpd
import seaborn as sns
from pysal.viz.splot.mapping import vba_choropleth
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.5)
df = df[df['DEALNATUREDESCRIPTION'].isin(apts)]
print('picked {} only.'.format(apts))
df = df[df['ASSETROOMNUM'].isin(rooms)]
print('picked {} rooms only.'.format(rooms))
gdf = gpd.GeoDataFrame(df, geometry='district_geo')
print('picked year {}.'.format(year))
gdf = gdf[gdf['year'] == year]
# return gdf
fig, axs = plt.subplots(1, 2, figsize=(15, 10))
gdf['district_counts'] = gdf.groupby('district')['DEALAMOUNT'].transform('count')
x = gdf['MSAL_per_ASSET'].values
y = gdf['district_counts'].values
gdf.plot(ax=axs[0],
column="MSAL_per_ASSET",
legend=True,
scheme='quantiles',
cmap='Blues')
# vba_choropleth(x, y, gdf, rgb_mapclassify=dict(classifier='quantiles'),
# alpha_mapclassify=dict(classifier='quantiles'),
# cmap='RdBu', ax=axs[1])
fig.tight_layout()
return fig
def calculate_pct_change_by_yearly_periods_and_grps(df,
period1=[1999, 2007],
period2=[2017, 2019],
col='DEALAMOUNT',
agg='median',
grp='gdf_muni_index',
min_p1_deals=50):
print('calculating pct change for {} col using {} grouping and {} statistic.'.format(col, grp, agg))
print('periods are: {}-{} compared to {}-{}'.format(period2[0], period2[1], period1[0], period1[1]))
df1 = df.loc[(df['year']>=period1[0]) & (df['year']<=period1[1])]
df2 = df.loc[(df['year']>=period2[0]) & (df['year']<=period2[1])]
df1_agg = df1.groupby(grp).agg(agg)
df1_cnt = df1.groupby(grp)[col].agg('count')
df2_agg = df2.groupby(grp).agg(agg)
df2_cnt = df2.groupby(grp)[col].agg('count')
df_col = df2_agg[col] - df1_agg[col]
df_col /= df1_agg[col]
df_col *= 100
df_col = df_col.round()
df_col = df_col.to_frame('pct_change')
df_col['period1_cnt'] = df1_cnt
df_col['period2_cnt'] = df2_cnt
if min_p1_deals is not None:
print('filtering minimum deals of {} for {}-{} period.'.format(min_p1_deals, period1[0], period1[1]))
df_col = df_col[df_col['period1_cnt']>=min_p1_deals]
return df_col
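# Hedged usage sketch (illustration only, tiny synthetic deals table): median
# price change of a single group between the default 1999-2007 and 2017-2019
# periods, with the minimum-deals filter switched off.
def _example_calculate_pct_change():
    import pandas as pd
    df = pd.DataFrame({'year': [2000, 2005, 2018, 2019],
                       'DEALAMOUNT': [1.0e6, 1.2e6, 2.0e6, 2.4e6],
                       'gdf_muni_index': [1, 1, 1, 1]})
    # With these numbers the median rises from 1.1e6 to 2.2e6, i.e. +100%.
    return calculate_pct_change_by_yearly_periods_and_grps(
        df, min_p1_deals=None)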
def plot_choropleth_muni_level(df, rooms=[3, 4], muni_path=muni_path,
hue='SEI2_cluster',
col='Price_per_m2'):
# import geopandas as gpd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib.dates as mdates
import contextily as ctx
sns.set_theme(style='ticks', font_scale=1.5)
cmap = sns.dark_palette((260, 75, 60), input="husl", n_colors=5, as_cmap=True)
cmap = sns.cubehelix_palette(5, gamma = 1, as_cmap=True)
cmap = sns.cubehelix_palette(5, start = .5, rot = -.75, as_cmap=True)
# cmap = sns.color_palette("Greens", 5, as_cmap=True)
# colors = sns.color_palette("RdPu", 10)[5:]
# df = df[df['DEALNATUREDESCRIPTION'].isin(apts)]
df = df[df['Type_of_asset'].isin(apts)]
df = df.loc[(df['Sale_year'] >= 1999) & (df['Sale_year'] <= 2019)]
print('picked {} only.'.format(apts))
if rooms is not None:
df = df[(df['Rooms'] >= rooms[0]) &
(df['Rooms'] <= rooms[1])]
# df = df[df['ASSETROOMNUM'].isin(rooms)]
print('picked {} rooms only.'.format(rooms))
if col == 'Price_per_m2':
ylabel = r'Median price per M$^2$ [NIS]'
if hue == 'SEI2_cluster':
leg_title = 'Social-Economic cluster'
if hue not in df.columns:
df['SEI2_cluster'] = df['SEI_cluster_2017'].map(SEI2_dict)
elif hue == 'P2015_cluster2':
leg_title = 'Periphery cluster'
if hue not in df.columns:
df['P2015_cluster2'] = df['Periph_cluster'].map(P2015_2_dict)
fig, ax = plt.subplots(
1, 2, gridspec_kw={'width_ratios': [4, 1]}, figsize=(20, 10))
# df['P2015_cluster2'] = df['P2015_cluster2'].map(P2015_2_name)
# df = df.rename({'P2015_cluster2': 'Centrality level'}, axis=1)
df['Sale_year'] = pd.to_datetime(df['Sale_year'], format='%Y')
sns.lineplot(data=df, x='Sale_year', y=col, hue=hue, n_boot=100,
palette=cmap, estimator="mean", ax=ax[0], ci=99,
style=hue, lw=2, seed=1)
ax[0].grid(True)
ax[0].set_ylabel(ylabel)
gdf = prepare_periphery_sei_index_map(work_david, muni_path)
gdf.crs = 2039
gdf = gdf.to_crs(3857)
gdf.plot(column=hue, categorical=True, legend=False,
cmap=cmap, ax=ax[1], edgecolor='k', linewidth=0.25, alpha=0.9)
handles, labels = ax[0].get_legend_handles_labels()
labels = [int(float(x)) for x in labels]
labels = ['{:d}'.format(x) for x in labels]
ax[0].legend(handles=handles, labels=labels, title=leg_title)
# leg.set_bbox_to_anchor((0.0, 1.0, 0.0, 0.0))
# ax[1].set_axis_off()
ax[0].set_xlabel('')
ax[1].tick_params(left=False, labelleft=False, bottom=False, labelbottom=False)
ax[0].xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
ax[0].xaxis.set_major_locator(mdates.YearLocator(2))
ax[0].tick_params(axis='x', rotation=30)
fig.tight_layout()
fig.subplots_adjust(top=0.942,
bottom=0.089,
left=0.081,
right=0.987,
hspace=0.02,
wspace=0.0)
ctx.add_basemap(ax[1], url=ctx.providers.Stamen.TerrainBackground)
# for axis in ['top','bottom','left','right']:
# ax[1].spines[axis].set_linewidth(0.5)
return fig
# def plot_choropleth_muni_level(df, rooms=[3, 4], muni_path=muni_path,
# muni_type='gdf_muni_index', min_p1=50,
# agg='median', col='NIS_per_M2'):
# import geopandas as gpd
# import seaborn as sns
# import matplotlib.pyplot as plt
# import numpy as np
# # def add_muni_geo_func(grp, muni_gdf):
# # inds = grp['gdf_muni_index'].unique()[0]
# # geo = muni_gdf.loc[inds].geometry
# # grp['muni_geo'] = [geo]*len(grp)
# # return grp
# sns.set_theme(style='ticks', font_scale=1.5)
# df = df[df['DEALNATUREDESCRIPTION'].isin(apts)]
# df = df.loc[(df['year']>=1999) & (df['year']<=2019)]
# print('picked {} only.'.format(apts))
# if rooms is not None:
# df = df[(df['ASSETROOMNUM']>=rooms[0]) & (df['ASSETROOMNUM']<=rooms[1])]
# # df = df[df['ASSETROOMNUM'].isin(rooms)]
# print('picked {} rooms only.'.format(rooms))
# df_pct = calculate_pct_change_by_yearly_periods_and_grps(df,
# grp=muni_type,
# min_p1_deals=min_p1,
# col=col,
# agg=agg)
# fig, ax = plt.subplots(1, 2, gridspec_kw={'width_ratios': [4, 1]}, figsize=(20, 10))
# df['P2015_cluster2'] = df['P2015_cluster2'].map(P2015_2_name)
# df = df.rename({'P2015_cluster2': 'Centrality level'}, axis=1)
# sns.lineplot(data=df, x='year', y=col, hue='Centrality level', n_boot=100,
# palette='Set1',estimator=np.median, ax=ax[0],
# hue_order=[x for x in reversed(P2015_2_name.values())])
# ax[0].grid(True)
# if muni_type=='gdf_muni_index':
# muni_gdf = gpd.read_file(muni_path/'Municipal+J&S+Regional.shp')
# elif muni_type=='city_code':
# muni_gdf = gpd.read_file(muni_path/'Municipal+J&S+city_code_level.shp')
# muni_gdf = muni_gdf.set_index('city_code')
# # df = df.groupby('gdf_muni_index').apply(add_muni_geo_func, muni_gdf)
# # gdf = gpd.GeoDataFrame(df, geometry='muni_geo')
# # inds = muni_gdf[muni_gdf.index.isin(df.index)].index
# df_pct.loc[:, 'geometry'] = muni_gdf.loc[df_pct.index]['geometry']
# gdf = gpd.GeoDataFrame(df_pct, geometry='geometry')
# gdf[gdf['pct_change'] >= 0].plot('pct_change', legend=True, scheme="User_Defined",
# k=5, cmap='viridis', classification_kwds=dict(bins=[50, 100, 150, 200, 250]),
# ax=ax[1])
# return gdf
def add_city_polygons_to_nadlan_df(df, muni_path=muni_path):
import geopandas as gpd
muni = load_muni_il(path=muni_path)
ccs = list(set(df['city_code']).intersection(set(muni['CR_LAMAS'])))
muni = muni[muni['CR_LAMAS'].isin(ccs)]
muni = muni.reset_index(drop=True)
df = df[df['city_code'].isin(ccs)]
df = df.drop('geometry', axis=1)
df = df.reset_index(drop=True)
# TODO: fix this:
for i, row in muni.iterrows():
cc = row['CR_LAMAS']
geo = row['geometry']
inds = df[df['city_code']==cc].index
df.loc[inds, 'geometry'] = [geo]*len(inds)
df = gpd.GeoDataFrame(df,geometry='geometry')
return df
def is_outlier(s, k=3, method='std'):
# add IQR
if method == 'std':
lower_limit = s.mean() - (s.std() * k)
upper_limit = s.mean() + (s.std() * k)
elif method == 'iqr':
q1 = s.quantile(0.25)
q3 = s.quantile(0.75)
iqr = q3-q1 # Interquartile range
lower_limit = q1 - k * iqr
upper_limit = q3 + k * iqr
return ~s.between(lower_limit, upper_limit)
# df1 = df[~df.groupby('year')['DEALAMOUNT'].apply(is_outlier)]
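# Hedged usage sketch (illustration only): flag values falling outside
# k * IQR, mirroring the commented groupby call above.
def _example_is_outlier():
    import pandas as pd
    prices = pd.Series([1.0, 1.1, 0.9, 1.2, 50.0])
    mask = is_outlier(prices, k=2, method='iqr')
    # mask is True only for the 50.0 entry
    return prices[~mask]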
def keep_only_historic_changed_assets(df):
df = df.reset_index(drop=True)
grps = df.groupby('GUSH').groups
inds = []
for gush, ind in grps.items():
if len(ind) > 1:
inds.append([x for x in ind])
flat_list = [item for sublist in inds for item in sublist]
df_slice = df.loc[flat_list]
return df_slice
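# Hedged usage sketch (illustration only): keep only parcels (GUSH) that
# appear in more than one deal, i.e. assets that were resold.
def _example_keep_only_historic_changed_assets():
    import pandas as pd
    df = pd.DataFrame({'GUSH': ['A', 'A', 'B'],
                       'DEALAMOUNT': [1.0e6, 1.3e6, 2.0e6]})
    # Only the two 'A' rows come back, since 'B' was sold just once.
    return keep_only_historic_changed_assets(df)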
def create_neighborhood_polygons(gdf):
""" an attempt to muild neighborhoods polygons from asset points"""
import numpy as np
gdf = gdf.reset_index()
neis = gdf['Neighborhood'].unique()
gdf['neighborhood_shape'] = gdf.geometry
# Must be a geodataframe:
for nei in neis:
gdf1 = gdf[gdf['Neighborhood'] == nei]
inds = gdf1.index
polygon = gdf1.geometry.unary_union.convex_hull
# gdf.loc[inds, 'neighborhood_shape'] = [polygon for x in range(len(inds))]
gdf.loc[inds, 'neighborhood_shape'] = polygon
return gdf
def convert_da_to_long_form_df(da, var_name=None, value_name=None):
""" convert xarray dataarray to long form pandas df
to use with seaborn"""
import xarray as xr
if var_name is None:
var_name = 'var'
if value_name is None:
value_name = 'value'
dims = [x for x in da.dims]
if isinstance(da, xr.Dataset):
value_vars = [x for x in da]
elif isinstance(da, xr.DataArray):
value_vars = [da.name]
df = da.to_dataframe()
for i, dim in enumerate(da.dims):
df[dim] = df.index.get_level_values(i)
df = df.melt(value_vars=value_vars, value_name=value_name,
id_vars=dims, var_name=var_name)
return df
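# Hedged usage sketch (illustration only): melt a small 2-D DataArray into
# the long-form table that seaborn expects.
def _example_convert_da_to_long_form_df():
    import numpy as np
    import xarray as xr
    da = xr.DataArray(np.arange(6).reshape(2, 3),
                      dims=['year', 'city'],
                      coords={'year': [2019, 2020], 'city': ['a', 'b', 'c']},
                      name='deals')
    # Returns columns: year, city, var (holding 'deals') and deal_count.
    return convert_da_to_long_form_df(da, value_name='deal_count')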
def plot_jointplot(df, x='mean_distance_to_28_mokdim', y='Periph_value',
xlim=[30, 130], ylim=[None, None], xlabel='Distance to Employment Centers [km]', ylabel='Periphery Index'):
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
corr = df[[x,y]].corr('spearman')[y][0]
n = len(df[[x,y]].dropna())
# print(corr)
g = sns.JointGrid(data=df, x=x,
y=y, height=7.5)
g.plot_joint(sns.kdeplot, fill=True, gridsize=100)
# g.plot_joint(sns.histplot, fill=True)
# g.plot_joint(sns.kdeplot, zorder=-1, levels=6)
g.plot_marginals(sns.histplot)
g.ax_joint.set_xlim(*xlim)
g.ax_joint.set_ylim(*ylim)
g.ax_joint.grid(True)
if xlabel is not None:
g.ax_joint.set_xlabel(xlabel)
if ylabel is not None:
g.ax_joint.set_ylabel(ylabel)
textstr = 'Spearman correlation: {:.2f}, n={}'.format(corr, n)
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5, edgecolor='k')
print(textstr)
# g.ax_joint.text(0.24, 0.9, textstr,
# verticalalignment='top', horizontalalignment='center',
# transform=g.ax_joint.transAxes, color='k', fontsize=18, bbox=props)
g.fig.tight_layout()
return g
def calculate_recurrent_times_and_pct_change(df, plot=True):
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
df = df.sort_values('DEALDATETIME')
dff = df.groupby('GUSH')['DEALAMOUNT'].count()
dff2 = df[df['GUSH'].isin(dff[dff > 1].index)]
seconds_between_deals = dff2.groupby(
'GUSH')['DEALDATETIME'].diff().dt.total_seconds()
deals_pct_change = dff2.groupby('GUSH')['DEALAMOUNT'].pct_change()
df['years_between_deals'] = seconds_between_deals / 60 / 60 / 24 / 365.25
df['mean_years_between_deals'] = df.groupby(
'GUSH')['years_between_deals'].transform('mean')
df['deals_pct_change'] = deals_pct_change * 100
df['mean_deals_pct_change'] = df.groupby(
'GUSH')['deals_pct_change'].transform('mean')
# drop duplicated dt's:
deals_inds_to_drop = deals_pct_change[deals_pct_change == 0].index
seconds_inds_to_drop = seconds_between_deals[seconds_between_deals == 0].index
inds_to_drop = deals_inds_to_drop.union(seconds_inds_to_drop)
df = df.drop(inds_to_drop, axis=0)
print('Dropped {} deals'.format(len(inds_to_drop)))
if plot:
g = sns.JointGrid(data=df, x='years_between_deals',
y='deals_pct_change', height=7.5)
g.plot_joint(sns.kdeplot, fill=True, cut=1, gridsize=100)
g.plot_marginals(sns.histplot)
g.ax_joint.grid(True)
g.ax_joint.set_xlim(-1, 21)
g.ax_joint.set_ylim(-100, 260)
g.ax_joint.set_ylabel('Change in recurrent deals [%]')
g.ax_joint.set_xlabel('Years between recurrent deals')
g.fig.tight_layout()
return g
else:
return df
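# Illustration-only helper (fabricated GUSH ids, dates and prices): the two core
# groupby operations used above, i.e. time between consecutive deals of the same
# parcel and the percent change of the deal price.
def _recurrent_deals_demo():
    import pandas as pd
    toy = pd.DataFrame({
        'GUSH': ['A', 'A', 'B', 'B', 'B'],
        'DEALDATETIME': pd.to_datetime(['2015-01-01', '2018-01-01',
                                        '2010-06-01', '2012-06-01', '2019-06-01']),
        'DEALAMOUNT': [1.0e6, 1.3e6, 0.8e6, 0.9e6, 1.5e6],
    }).sort_values('DEALDATETIME')
    years_between = (toy.groupby('GUSH')['DEALDATETIME'].diff()
                     .dt.total_seconds() / 60 / 60 / 24 / 365.25)
    pct_change = toy.groupby('GUSH')['DEALAMOUNT'].pct_change() * 100
    return years_between, pct_change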
def plot_recurrent_deals(df, max_number_of_sells=6, rooms=[2, 3, 4, 5]):
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
sns.set_theme(style='ticks', font_scale=1.5)
if rooms is not None:
df = df[df['ASSETROOMNUM'].isin(rooms)]
df = df.rename({'ASSETROOMNUM': 'Number of rooms'}, axis=1)
df['Number of rooms'] = df['Number of rooms'].astype(int)
df = df[df['DEALNATUREDESCRIPTION'].isin(apts)]
dff = df.groupby(['GUSH', 'Number of rooms'])['DEALAMOUNT'].count()
dff = dff[dff <= max_number_of_sells]
df1 = dff.groupby('Number of rooms').value_counts().rename('Total deals').reset_index()
f, ax = plt.subplots(figsize=(7, 7))
sns.lineplot(data=df1, x='DEALAMOUNT', y='Total deals', hue='Number of rooms',
style='Number of rooms', palette='Set1', ax=ax, markers=True,
markersize=15)
ax.set(xscale="linear", yscale="log")
ax.grid(True)
ax.set_xlabel('Number of times an apartment is sold')
ax.set_ylabel('Total deals')
else:
df1 = df[df['DEALNATUREDESCRIPTION'].isin(apts)].groupby('GUSH')[
'DEALAMOUNT'].count()
v = np.arange(1, max_number_of_sells + 1)
n = [len(df1[df1 == x]) for x in v]
dfn = pd.DataFrame(n, index=v)
dfn.columns = ['Number of Deals']
f, ax = plt.subplots(figsize=(7, 7))
ax = sns.scatterplot(x=dfn.index, y='Number of Deals',
data=dfn, ax=ax, s=50)
p = np.polyfit(v, np.log(n), 1)
fit = np.exp(np.polyval(p, v))
print(fit)
dfn['Fit'] = fit
ax = sns.lineplot(x=dfn.index, y='Fit', data=dfn, ax=ax, color='r')
ax.set(xscale="linear", yscale="log")
ax.grid(True)
ax.set_xlabel('Number of times an apartment is sold')
return f
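# Illustration-only helper (synthetic counts): the log-linear fit used in the
# else-branch above. A straight line fitted to log(counts) corresponds to an
# exponential decay in the number of apartments sold k times.
def _log_linear_fit_demo():
    import numpy as np
    k = np.arange(1, 7)                                 # times an apartment is sold
    counts = np.array([5000, 1600, 500, 160, 50, 16])   # roughly exponential decay
    slope, intercept = np.polyfit(k, np.log(counts), 1)
    fitted = np.exp(np.polyval([slope, intercept], k))
    return slope, fitted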
def plot_deal_amount_room_number(df, rooms=[2, 3, 4, 5],
path=nadlan_path, yrmin='2000', yrmax='2020',
just_with_historic_change=False):
import seaborn as sns
import matplotlib.pyplot as plt
from cbs_procedures import read_bycode_city_data
import numpy as np
sns.set_theme(style='ticks', font_scale=1.5)
# df = df.loc[(df['ASSETROOMNUM'] >= room_min) &
# (df['ASSETROOMNUM'] <= room_max)]
df = df[df['ASSETROOMNUM'].isin(rooms)]
df.set_index('DEALDATETIME', inplace=True)
df = df.loc[yrmin:yrmax]
city_code = df.loc[:, 'city_code'].unique()[0]
df = df.rename({'ASSETROOMNUM': 'Rooms', 'DEALAMOUNT': 'Price'}, axis=1)
df['Price'] /= 1000000
fig, ax = plt.subplots(figsize=(10, 8))
ax = sns.lineplot(data=df, x=df.index.year, y='Price', hue='Rooms',
ci='sd', ax=ax, palette='Set1')
ax.grid(True)
ax.set_xlabel('')
ax.set_xticks(np.arange(int(yrmin), int(yrmax) + 1, 2))
ax.tick_params(axis='x', rotation=30)
ax.set_ylabel('Price [millions of NIS]')
bycode = read_bycode_city_data()
city_name = bycode[bycode['city_code'] == city_code]['NameEn'].values[0]
fig.suptitle('Real-Estate prices in {}'.format(city_name))
return ax
def plot_groupby_m2_price_time_series(df, grps=['City', 'Neighborhood'],
col='NIS_per_M2'):
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
groups = grps.copy()
groups.append('year')
dfn = df.groupby(groups, as_index=False)[col].mean().groupby('year').mean()
std = df.groupby(groups, as_index=False)[
col].mean().groupby('year').std()[col].values
dfn['plus_std'] = dfn[col] + std
dfn['minus_std'] = dfn[col] - std
fig, ax = plt.subplots(figsize=(14, 5))
dfn[col].plot(ax=ax)
ax.fill_between(dfn.index, dfn['minus_std'], dfn['plus_std'], alpha=0.3)
ax.set_ylabel(r'NIS per M$^2$')
fig.tight_layout()
ax.grid(True)
return fig
def plot_room_number_deals(df, rooms_range=[2, 6]):
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
if rooms_range is not None:
df = df.loc[(df['ASSETROOMNUM'] >= rooms_range[0]) &
(df['ASSETROOMNUM'] <= rooms_range[1])]
dff = df.groupby(['ASSETROOMNUM', 'year'])['DEALAMOUNT'].count()
da = dff.to_xarray()
dff = convert_da_to_long_form_df(da, value_name='Deals')
ax = sns.barplot(data=dff, x='year', y='Deals',
hue='ASSETROOMNUM', palette='Set1')
ax.grid(True)
return dff
def plot_price_per_m2(df, x='Sale_year', y='Price_per_m2',
n_boot=100, filter_price_iqr=2, remove_P2_cluster=1):
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
sns.set_theme(style='ticks', font_scale=1.5)
df = df[df['Type_of_asset'].isin(apts)]
if filter_price_iqr is not None:
df = df[~df.groupby('Sale_year')['Price_per_m2'].apply(
is_outlier, method='iqr', k=filter_price_iqr)]
# rename, remove, etc:
if remove_P2_cluster is not None:
inds = df[df['P2015_cluster2']==remove_P2_cluster].index
df.loc[inds, 'P2015_cluster2'] = np.nan
hue_order = [x for x in reversed(P2015_2_name.values())][:-1]
else:
hue_order = [x for x in reversed(P2015_2_name.values())]
df['P2015_cluster2'] = df['P2015_cluster2'].map(P2015_2_name)
df['Rooms_345'] = df['Rooms_345'].astype(pd.Int64Dtype()).astype('category')
df = df.rename({'Rooms_345': 'Number of Rooms', 'P2015_cluster2': 'Periphery cluster'}, axis=1)
fig, ax = plt.subplots(figsize=(15, 7))
sns.lineplot(data=df, x=x, y=y, hue='Periphery cluster', style='Number of Rooms',
ci=95, n_boot=n_boot, palette='viridis', ax=ax,
hue_order=hue_order)
ax.grid(True)
ax.set_xlabel('')
ax.set_ylabel(r'Median apartment price per m$^2$')
fig.tight_layout()
return
def plot_price_per_m2_hist(df, years=[2009, 2020], filter_price_iqr=2,
P2_cluster=5):
import seaborn as sns
# import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.5)
df = df[df['Type_of_asset'].isin(apts)]
if filter_price_iqr is not None:
df = df[~df.groupby('Sale_year')['Price_per_m2'].apply(
is_outlier, method='iqr', k=filter_price_iqr)]
df1 = df[df['P2015_cluster2']==P2_cluster]
df1 = df1.rename({'Rooms_345': 'Number of Rooms', 'P2015_cluster2': 'Periphery cluster'}, axis=1)
df1 = df1.loc[(df1['Sale_year']>=years[0])&(df1['Sale_year']<=years[-1])]
fg = sns.displot(data=df1, x='Price_per_m2', hue='Number of Rooms',col='Sale_year',
col_wrap=3, kde=True)
# ax.grid(True)
# ax.set_xlabel('')
# ax.set_ylabel(r'Median apartment price per m$^2$')
# fg.fig.tight_layout()
fg.set_titles('Sale Year = {col_name}')
fg.set_xlabels(r'Price per m$^2$')
fg.fig.subplots_adjust(top=0.938,
bottom=0.077,
left=0.057,
right=0.845,
hspace=0.211,
wspace=0.073)
return fg
def compare_kiryat_gat_israel_dealamount(df_kg, df_isr):
# TODO: complete this
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
df_kg = df_kg.groupby(['rooms', 'YQ'])[
'DEALAMOUNT'].mean().to_frame().unstack().T.droplevel(0)
df_isr = df_isr.groupby(['rooms', 'YQ'])[
'DEALAMOUNT'].mean().to_frame().unstack().T.droplevel(0)
# df_kg['YQ'] = df_kg.index
# df_isr['YQ'] = df_isr.index
# df_kg = df_kg.melt(id_vars='YQ', value_name='price_in_kg')
# df_isr = df_isr.melt(id_vars='YQ', value_name='price_in_israel')
# df = pd.concat([df_kg, df_isr], axis=1)
df = df_kg / df_isr
# df['price_diff'] = df['price_in_kg'] - df['price_in_israel']
fig, ax = plt.subplots(figsize=(15.5, 6))
df1 = df # / 1e6
df1.index = pd.to_datetime(df1.index)
df1.plot(ax=ax, cmap=sns.color_palette("tab10", as_cmap=True))
ax.set_ylabel('Price ratio (Kiryat Gat / Israel)')
df2 = df1.rolling(4, center=True).mean()
df2.columns = ['{} rolling mean'.format(x) for x in df2.columns]
df2.plot(ax=ax, cmap=sns.color_palette("tab10", as_cmap=True), ls='--')
ax.axvline(pd.to_datetime('2008-07-01'), color='g')
ax.axvline( | pd.to_datetime('2006-01-01') | pandas.to_datetime |
import os,glob
import pandas as pd
path = "C:/Users/jeche/Desktop/python/ML/Project_/spotifyML/"
files = glob.glob(os.path.join(path, '*.csv'))
df_from_each_file= (pd.read_csv(f, sep=',') for f in files)
df_merged = | pd.concat(df_from_each_file, ignore_index=True) | pandas.concat |
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with | ensure_clean_store(setup_path) | pandas.tests.io.pytables.common.ensure_clean_store |
import datetime
from collections import OrderedDict
import warnings
import numpy as np
from numpy import array, nan
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from conftest import assert_frame_equal, assert_series_equal
from pvlib import irradiance
from conftest import requires_ephem, requires_numba
# fixtures create realistic test input data
# test input data generated at Location(32.2, -111, 'US/Arizona', 700)
# test input data is hard coded to avoid dependencies on other parts of pvlib
@pytest.fixture
def times():
# must include night values
return pd.date_range(start='20140624', freq='6H', periods=4,
tz='US/Arizona')
@pytest.fixture
def irrad_data(times):
return pd.DataFrame(np.array(
[[ 0. , 0. , 0. ],
[ 79.73860422, 316.1949056 , 40.46149818],
[1042.48031487, 939.95469881, 118.45831879],
[ 257.20751138, 646.22886049, 62.03376265]]),
columns=['ghi', 'dni', 'dhi'], index=times)
@pytest.fixture
def ephem_data(times):
return pd.DataFrame(np.array(
[[124.0390863 , 124.0390863 , -34.0390863 , -34.0390863 ,
352.69550699, -2.36677158],
[ 82.85457044, 82.97705621, 7.14542956, 7.02294379,
66.71410338, -2.42072165],
[ 10.56413562, 10.56725766, 79.43586438, 79.43274234,
144.76567754, -2.47457321],
[ 72.41687122, 72.46903556, 17.58312878, 17.53096444,
287.04104128, -2.52831909]]),
columns=['apparent_zenith', 'zenith', 'apparent_elevation',
'elevation', 'azimuth', 'equation_of_time'],
index=times)
@pytest.fixture
def dni_et(times):
return np.array(
[1321.1655834833093, 1321.1655834833093, 1321.1655834833093,
1321.1655834833093])
@pytest.fixture
def relative_airmass(times):
return pd.Series([np.nan, 7.58831596, 1.01688136, 3.27930443], times)
# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
@pytest.mark.parametrize('testval, expected', [
(doy, value),
(np.float64(doy), value),
(dt_date, value),
(dt_datetime, value),
(dt_np64, value),
(np.array([doy]), np.array([value])),
(pd.Series([doy]), np.array([value])),
(dt_index, pd.Series([value], index=dt_index)),
(timestamp, value)
])
@pytest.mark.parametrize('method', [
'asce', 'spencer', 'nrel', pytest.param('pyephem', marks=requires_ephem)])
def test_get_extra_radiation(testval, expected, method):
out = irradiance.get_extra_radiation(testval, method=method)
assert_allclose(out, expected, atol=10)
def test_get_extra_radiation_epoch_year():
out = irradiance.get_extra_radiation(doy, method='nrel', epoch_year=2012)
assert_allclose(out, 1382.4926804890767, atol=0.1)
@requires_numba
def test_get_extra_radiation_nrel_numba(times):
with warnings.catch_warnings():
# don't warn on method reload or num threads
warnings.simplefilter("ignore")
result = irradiance.get_extra_radiation(
times, method='nrel', how='numba', numthreads=4)
# and reset to no-numba state
irradiance.get_extra_radiation(times, method='nrel')
assert_allclose(result,
[1322.332316, 1322.296282, 1322.261205, 1322.227091])
def test_get_extra_radiation_invalid():
with pytest.raises(ValueError):
irradiance.get_extra_radiation(300, method='invalid')
def test_grounddiffuse_simple_float():
result = irradiance.get_ground_diffuse(40, 900)
assert_allclose(result, 26.32000014911496)
def test_grounddiffuse_simple_series(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(40, irrad_data['ghi'])
assert ground_irrad.name == 'diffuse_ground'
def test_grounddiffuse_albedo_0(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(
40, irrad_data['ghi'], albedo=0)
assert 0 == ground_irrad.all()
def test_grounddiffuse_albedo_invalid_surface(irrad_data):
with pytest.raises(KeyError):
irradiance.get_ground_diffuse(
40, irrad_data['ghi'], surface_type='invalid')
def test_grounddiffuse_albedo_surface(irrad_data):
result = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
assert_allclose(result, [0, 3.731058, 48.778813, 12.035025], atol=1e-4)
def test_isotropic_float():
result = irradiance.isotropic(40, 100)
assert_allclose(result, 88.30222215594891)
def test_isotropic_series(irrad_data):
result = irradiance.isotropic(40, irrad_data['dhi'])
assert_allclose(result, [0, 35.728402, 104.601328, 54.777191], atol=1e-4)
def test_klucher_series_float():
# klucher inputs
surface_tilt, surface_azimuth = 40.0, 180.0
dhi, ghi = 100.0, 900.0
solar_zenith, solar_azimuth = 20.0, 180.0
# expect same result for floats and pd.Series
expected = irradiance.klucher(
surface_tilt, surface_azimuth,
pd.Series(dhi), pd.Series(ghi),
pd.Series(solar_zenith), pd.Series(solar_azimuth)
) # 94.99429931664851
result = irradiance.klucher(
surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth
)
assert_allclose(result, expected[0])
def test_klucher_series(irrad_data, ephem_data):
result = irradiance.klucher(40, 180, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
# pvlib matlab 1.4 does not contain the max(cos_tt, 0) correction
# so, these values are different
assert_allclose(result, [0., 36.789794, 109.209347, 56.965916], atol=1e-4)
# expect same result for np.array and pd.Series
expected = irradiance.klucher(
40, 180, irrad_data['dhi'].values, irrad_data['ghi'].values,
ephem_data['apparent_zenith'].values, ephem_data['azimuth'].values
)
assert_allclose(result, expected, atol=1e-4)
def test_haydavies(irrad_data, ephem_data, dni_et):
result = irradiance.haydavies(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [0, 27.1775, 102.9949, 33.1909], atol=1e-4)
def test_reindl(irrad_data, ephem_data, dni_et):
result = irradiance.reindl(
40, 180, irrad_data['dhi'], irrad_data['dni'], irrad_data['ghi'],
dni_et, ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [np.nan, 27.9412, 104.1317, 34.1663], atol=1e-4)
def test_king(irrad_data, ephem_data):
result = irradiance.king(40, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'])
assert_allclose(result, [0, 44.629352, 115.182626, 79.719855], atol=1e-4)
def test_perez(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=irrad_data.index)
assert_series_equal(out, expected, check_less_precise=2)
def test_perez_components(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass,
return_components=True)
expected = pd.DataFrame(np.array(
[[ 0. , 31.46046871, np.nan, 45.45539877],
[ 0. , 26.84138589, np.nan, 31.72696071],
[ 0. , 0. , np.nan, 4.47966439],
[ 0. , 4.62212181, np.nan, 9.25316454]]).T,
columns=['sky_diffuse', 'isotropic', 'circumsolar', 'horizon'],
index=irrad_data.index
)
expected_for_sum = expected['sky_diffuse'].copy()
expected_for_sum.iloc[2] = 0
sum_components = out.iloc[:, 1:].sum(axis=1)
sum_components.name = 'sky_diffuse'
assert_frame_equal(out, expected, check_less_precise=2)
assert_series_equal(sum_components, expected_for_sum, check_less_precise=2)
def test_perez_arrays(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'].values, dni.values,
dni_et, ephem_data['apparent_zenith'].values,
ephem_data['azimuth'].values,
relative_airmass.values)
expected = np.array(
[ 0. , 31.46046871, np.nan, 45.45539877])
assert_allclose(out, expected, atol=1e-2)
assert isinstance(out, np.ndarray)
def test_perez_scalar():
# copied values from fixtures
out = irradiance.perez(40, 180, 118.45831879, 939.95469881,
1321.1655834833093, 10.56413562, 144.76567754,
1.01688136)
# this will fail: out is an ndarray with ndim == 0. fix in a future version.
# assert np.isscalar(out)
assert_allclose(out, 109.084332)
@pytest.mark.parametrize('model', ['isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'])
def test_sky_diffuse_zenith_close_to_90(model):
# GH 432
sky_diffuse = irradiance.get_sky_diffuse(
30, 180, 89.999, 230,
dni=10, ghi=51, dhi=50, dni_extra=1360, airmass=12, model=model)
assert sky_diffuse < 100
def test_get_sky_diffuse_invalid():
with pytest.raises(ValueError):
irradiance.get_sky_diffuse(
30, 180, 0, 180, 1000, 1100, 100, dni_extra=1360, airmass=1,
model='invalid')
def test_liujordan():
expected = pd.DataFrame(np.array(
[[863.859736967, 653.123094076, 220.65905025]]),
columns=['ghi', 'dni', 'dhi'],
index=[0])
out = irradiance.liujordan(
pd.Series([10]), pd.Series([0.5]), pd.Series([1.1]), dni_extra=1400)
assert_frame_equal(out, expected)
def test_get_total_irradiance(irrad_data, ephem_data, dni_et, relative_airmass):
models = ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez']
for model in models:
total = irradiance.get_total_irradiance(
32, 180,
ephem_data['apparent_zenith'], ephem_data['azimuth'],
dni=irrad_data['dni'], ghi=irrad_data['ghi'],
dhi=irrad_data['dhi'],
dni_extra=dni_et, airmass=relative_airmass,
model=model,
surface_type='urban')
assert total.columns.tolist() == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
@pytest.mark.parametrize('model', ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez'])
def test_get_total_irradiance_scalars(model):
total = irradiance.get_total_irradiance(
32, 180,
10, 180,
dni=1000, ghi=1100,
dhi=100,
dni_extra=1400, airmass=1,
model=model,
surface_type='urban')
assert list(total.keys()) == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
# test that none of the values are nan
assert np.isnan(np.array(list(total.values()))).sum() == 0
def test_poa_components(irrad_data, ephem_data, dni_et, relative_airmass):
aoi = irradiance.aoi(40, 180, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
gr_sand = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
diff_perez = irradiance.perez(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'], relative_airmass)
out = irradiance.poa_components(
aoi, irrad_data['dni'], diff_perez, gr_sand)
expected = pd.DataFrame(np.array(
[[ 0. , -0. , 0. , 0. ,
0. ],
[ 35.19456561, 0. , 35.19456561, 31.4635077 ,
3.73105791],
[956.18253696, 798.31939281, 157.86314414, 109.08433162,
48.77881252],
[ 90.99624896, 33.50143401, 57.49481495, 45.45978964,
12.03502531]]),
columns=['poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=irrad_data.index)
assert_frame_equal(out, expected)
@pytest.mark.parametrize('pressure,expected', [
(93193, [[830.46567, 0.79742, 0.93505],
[676.09497, 0.63776, 3.02102]]),
(None, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]]),
(101325, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]])
])
def test_disc_value(pressure, expected):
# see GH 449 for pressure=None vs. 101325.
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2014-06-24T1200', '2014-06-24T1800'],
tz='America/Phoenix')
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
out = irradiance.disc(ghi, zenith, times, pressure=pressure)
expected_values = np.array(expected)
expected = pd.DataFrame(expected_values, columns=columns, index=times)
# check the pandas dataframe. check_less_precise is weird
assert_frame_equal(out, expected, check_less_precise=True)
# use np.assert_allclose to check values more clearly
assert_allclose(out.values, expected_values, atol=1e-5)
def test_disc_overirradiance():
columns = ['dni', 'kt', 'airmass']
ghi = np.array([3000])
solar_zenith = np.full_like(ghi, 0)
times = pd.date_range(start='2016-07-19 12:00:00', freq='1s',
periods=len(ghi), tz='America/Phoenix')
out = irradiance.disc(ghi=ghi, solar_zenith=solar_zenith,
datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[8.72544336e+02, 1.00000000e+00, 9.99493933e-01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_disc_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2016-07-19 06:11:00'], tz='America/Phoenix')
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith and/or max_airmass keep these results reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100)
expected = pd.DataFrame(np.array(
[[0., 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100)
expected = pd.DataFrame(np.array(
[[277.50185968, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith keeps this result reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_airmass=100)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 36.39544757]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow zenith to be close to 90 and airmass to be infinite
# and we get crazy values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[6.68577449e+03, 1.16046346e-02, 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow min cos zenith to be 0, zenith to be close to 90,
# and airmass to be very big and we get even higher DNI values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[7.21238390e+03, 1., 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_dirint_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure)
assert_almost_equal(dirint_data.values,
np.array([868.8, 699.7]), 1)
def test_dirint_nans():
times = pd.date_range(start='2014-06-24T12-0700', periods=5, freq='6H')
ghi = pd.Series([np.nan, 1038.62, 1038.62, 1038.62, 1038.62], index=times)
zenith = pd.Series([10.567, np.nan, 10.567, 10.567, 10.567], index=times)
pressure = pd.Series([93193., 93193., np.nan, 93193., 93193.], index=times)
temp_dew = pd.Series([10, 10, 10, np.nan, 10], index=times)
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=temp_dew)
assert_almost_equal(dirint_data.values,
np.array([np.nan, np.nan, np.nan, np.nan, 893.1]), 1)
def test_dirint_tdew():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=10)
assert_almost_equal(dirint_data.values,
np.array([882.1, 672.6]), 1)
def test_dirint_no_delta_kt():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
use_delta_kt_prime=False)
assert_almost_equal(dirint_data.values,
np.array([861.9, 670.4]), 1)
def test_dirint_coeffs():
coeffs = irradiance._get_dirint_coeffs()
assert coeffs[0, 0, 0, 0] == 0.385230
assert coeffs[0, 1, 2, 1] == 0.229970
assert coeffs[3, 2, 6, 3] == 1.032260
def test_dirint_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
# times don't have any physical relevance
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([0, 1], index=times)
solar_zenith = pd.Series([90, 89.99], index=times)
out = irradiance.dirint(ghi, solar_zenith, times)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected)
out = irradiance.dirint(ghi, solar_zenith, times, max_zenith=90)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0,
max_zenith=90)
expected = pd.Series([0.0, 144.264507], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0,
max_zenith=100)
expected = | pd.Series([0.0, 144.264507], index=times, name='dni') | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[60]:
import pandas as pd
import requests
import torch
#import locale
import warnings
import os
model = torch.hub.load('ultralytics/yolov5', 'yolov5n') # or yolov5m, yolov5l, yolov5x, custom
warnings.filterwarnings('ignore')
#locale.setlocale(locale.LC_TIME, 'it_IT.utf8')
webcam_json_url = 'https://vit.trilogis.it/json/webcam'
output_data = "data" + os.sep + "vehicles_on_trentino_webcams.parquet"
last_output_data = "data" + os.sep + "last_vehicles_on_trentino_webcams.csv"
# In[61]:
webcams = pd.DataFrame(requests.get(webcam_json_url).json()['webcams']['webcam'])
# In[62]:
webcams['Id'] = webcams['Id'].astype(int)
webcams['Cod'] = webcams['Cod'].astype(int)
webcams['Nome'] = webcams['Nome'].astype(str)
webcams['Direzione'] = webcams['Direzione'].astype(str)
webcams['Url_Immagine'] = webcams['Url_Immagine'].astype(str)
webcams['IP_Webcam'] = webcams['IP_Webcam'].astype(str)
webcams['Km'] = webcams['Km'].astype(str)
webcams['Strada'] = webcams['Strada'].astype(str)
webcams['Localita'] = webcams['Localita'].astype(str)
webcams['ZonaTN'] = webcams['ZonaTN'].astype(int)
webcams['Lat'] = webcams['Lat'].apply(lambda x: float(x) if x!='' else 0)
webcams['Lng'] = webcams['Lng'].apply(lambda x: float(x) if x!='' else 0)
webcams['Monitoraggio'] = webcams['Monitoraggio'].astype(bool)
webcams['Live'] = webcams['Live'].astype(bool)
webcams['TS_Image'] = webcams['TS_Image'].astype(str)
# In[63]:
# drop decommissioned webcams
webcams = webcams[webcams['Url_Immagine'] != 'http://vit.trilogis.it/cam/webcam_outdated.jpg']
# In[64]:
def to_timestamp(x):
mesi = {
'gennaio': '01',
'febbraio': '02',
'marzo': '03',
'aprile': '04',
'maggio': '05',
'giugno': '06',
'luglio': '07',
'agosto': '08',
'settembre': '09',
'ottobre': '10',
'novembre': '11',
'dicembre': "12"
}
data = x.split(",")[1].split(" ")
day = data[1]
month = mesi[data[2]]
year = data[3]
hour = data[4]
rtime = year + "-" + month + "-" + day + " " + hour + ",0"
return rtime
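# Illustration-only helper: the exact TS_Image layout is an assumption inferred from
# the parsing above (weekday, comma, then "day month year HH:MM:SS" in Italian).
def _to_timestamp_demo():
    import pandas as pd
    raw = 'martedì, 14 giugno 2022 10:30:15'
    ts = pd.to_datetime(to_timestamp(raw), format='%Y-%m-%d %H:%M:%S,%f')
    return ts  # Timestamp('2022-06-14 10:30:15')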
# In[65]:
webcams['timestamp'] = webcams['TS_Image'].apply(lambda x: to_timestamp(x))
webcams['timestamp'] = pd.to_datetime(webcams.timestamp, format='%Y-%m-%d %H:%M:%S,%f')
vehicles = ['car','truck','bus','train','motorcycle']
# In[66]:
# 45 -> train 0.50 (it is actually a fence)
# 34 -> train 0.55 (they are guardrails)
# CAM65 -> cars in a parking lot (mask out those pixels?)
# CAM100 -> cars in a parking lot (mask out those pixels?)
# CAM111 -> cars in a parking lot (mask out those pixels?)
# CAM125 -> cars in a parking lot (mask out those pixels?)
# CAM126 -> cars in a parking lot (mask out those pixels?)
ids_clean = [34]
# In[67]:
def identifyVehicles(id,indf):
num_vehicles = 0
indf = indf[indf['Id'] == id]
url = indf['Url_Immagine'].values[0]
try:
if (url.find("webcam_outdated.jpg") == -1):
results = model([url])
#results.save("docs" + os.sep + "results")
results_df = results.pandas().xyxy[0]
results_df = results_df[results_df['confidence'] >= 0.4]
results_df = results_df[results_df['name'].isin(vehicles)]
num_vehicles = results_df.shape[0]
if id in ids_clean:
num_vehicles = num_vehicles - 1
if num_vehicles < 0:
num_vehicles = 0
except OSError as e:
pass
return num_vehicles
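# Illustration-only helper (fabricated detections shaped like results.pandas().xyxy[0]):
# the confidence/class filtering step used inside identifyVehicles, without needing
# the network or the YOLOv5 model.
def _count_vehicles_demo():
    import pandas as pd
    detections = pd.DataFrame({
        'confidence': [0.92, 0.35, 0.61, 0.88],
        'name': ['car', 'car', 'truck', 'person'],
    })
    kept = detections[(detections['confidence'] >= 0.4)
                      & (detections['name'].isin(vehicles))]
    return kept.shape[0]  # 2: the low-confidence car and the person are dropped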
# In[68]:
webcams['veicoli'] = webcams['Id'].apply(lambda x: identifyVehicles(x,webcams))
# In[69]:
columns = {
'Id':'id','Cod':'codice',"Nome":'nome',
'Direzione':'direzione','Url_Immagine':'url',
'Attiva': 'attiva',
'Comune':'comune','Comunita':'comunita_valle',
'IP_Webcam':'ip_webcam','Km':'km','Strada':'strada',
'Localita':'localita','ZonaTN':'zona_tn', 'Lat':'latitudine',
'Lng':'longitude','Monitoraggio':'monitoraggio',
'Live':'live','TS_Image':'data'}
# In[70]:
webcams.rename(columns=columns, inplace=True)
# In[71]:
del webcams['data']
del webcams['ip_webcam']
del webcams['monitoraggio']
del webcams['live']
del webcams['attiva']
# In[72]:
if os.path.exists(output_data):
last_out = pd.read_parquet(output_data)
last_timestamp = last_out.timestamp.max()
actual_timestamp = webcams.timestamp.max().strftime("%Y-%m-%d %H:%M:%S")
if str(last_timestamp) != str(actual_timestamp):
newdata = | pd.concat([last_out, webcams]) | pandas.concat |
import pandas as pd
from tqdm import tqdm
import click
import numpy as np
from os import listdir
from os.path import splitext, join
from .ingestion import ingest_file
from .evaluate_config import EvaluationConfig as args
class EmptyGroundTruthError(Exception):
pass
def get_ious(pred_df, box):
X = 0
Y = 1
X2 = 2
Y2 = 3
xml_box = pred_df[["x0", "y0", "x1", "y1"]].values
N, _ = xml_box.shape
i_boxes = np.zeros((N,4))
i_boxes[:, X] = np.maximum(xml_box[:, X].reshape(-1), box[X].reshape(-1))
i_boxes[:, Y] = np.maximum(xml_box[:, Y].reshape(-1), box[Y].reshape(-1))
i_boxes[:, X2] = np.minimum(xml_box[:, X2].reshape(-1), box[X2].reshape(-1))
i_boxes[:, Y2] = np.minimum(xml_box[:, Y2].reshape(-1), box[Y2].reshape(-1))
i_area = (i_boxes[:, X2] - i_boxes[:, X]) * (i_boxes[:, Y2] - i_boxes[:, Y])
i_area[i_boxes[:, X2] < i_boxes[:, X]] = 0
i_area[i_boxes[:, Y2] < i_boxes[:, Y]] = 0
xml_area = (xml_box[:, X2] - xml_box[:, X]) * (xml_box[:, Y2] - xml_box[:, Y])
box_area = (box[X2] - box[X]) * (box[Y2] - box[Y])
iou = i_area/(xml_area+ box_area - i_area)
iou_df = pd.DataFrame({
"iou": iou
})
return iou_df
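# Illustration-only helper (fabricated boxes): one prediction identical to the
# ground-truth box (IoU = 1.0) and one disjoint prediction (IoU = 0.0).
def _get_ious_demo():
    import numpy as np
    import pandas as pd
    preds = pd.DataFrame({'x0': [0.0, 20.0], 'y0': [0.0, 20.0],
                          'x1': [10.0, 30.0], 'y1': [10.0, 30.0]})
    gt_box = np.array([0.0, 0.0, 10.0, 10.0])
    return get_ious(preds, gt_box)  # 'iou' column is [1.0, 0.0]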
def get_confusion(combined_df, classes):
cols = ["ground_truth"] + classes
preds = np.zeros(len(classes))
data = [classes] + ([preds]*len(classes))
pd_dict = dict(zip(cols, data))
confusion_df = pd.DataFrame(pd_dict)
confusion_df = confusion_df.set_index("ground_truth")
for row in combined_df.itertuples():
confusion_df.at[row.gt_label, row.pred_label] +=1
return confusion_df
def match(pred_df, gt_df, thres=0.5):
if pred_df.shape[0] == 0:
combined_df = pd.DataFrame(columns=[])
unmatched = gt_df
return combined_df, unmatched
for idx, row in enumerate(gt_df.itertuples()):
box = np.array([row.x0, row.y0, row.x1, row.y1])
pred_df[f"iou_{idx}"] = get_ious(pred_df, box)
overlaps = pred_df.filter(regex=("iou_*")).values
matches = np.argmax(overlaps, axis=1)
max_overlaps = np.array([pred_df.at[idx, f"iou_{best}"] for idx, best in enumerate(matches)])
match_labels = [gt_df.at[match, "label"] for match in matches]
mask = max_overlaps < thres
matches[mask] = -1.0
pred_df["gt_id"] = matches
pred_df["gt_label"] = match_labels
pred_df["max_overlap"] = max_overlaps
combined_df = pred_df.rename(index=str, columns={"label": "pred_label"})
combined_df = combined_df[combined_df["max_overlap"]>= thres]
unmatched_idxs = list(filter(lambda x: x not in matches, range(gt_df.shape[0])))
unmatched = gt_df.loc[unmatched_idxs]
return combined_df, unmatched
def get_tp(combined_df, cls):
"""
true positives have the correct label,
only one tp per ground truth label
"""
if combined_df.shape[0] == 0:
return 0.0
tp_candidates = combined_df[combined_df["pred_label"] == combined_df["gt_label"]]
tp_candidates = tp_candidates[tp_candidates["pred_label"] == cls]
tps = tp_candidates.shape[0]
if not args.multiple_proposals:
groups = tp_candidates.groupby("gt_id")
tps = len(groups)
return float(tps)
def get_fp(combined_df, cls):
if combined_df.shape[0] == 0:
return 0.0
fp_candidates = combined_df[combined_df["pred_label"] == cls]
fp_type_1 = fp_candidates[fp_candidates["pred_label"] != fp_candidates["gt_label"]].shape[0]
fp_type_2 = 0.0
tp_candidates = combined_df[combined_df["pred_label"] == combined_df["gt_label"]]
tp_candidates = tp_candidates[tp_candidates["pred_label"] == cls]
groups = tp_candidates.groupby("gt_id")
matches = len(groups)
tot = groups.size()
fp_type_2 += tot.sum() - matches
if args.multiple_proposals:
# when multiple proposals per ground truth are allowed, the extra matches are not counted as false positives
fp_type_2 = 0
return float(fp_type_1 + fp_type_2)
def get_fn(combined_df, unmatched, cls):
if combined_df.shape[0] > 0:
fn_candidates = combined_df[combined_df["gt_label"] == cls]
fn_type_1 = fn_candidates[fn_candidates["pred_label"] != fn_candidates["gt_label"] ].shape[0]
else:
fn_type_1 = 0
fn_type_2 = unmatched[unmatched["label"] == cls].shape[0]
if not args.e2e:
fn_type_2 = 0
return float(fn_type_1 + fn_type_2)
def get_precision(combined_df, cls):
tp = get_tp(combined_df, cls)
fp = get_fp(combined_df, cls)
if tp == 0 and fp == 0:
return np.nan
return tp/(tp+fp)
def get_recall(combined_df, unmatched, cls):
tp = get_tp(combined_df, cls)
fn = get_fn(combined_df, unmatched, cls)
if tp == 0 and fn == 0:
return np.nan
return tp / (tp +fn)
def get_gt_instances(gt_df, cls):
return gt_df[gt_df["label"] == cls].shape[0]
def evaluate_single(pred_path, gt_path, classes=None, thres=0.5):
pred_df = ingest_file(pred_path)
gt_df = ingest_file(gt_path)
if gt_df.shape[0] ==0:
raise EmptyGroundTruthError()
combined , unmatched = match(pred_df, gt_df,thres=0.5)
prec_cls = {}
rec_cls = {}
for cls in classes:
prec = get_precision(combined, cls)
rec = get_recall(combined, unmatched, cls)
prec_cls[cls] = prec
rec_cls[cls] = rec
confusion_df = get_confusion(combined, classes)
prec_df = | pd.Series(prec_cls,name="precisions") | pandas.Series |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: li
@file: factor_cash_flow.py
@time: 2019-05-30
"""
import gc, six
import json
import numpy as np
import pandas as pd
from utilities.calc_tools import CalcTools
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
@six.add_metaclass(Singleton)
class FactorCashFlow(object):
"""
Cash flow (现金流量)
"""
def __init__(self):
__str__ = 'factor_cash_flow'
self.name = '财务指标'  # 'financial indicators'
self.factor_type1 = '财务指标'  # 'financial indicators'
self.factor_type2 = '现金流量'  # 'cash flow'
self.description = '财务指标的二级指标-现金流量'  # 'secondary financial indicator - cash flow'
@staticmethod
def CashOfSales(tp_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'operating_revenue']):
"""
:name: Net cash flow from operating activities / operating revenue
:desc: Net cash flow from operating activities / operating revenue (MRQ)
:unit:
:view_dimension: 0.01
"""
cash_flow = tp_cash_flow.loc[:, dependencies]
cash_flow['CashOfSales'] = np.where(CalcTools.is_zero(cash_flow.operating_revenue.values),
0,
cash_flow.net_operate_cash_flow.values / cash_flow.operating_revenue.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
# factor_cash_flow['CashOfSales'] = cash_flow['CashOfSales']
return factor_cash_flow
@staticmethod
def NOCFToOpt(tp_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'total_operating_revenue', 'total_operating_cost']):
"""
:name: Net cash flow from operating activities / (total operating revenue - total operating cost)
:desc: Net cash flow from operating activities / (total operating revenue - total operating cost)
:unit:
:view_dimension: 0.01
"""
cash_flow = tp_cash_flow.loc[:, dependencies]
cash_flow['NOCFToOpt'] = np.where(
CalcTools.is_zero((cash_flow.total_operating_revenue.values - cash_flow.total_operating_cost.values)), 0,
cash_flow.net_operate_cash_flow.values / (
cash_flow.total_operating_revenue.values - cash_flow.total_operating_cost.values))
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
# factor_cash_flow['NOCFToOpt'] = cash_flow['NOCFToOpt']
return factor_cash_flow
@staticmethod
def SalesServCashToOR(tp_cash_flow, factor_cash_flow, dependencies=['goods_sale_and_service_render_cash', 'operating_revenue']):
"""
:name: Cash received from selling goods and rendering services / operating revenue
:desc: Cash received from selling goods and rendering services / operating revenue
:unit:
:view_dimension: 0.01
"""
cash_flow = tp_cash_flow.loc[:, dependencies]
cash_flow['SalesServCashToOR'] = np.where(CalcTools.is_zero(cash_flow.operating_revenue.values),
0,
cash_flow.goods_sale_and_service_render_cash.values / cash_flow.operating_revenue.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
# factor_cash_flow['SalesServCashToOR'] = cash_flow['SalesServCashToOR']
return factor_cash_flow
@staticmethod
def OptOnReToAssetTTM(ttm_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'net_profit', 'total_assets']):
"""
:name: (Net cash flow from operating activities (TTM) - net profit (TTM)) / total assets (TTM)
:desc: (Net cash flow from operating activities (TTM) - net profit (TTM)) / total assets (TTM)
:unit:
:view_dimension: 0.01
"""
cash_flow = ttm_cash_flow.loc[:, dependencies]
cash_flow['OptOnReToAssetTTM'] = np.where(CalcTools.is_zero(cash_flow.total_assets.values), 0,
(cash_flow.net_operate_cash_flow.values - cash_flow.net_profit.values)
/ cash_flow.total_assets.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
# factor_cash_flow['OptOnReToAssetTTM'] = cash_flow['OptOnReToAssetTTM']
return factor_cash_flow
@staticmethod
def NetProCashCoverTTM(ttm_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'np_parent_company_owners']):
"""
:name: Net cash flow from operating activities (TTM) / net profit attributable to owners of the parent company (TTM)
:desc: Net cash flow from operating activities (TTM) / net profit attributable to owners of the parent company (TTM)
:unit:
:view_dimension: 0.01
"""
cash_flow = ttm_cash_flow.loc[:, dependencies]
cash_flow['NetProCashCoverTTM'] = np.where(
CalcTools.is_zero(cash_flow.np_parent_company_owners.values), 0,
cash_flow.net_operate_cash_flow.values / cash_flow.np_parent_company_owners.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
# factor_cash_flow['NetProCashCoverTTM'] = cash_flow['NetProCashCoverTTM']
return factor_cash_flow
@staticmethod
def OptToEnterpriseTTM(ttm_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'longterm_loan', 'shortterm_loan', 'market_cap', 'cash_and_equivalents_at_end']):
"""
:name: Net cash flow from operating activities (TTM) / enterprise value (TTM)
:desc: Net cash flow from operating activities (TTM) / (long-term loans (TTM) + short-term loans (TTM) + total market cap - cash and cash equivalents at period end (TTM))
:unit:
:view_dimension: 0.01
"""
cash_flow = ttm_cash_flow.loc[:, dependencies]
cash_flow['OptToEnterpriseTTM'] = np.where(CalcTools.is_zero(
cash_flow.longterm_loan.values + cash_flow.shortterm_loan.values + \
cash_flow.market_cap.values - cash_flow.cash_and_equivalents_at_end.values), 0,
cash_flow.net_operate_cash_flow.values / (cash_flow.longterm_loan.values + cash_flow.shortterm_loan.values + \
cash_flow.market_cap.values - cash_flow.cash_and_equivalents_at_end.values))
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
return factor_cash_flow
@staticmethod
def OptCFToRevTTM(ttm_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'operating_revenue']):
"""
:name: Net cash flow from operating activities (TTM) / operating revenue (TTM)
:desc: Net cash flow from operating activities (TTM) / operating revenue (TTM)
:unit:
:view_dimension: 0.01
"""
cash_flow = ttm_cash_flow.loc[:, dependencies]
cash_flow['OptCFToRevTTM'] = np.where(
CalcTools.is_zero(cash_flow.operating_revenue.values), 0,
cash_flow.net_operate_cash_flow.values / cash_flow.operating_revenue.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
# factor_cash_flow['OptCFToRevTTM'] = cash_flow['OptCFToRevTTM']
return factor_cash_flow
@staticmethod
def OptToAssertTTM(ttm_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'total_assets']):
"""
:name: Net cash flow from operating activities (TTM) / total assets (TTM)
:desc: Net cash flow from operating activities (TTM) / total assets (TTM)
:unit:
:view_dimension: 0.01
"""
cash_flow = ttm_cash_flow.loc[:, dependencies]
cash_flow['OptToAssertTTM'] = np.where(CalcTools.is_zero(cash_flow.total_assets.values),
0,
cash_flow.net_operate_cash_flow.values / cash_flow.total_assets.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
# factor_cash_flow['OptToAssertTTM'] = cash_flow['OptToAssertTTM']
return factor_cash_flow
@staticmethod
def SaleServCashToOptReTTM(ttm_cash_flow, factor_cash_flow, dependencies=['goods_sale_and_service_render_cash',
'operating_revenue']):
"""
:name: Cash received from selling goods and rendering services (TTM) / operating revenue (TTM)
:desc: Cash received from selling goods and rendering services (TTM) / operating revenue (TTM)
:unit:
:view_dimension: 0.01
"""
cash_flow = ttm_cash_flow.loc[:, dependencies]
cash_flow['SaleServCashToOptReTTM'] = np.where(
CalcTools.is_zero(cash_flow.operating_revenue.values), 0,
cash_flow.goods_sale_and_service_render_cash.values / cash_flow.operating_revenue.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
return factor_cash_flow
@staticmethod
def NOCFTOOPftTTM(ttm_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'operating_profit']):
"""
:name: Net cash flow from operating activities / operating profit (TTM)
:desc: Net cash flow from operating activities (TTM) / operating profit (TTM)
:unit:
:view_dimension: 0.01
"""
cash_flow = ttm_cash_flow.loc[:, dependencies]
func = lambda x: x[0] / x[1] if x[1] is not None and x[1] != 0 else None
cash_flow['NOCFTOOPftTTM'] = cash_flow[dependencies].apply(func, axis=1)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
return factor_cash_flow
@staticmethod
def OptCFToNITTM(ttm_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow',
'total_operating_revenue',
'total_operating_cost']):
"""
:name: Net cash flow from operating activities (TTM) / net operating income (TTM)
:desc: Net cash flow from operating activities (TTM) / net operating income (TTM)
:unit:
:view_dimension: 0.01
"""
cash_flow = ttm_cash_flow.loc[:, dependencies]
func = lambda x: x[0] / (x[1] - x[2]) if x[1] is not None and x[2] is not None and (x[1] - x[2]) != 0 else None
cash_flow['OptCFToNITTM'] = cash_flow[dependencies].apply(func, axis=1)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = | pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code") | pandas.merge |
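# Illustration-only sketch (toy numbers, plain numpy/pandas, no CalcTools dependency)
# of the pattern most FactorCashFlow methods above follow: guard against a zero
# denominator with np.where, then outer-merge the new factor on security_code.
# Note that, like the original, np.where evaluates both branches, so numpy may emit
# a harmless divide warning for the zero-revenue row.
def _cash_flow_ratio_demo():
    import numpy as np
    import pandas as pd
    tp = pd.DataFrame({'security_code': ['000001', '000002'],
                       'net_operate_cash_flow': [120.0, 80.0],
                       'operating_revenue': [1000.0, 0.0]})
    factor = pd.DataFrame({'security_code': ['000001', '000002']})
    ratio = np.where(tp['operating_revenue'].values == 0, 0,
                     tp['net_operate_cash_flow'].values / tp['operating_revenue'].values)
    cash = pd.DataFrame({'security_code': tp['security_code'], 'CashOfSales': ratio})
    return pd.merge(factor, cash, how='outer', on='security_code')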
import json
#import requests
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
import uuid
import subprocess
from datetime import datetime
from bs4 import BeautifulSoup as bs
import re
import pysam
import mysecrets
import glob
import tarfile
from flask import Flask, request, redirect, url_for, jsonify, render_template, flash, send_file
from werkzeug.utils import secure_filename
from flask_sitemap import Sitemap
from flask_uploads import UploadSet, configure_uploads, DATA
from pymongo import MongoClient
from pprint import pprint
import htmltableparser
#import getSimpleSumStats
genomicWindowLimit = 2000000
one_sided_SS_window_size = 100000 # (100 kb on either side of the lead SNP)
fileSizeLimit = 500 * 1024 * 1024 # in Bytes
MYDIR = os.path.dirname(__file__)
APP_STATIC = os.path.join(MYDIR, 'static')
##################
# Default settings
##################
default_region = "1:205500000-206000000"
default_chromname = "#CHROM"
default_posname = "POS"
default_snpname = "ID"
default_refname = "REF"
default_altname = "ALT"
default_pname = "P"
default_betaname = "BETA"
default_stderrname = "SE"
default_nname = "N"
default_mafname = "MAF"
# Default column names for secondary datasets:
CHROM = 'CHROM'
BP = 'BP'
SNP = 'SNP'
P = 'P'
coloc2colnames = ['CHR','POS','SNPID','A2','A1','BETA','SE','PVAL','MAF', 'N']
coloc2eqtlcolnames = coloc2colnames + ['ProbeID']
coloc2gwascolnames = coloc2colnames + ['type']
################
################
app = Flask(__name__)
ext = Sitemap(app=app)
app.config['UPLOAD_FOLDER'] = os.path.join(MYDIR, 'static/upload/')
app.config['UPLOADED_FILES_DEST'] = os.path.join(MYDIR, 'static/upload/')
app.config['MAX_CONTENT_LENGTH'] = fileSizeLimit
ALLOWED_EXTENSIONS = set(['txt', 'tsv', 'ld', 'html'])
app.config['UPLOADED_FILES_ALLOW'] = ALLOWED_EXTENSIONS
app.secret_key = mysecrets.mysecret
files = UploadSet('files', DATA)
configure_uploads(app, files)
collapsed_genes_df_hg19 = pd.read_csv(os.path.join(MYDIR, 'data/collapsed_gencode_v19_hg19.gz'), compression='gzip', sep='\t', encoding='utf-8')
collapsed_genes_df_hg38 = pd.read_csv(os.path.join(MYDIR, 'data/collapsed_gencode_v26_hg38.gz'), compression='gzip', sep='\t', encoding='utf-8')
collapsed_genes_df = collapsed_genes_df_hg19 # For now
ld_mat_diag_constant = 1e-6
conn = "mongodb://localhost:27017"
client = MongoClient(conn)
db = client.GTEx_V7 # For now
available_gtex_versions = ["V7", "V8"]
valid_populations = ["EUR", "AFR","EAS", "SAS", "AMR", "ASN", "NFE"]
####################################
# Helper functions
####################################
def parseRegionText(regiontext, build):
if build not in ['hg19', 'hg38']:
raise InvalidUsage(f'Unrecognized build: {build}', status_code=410)
regiontext = regiontext.strip().replace(' ','').replace(',','').replace('chr','')
if not re.search("^\d+:\d+-\d+$", regiontext.replace('X','23').replace('x','23')):
raise InvalidUsage(f'Invalid coordinate format. {regiontext} e.g. 1:205,000,000-206,000,000', status_code=410)
chrom = regiontext.split(':')[0].lower().replace('chr','').upper()
pos = regiontext.split(':')[1]
startbp = pos.split('-')[0].replace(',','')
endbp = pos.split('-')[1].replace(',','')
chromLengths = pd.read_csv(os.path.join(MYDIR, 'data', build + '_chrom_lengths.txt'), sep="\t", encoding='utf-8')
chromLengths.set_index('sequence',inplace=True)
if chrom in ['X','x'] or chrom == '23':
chrom = 23
maxChromLength = chromLengths.loc['chrX', 'length']
try:
startbp = int(startbp)
endbp = int(endbp)
except:
raise InvalidUsage(f"Invalid coordinates input: {regiontext}", status_code=410)
else:
try:
chrom = int(chrom)
if chrom == 23:
maxChromLength = chromLengths.loc['chrX', 'length']
else:
maxChromLength = chromLengths.loc['chr'+str(chrom), 'length']
startbp = int(startbp)
endbp = int(endbp)
except:
raise InvalidUsage(f"Invalid coordinates input {regiontext}", status_code=410)
if chrom < 1 or chrom > 23:
raise InvalidUsage('Chromosome input must be between 1 and 23', status_code=410)
elif startbp > endbp:
raise InvalidUsage('Starting chromosome basepair position is greater than ending basepair position', status_code=410)
elif startbp > maxChromLength or endbp > maxChromLength:
raise InvalidUsage('Start or end coordinates are out of range', status_code=410)
elif (endbp - startbp) > genomicWindowLimit:
raise InvalidUsage(f'Entered region size is larger than {genomicWindowLimit/10**6} Mbp', status_code=410)
else:
return chrom, startbp, endbp
def allowed_file(filenames):
if isinstance(filenames, str):
return '.' in filenames and filenames.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
for filename in filenames:
if not ('.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS):
return False
return True
def writeList(alist, filename):
with open(filename, 'w') as f:
for item in alist:
f.write("%s\n" % item)
def writeMat(aMat, filename):
aMat = np.matrix(aMat)
with open(filename, 'w') as f:
for row in np.arange(aMat.shape[0]):
for col in np.arange(aMat.shape[1] - 1):
f.write("%s\t" % str(aMat[row,col]))
f.write("%s\n" % str(aMat[row,-1]))
def genenames(genename, build):
# Given either ENSG gene name or HUGO gene name, returns both HUGO and ENSG names
ensg_gene = genename
if build.lower() in ["hg19","grch37"]:
collapsed_genes_df = collapsed_genes_df_hg19
elif build.lower() in ["hg38", "grch38"]:
collapsed_genes_df = collapsed_genes_df_hg38
if genename in list(collapsed_genes_df['name']):
ensg_gene = collapsed_genes_df['ENSG_name'][list(collapsed_genes_df['name']).index(genename)]
if genename in list(collapsed_genes_df['ENSG_name']):
genename = collapsed_genes_df['name'][list(collapsed_genes_df['ENSG_name']).index(genename)]
return genename, ensg_gene
def classify_files(filenames):
gwas_filepath = ''
ldmat_filepath = ''
html_filepath = ''
extensions = []
for file in filenames:
filename = secure_filename(file.filename)
extension = filename.split('.')[-1]
if extension not in extensions:
if extension in ['txt', 'tsv']:
extensions.extend(['txt','tsv'])
else:
extensions.append(extension)
else:
raise InvalidUsage('Please upload up to 3 different file types as described', status_code=410)
if extension in ['txt', 'tsv']:
gwas_filepath = os.path.join(MYDIR, app.config['UPLOAD_FOLDER'], filename)
elif extension in ['ld']:
ldmat_filepath = os.path.join(MYDIR, app.config['UPLOAD_FOLDER'], filename)
elif extension in ['html']:
html_filepath = os.path.join(MYDIR, app.config['UPLOAD_FOLDER'], filename)
return gwas_filepath, ldmat_filepath, html_filepath
def isSorted(l):
# l is a list
# returns True if l is sorted, False otherwise
return all(l[i] <= l[i+1] for i in range(len(l)-1))
def Xto23(l):
newl = []
validchroms = [str(i) for i in list(np.arange(1,24))]
validchroms.append('.')
    for x in l:
        token = str(x).strip().lower().replace('chr', '')
        if token.upper() == "X":
            newl.append(23)
        elif token in validchroms:
            if token != '.':
                newl.append(int(token))
            else:
                newl.append('.')
        else:
            raise InvalidUsage('Chromosome unrecognized', status_code=410)
return newl
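# Example: Xto23(['chr1', 'X', '.']) -> [1, 23, '.']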
def verifycol(formname, defaultname, filecolnames, error_message_):
"""
Checks if the user-entered column name (formname)
(or the default column name if no column name was entered - defaultname)
can be found in the dataset column names (ie. filecolnames list).
If not, the error_message_ is output and program halted with 410 status
"""
theformname = formname
if formname=='': theformname=str(defaultname)
if theformname not in filecolnames:
raise InvalidUsage(error_message_, status_code=410)
return theformname
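# Example: verifycol('', 'CHROM', ['CHROM', 'POS', 'REF'], 'Chromosome column not found')
# returns 'CHROM' (the default is used because no column name was entered on the form).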
def buildSNPlist(df, chromcol, poscol, refcol, altcol, build):
snplist = []
if build.lower() in ["hg38","grch38"]:
build = 'b38'
else:
build = 'b37'
for i in np.arange(df.shape[0]):
chrom = list(df[chromcol])[i]
pos = list(df[poscol])[i]
ref = list(df[refcol])[i]
alt = list(df[altcol])[i]
try:
snplist.append(str(chrom)+"_"+str(pos)+"_"+str(ref)+"_"+str(alt)+"_"+str(build))
        except Exception:
            raise InvalidUsage(f'Could not convert marker at row {str(i)}', status_code=410)
return snplist
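# Each returned entry is a chrom_pos_ref_alt_build string, e.g. "1_205001063_T_A_b37".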
def fetchSNV(chrom, bp, ref, build):
variantid = '.'
if ref is None or ref=='.':
ref=''
# Ensure valid region:
try:
regiontxt = str(chrom) + ":" + str(bp) + "-" + str(int(bp)+1)
    except (ValueError, TypeError):
        raise InvalidUsage(f'Invalid input for {str(chrom)}:{str(bp)}', status_code=410)
chrom, startbp, endbp = parseRegionText(regiontxt, build)
chrom = str(chrom).replace('chr','').replace('23',"X")
# Load dbSNP151 SNP names from region indicated
dbsnp_filepath = ''
if build.lower() in ["hg38", "grch38"]:
suffix = 'b38'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh38p7', 'All_20180418.vcf.gz')
else:
suffix = 'b37'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh37p13', 'All_20180423.vcf.gz')
# Load variant info from dbSNP151
tbx = pysam.TabixFile(dbsnp_filepath)
varlist = []
for row in tbx.fetch(str(chrom), bp-1, bp):
rowlist = str(row).split('\t')
chromi = rowlist[0].replace('chr','')
posi = rowlist[1]
idi = rowlist[2]
refi = rowlist[3]
alti = rowlist[4]
varstr = '_'.join([chromi, posi, refi, alti, suffix])
varlist.append(varstr)
# Check if there is a match to an SNV with the provided info
if len(varlist) == 1:
        variantid = varlist[0]
elif len(varlist) > 1 and ref != '':
for v in varlist:
if v.split('_')[2] == ref:
variantid = v
break
return variantid
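# Returns '.' when no unique single-nucleotide variant at the given position (matching the
# reference allele, if one was provided) is found in dbSNP151.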
def standardizeSNPs(variantlist, regiontxt, build):
"""
Input: Variant names in any of these formats: rsid, chrom_pos_ref_alt, chrom:pos_ref_alt, chrom:pos_ref_alt_b37/b38
Output: chrom_pos_ref_alt_b37/b38 variant ID format, but looks at GTEx variant lookup table first.
In the case of multi-allelic variants (e.g. rs2211330(T/A,C)), formats such as 1_205001063_T_A,C_b37 are accepted
If variant ID format is chr:pos, and the chr:pos has a unique biallelic SNV, then it will be assigned that variant
"""
if all(x=='.' for x in variantlist):
raise InvalidUsage('No variants provided')
if np.nan in variantlist:
raise InvalidUsage('Missing variant IDs detected in row(s): ' + str([ i+1 for i,x in enumerate(variantlist) if str(x) == 'nan' ]))
# Ensure valid region:
chrom, startbp, endbp = parseRegionText(regiontxt, build)
chrom = str(chrom).replace('23',"X")
# Load GTEx variant lookup table for region indicated
db = client.GTEx_V7
rsid_colname = 'rs_id_dbSNP147_GRCh37p13'
if build.lower() in ["hg38", "grch38"]:
db = client.GTEx_V8
rsid_colname = 'rs_id_dbSNP151_GRCh38p7'
collection = db['variant_table']
variants_query = collection.find(
{ '$and': [
{ 'chr': int(chrom.replace('X','23')) },
{ 'variant_pos': { '$gte': int(startbp), '$lte': int(endbp) } }
]}
)
variants_list = list(variants_query)
variants_df = pd.DataFrame(variants_list)
variants_df = variants_df.drop(['_id'], axis=1)
# Load dbSNP151 SNP names from region indicated
dbsnp_filepath = ''
suffix = 'b37'
if build.lower() in ["hg38", "grch38"]:
suffix = 'b38'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh38p7', 'All_20180418.vcf.gz')
else:
suffix = 'b37'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh37p13', 'All_20180423.vcf.gz')
# Load dbSNP file
#delayeddf = delayed(pd.read_csv)(dbsnp_filepath,skiprows=getNumHeaderLines(dbsnp_filepath),sep='\t')
#dbsnp = dd.from_delayed(delayeddf)
tbx = pysam.TabixFile(dbsnp_filepath)
print('Compiling list of known variants in the region from dbSNP151')
chromcol = []
poscol = []
idcol = []
refcol = []
altcol = []
variantid = [] # in chr_pos_ref_alt_build format
rsids = dict({}) # a multi-allelic variant rsid (key) can be represented in several variantid formats (values)
for row in tbx.fetch(str(chrom), startbp, endbp):
rowlist = str(row).split('\t')
chromi = rowlist[0].replace('chr','')
posi = rowlist[1]
idi = rowlist[2]
refi = rowlist[3]
alti = rowlist[4]
varstr = '_'.join([chromi, posi, refi, alti, suffix])
chromcol.append(chromi)
poscol.append(posi)
idcol.append(idi)
refcol.append(refi)
altcol.append(alti)
variantid.append(varstr)
rsids[idi] = [varstr]
altalleles = alti.split(',') # could have more than one alt allele (multi-allelic)
if len(altalleles)>1:
varstr = '_'.join([chromi, posi, refi, altalleles[0], suffix])
rsids[idi].append(varstr)
for i in np.arange(len(altalleles)-1):
varstr = '_'.join([chromi, posi, refi, altalleles[i+1], suffix])
rsids[idi].append(varstr)
print('Cleaning and mapping list of variants')
variantlist = [asnp.split(';')[0].replace(':','_').replace('.','') for asnp in variantlist] # cleaning up the SNP names a bit
stdvariantlist = []
for variant in variantlist:
if variant == '':
stdvariantlist.append('.')
continue
variantstr = variant.replace('chr','')
if re.search("^23_",variantstr): variantstr = variantstr.replace('23_','X_',1)
if variantstr.startswith('rs'):
try:
# Here's the difference from the first function version (we look at GTEx first)
if variant in list(variants_df[rsid_colname]):
stdvar = variants_df['variant_id'].loc[ variants_df[rsid_colname] == variant].to_list()[0]
stdvariantlist.append(stdvar)
else:
stdvariantlist.append(rsids[variantstr][0])
except:
stdvariantlist.append('.')
elif re.search("^\d+_\d+_[A,T,G,C]+_[A,T,C,G]+,*", variantstr.replace('X','23')):
strlist = variantstr.split('_')
strlist = list(filter(None, strlist)) # remove empty strings
try:
achr, astart, aend = parseRegionText(strlist[0]+":"+strlist[1]+"-"+str(int(strlist[1])+1), build)
achr = str(achr).replace('23','X')
if achr == str(chrom) and astart >= startbp and astart <= endbp:
variantstr = variantstr.replace("_"+str(suffix),"") + "_"+str(suffix)
if len(variantstr.split('_')) == 5:
stdvariantlist.append(variantstr)
else:
raise InvalidUsage(f'Variant format not recognizable: {variant}. Is it from another coordinate build system?', status_code=410)
else:
stdvariantlist.append('.')
except:
raise InvalidUsage(f'Problem with variant {variant}', status_code=410)
elif re.search("^\d+_\d+_*[A,T,G,C]*", variantstr.replace('X','23')):
strlist = variantstr.split('_')
strlist = list(filter(None, strlist)) # remove empty strings
try:
achr, astart, aend = parseRegionText(strlist[0]+":"+strlist[1]+"-"+str(int(strlist[1])+1), build)
achr = str(achr).replace('23','X')
if achr == str(chrom) and astart >= startbp and astart <= endbp:
if len(strlist)==3:
aref=strlist[2]
else:
aref=''
stdvariantlist.append(fetchSNV(achr, astart, aref, build))
else:
stdvariantlist.append('.')
except:
raise InvalidUsage(f'Problem with variant {variant}', status_code=410)
else:
raise InvalidUsage(f'Variant format not recognized: {variant}', status_code=410)
return stdvariantlist
def cleanSNPs(variantlist, regiontext, build):
"""
Parameters
----------
variantlist : list
list of variant IDs in rs id or chr_pos, chr_pos_ref_alt, chr_pos_ref_alt_build, etc formats
regiontext : str
the region of interest in chr:start-end format
build : str
build.lower() in ['hg19','hg38', 'grch37', 'grch38'] must be true
Returns
-------
A cleaner set of SNP names
rs id's are cleaned to contain only one,
non-rs id formats are standardized to chr_pos_ref_alt_build format)
any SNPs not in regiontext are returned as '.'
"""
variantlist = [asnp.split(';')[0].replace(':','_').replace('.','') for asnp in variantlist] # cleaning up the SNP names a bit
std_varlist = standardizeSNPs(variantlist, regiontext, build)
final_varlist = [ e if (e.startswith('rs') and std_varlist[i] != '.') else std_varlist[i] for i, e in enumerate(variantlist) ]
return final_varlist
def torsid(variantlist, regiontext, build):
"""
Parameters
----------
variantlist : list
List of variants in either rs id or other chr_pos, chr_pos_ref, chr_pos_ref_alt, chr_pos_ref_alt_build format.
Returns
-------
rsidlist : list
Corresponding rs id in the region if found.
Otherwise returns '.'
"""
if all(x=='.' for x in variantlist):
raise InvalidUsage('No variants provided')
variantlist = cleanSNPs(variantlist, regiontext, build)
chrom, startbp, endbp = parseRegionText(regiontext, build)
chrom = str(chrom).replace('23',"X")
# Load dbSNP151 SNP names from region indicated
dbsnp_filepath = ''
suffix = 'b37'
if build.lower() in ["hg38", "grch38"]:
suffix = 'b38'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh38p7', 'All_20180418.vcf.gz')
else:
suffix = 'b37'
dbsnp_filepath = os.path.join(MYDIR, 'data', 'dbSNP151', 'GRCh37p13', 'All_20180423.vcf.gz')
# Load dbSNP file
tbx = pysam.TabixFile(dbsnp_filepath)
print('Compiling list of known variants in the region from dbSNP151')
chromcol = []
poscol = []
idcol = []
refcol = []
altcol = []
rsid = dict({}) # chr_pos_ref_alt_build (keys) for rsid output (values)
for row in tbx.fetch(str(chrom), startbp, endbp):
rowlist = str(row).split('\t')
chromi = rowlist[0].replace('chr','')
posi = rowlist[1]
idi = rowlist[2]
refi = rowlist[3]
alti = rowlist[4]
varstr = '_'.join([chromi, posi, refi, alti, suffix])
chromcol.append(chromi)
poscol.append(posi)
idcol.append(idi)
refcol.append(refi)
altcol.append(alti)
rsid[varstr] = idi
altalleles = alti.split(',') # could have more than one alt allele (multi-allelic)
if len(altalleles)>1:
varstr = '_'.join([chromi, posi, refi, altalleles[0], suffix])
rsid[varstr] = idi
for i in np.arange(len(altalleles)-1):
varstr = '_'.join([chromi, posi, refi, altalleles[i+1], suffix])
rsid[varstr] = idi
finalvarlist = []
for variant in variantlist:
if not variant.startswith('rs'):
try:
finalvarlist.append(rsid[variant])
except:
finalvarlist.append('.')
else:
finalvarlist.append(variant)
return finalvarlist
def decomposeVariant(variant_list):
"""
Parameters
----------
    variant_list : list
list of str standardized variants in chr_pos_ref_alt_build format
Returns
-------
A pandas.dataframe with chromosome, pos, reference and alternate alleles columns
"""
chromlist = [x.split('_')[0] if len(x.split('_'))==5 else x for x in variant_list]
chromlist = [int(x) if x not in ["X","."] else x for x in chromlist]
poslist = [int(x.split('_')[1]) if len(x.split('_'))==5 else x for x in variant_list]
reflist = [x.split('_')[2] if len(x.split('_'))==5 else x for x in variant_list]
altlist = [x.split('_')[3] if len(x.split('_'))==5 else x for x in variant_list]
df = pd.DataFrame({
default_chromname: chromlist
,default_posname: poslist
,default_refname: reflist
,default_altname: altlist
})
return df
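# Example: decomposeVariant(["1_205001063_T_A_b37"]) yields a single row with chromosome 1,
# position 205001063, ref "T" and alt "A" (column names come from the default_* globals).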
def addVariantID(gwas_data, chromcol, poscol, refcol, altcol, build = "hg19"):
"""
Parameters
----------
gwas_data : pandas.DataFrame
Has a minimum of chromosome, position, reference and alternate allele columns.
chromcol : str
chromosome column name in gwas_data
poscol : str
position column name in gwas_data
refcol : str
reference allele column name in gwas_data
altcol : str
alternate allele column name in gwas_data
Returns
-------
pandas.dataframe with list of standardized variant ID's in chrom_pos_ref_alt_build format added to gwas_data
"""
varlist = []
buildstr = 'b37'
    if build.lower() in ['hg38', 'grch38']:
buildstr = 'b38'
chromlist = list(gwas_data[chromcol])
poslist = list(gwas_data[poscol])
reflist = [x.upper() for x in list(gwas_data[refcol])]
altlist = [x.upper() for x in list(gwas_data[altcol])]
for i in np.arange(gwas_data.shape[0]):
chrom = chromlist[i]
pos = poslist[i]
ref = reflist[i]
alt = altlist[i]
varlist.append('_'.join([str(chrom),str(pos),ref,alt,buildstr]))
gwas_data[default_snpname] = varlist
return gwas_data
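# Example: a row with chrom=1, pos=205001063, ref="t", alt="a" and build="hg19" gains the
# variant ID "1_205001063_T_A_b37" in the default_snpname column.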
def verifyStdSNPs(stdsnplist, regiontxt, build):
# Ensure valid region:
chrom, startbp, endbp = parseRegionText(regiontxt, build)
chrom = str(chrom).replace('23',"X")
# Load GTEx variant lookup table for region indicated
db = client.GTEx_V7
if build.lower() in ["hg38", "grch38"]:
db = client.GTEx_V8
collection = db['variant_table']
variants_query = collection.find(
{ '$and': [
{ 'chr': int(chrom.replace('X','23')) },
{ 'variant_pos': { '$gte': int(startbp), '$lte': int(endbp) } }
]}
)
variants_list = list(variants_query)
    variants_df = pd.DataFrame(variants_list)
# flake8: noqa: F841
import tempfile
from pathlib import Path
from typing import List
from pandas._typing import Scalar, ArrayLike
import pandas as pd
import numpy as np
from pandas.core.window import ExponentialMovingWindow
def test_types_init() -> None:
pd.Series(1)
pd.Series((1, 2, 3))
pd.Series(np.array([1, 2, 3]))
pd.Series(data=[1, 2, 3, 4], name="series")
pd.Series(data=[1, 2, 3, 4], dtype=np.int8)
pd.Series(data={'row1': [1, 2], 'row2': [3, 4]})
pd.Series(data=[1, 2, 3, 4], index=[4, 3, 2, 1], copy=True)
def test_types_any() -> None:
res1: bool = pd.Series([False, False]).any()
res2: bool = pd.Series([False, False]).any(bool_only=False)
res3: bool = pd.Series([np.nan]).any(skipna=False)
def test_types_all() -> None:
res1: bool = pd.Series([False, False]).all()
res2: bool = pd.Series([False, False]).all(bool_only=False)
res3: bool = pd.Series([np.nan]).all(skipna=False)
def test_types_csv() -> None:
s = pd.Series(data=[1, 2, 3])
csv_df: str = s.to_csv()
with tempfile.NamedTemporaryFile() as file:
s.to_csv(file.name)
s2: pd.DataFrame = pd.read_csv(file.name)
with tempfile.NamedTemporaryFile() as file:
s.to_csv(Path(file.name))
s3: pd.DataFrame = pd.read_csv(Path(file.name))
# This keyword was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
with tempfile.NamedTemporaryFile() as file:
s.to_csv(file.name, errors='replace')
s4: pd.DataFrame = pd.read_csv(file.name)
def test_types_copy() -> None:
s = pd.Series(data=[1, 2, 3, 4])
s2: pd.Series = s.copy()
def test_types_select() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s[0]
s[1:]
def test_types_iloc_iat() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s2 = pd.Series(data=[1, 2])
s.loc['row1']
s.iat[0]
s2.loc[0]
s2.iat[0]
def test_types_loc_at() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s2 = pd.Series(data=[1, 2])
s.loc['row1']
s.at['row1']
s2.loc[1]
s2.at[1]
def test_types_boolean_indexing() -> None:
s = pd.Series([0, 1, 2])
s[s > 1]
s[s]
def test_types_df_to_df_comparison() -> None:
s = pd.Series(data={'col1': [1, 2]})
s2 = pd.Series(data={'col1': [3, 2]})
res_gt: pd.Series = s > s2
res_ge: pd.Series = s >= s2
res_lt: pd.Series = s < s2
res_le: pd.Series = s <= s2
res_e: pd.Series = s == s2
def test_types_head_tail() -> None:
s = pd.Series([0, 1, 2])
s.head(1)
s.tail(1)
def test_types_sample() -> None:
s = pd.Series([0, 1, 2])
s.sample(frac=0.5)
s.sample(n=1)
def test_types_nlargest_nsmallest() -> None:
s = pd.Series([0, 1, 2])
s.nlargest(1)
s.nlargest(1, 'first')
s.nsmallest(1, 'last')
s.nsmallest(1, 'all')
def test_types_filter() -> None:
s = pd.Series(data=[1, 2, 3, 4], index=['cow', 'coal', 'coalesce', ''])
s.filter(items=['cow'])
s.filter(regex='co.*')
s.filter(like='al')
def test_types_setting() -> None:
s = pd.Series([0, 1, 2])
s[3] = 4
s[s == 1] = 5
s[:] = 3
def test_types_drop() -> None:
s = pd.Series([0, 1, 2])
res: pd.Series = s.drop(0)
res2: pd.Series = s.drop([0, 1])
res3: pd.Series = s.drop(0, axis=0)
res4: None = s.drop([0, 1], inplace=True, errors='raise')
res5: None = s.drop([0, 1], inplace=True, errors='ignore')
def test_types_drop_multilevel() -> None:
index = pd.MultiIndex(levels=[['top', 'bottom'], ['first', 'second', 'third']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
s = pd.Series(data=[1, 2, 3, 4, 5, 6], index=index)
res: pd.Series = s.drop(labels='first', level=1)
def test_types_dropna() -> None:
s = pd.Series([1, np.nan, np.nan])
res: pd.Series = s.dropna()
res2: None = s.dropna(axis=0, inplace=True)
def test_types_fillna() -> None:
s = pd.Series([1, np.nan, np.nan, 3])
res: pd.Series = s.fillna(0)
res2: pd.Series = s.fillna(0, axis='index')
res3: pd.Series = s.fillna(method='backfill', axis=0)
res4: None = s.fillna(method='bfill', inplace=True)
res5: pd.Series = s.fillna(method='pad')
res6: pd.Series = s.fillna(method='ffill', limit=1)
def test_types_sort_index() -> None:
s = pd.Series([1, 2, 3], index=[2, 3, 1])
res: pd.Series = s.sort_index()
res2: None = s.sort_index(ascending=False, inplace=True)
res3: pd.Series = s.sort_index(kind="mergesort")
# This was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
def test_types_sort_index_with_key() -> None:
s = pd.Series([1, 2, 3], index=['a', 'B', 'c'])
res: pd.Series = s.sort_index(key=lambda k: k.str.lower())
def test_types_sort_values() -> None:
s = pd.Series([4, 2, 1, 3])
res: pd.Series = s.sort_values(0)
res2: pd.Series = s.sort_values(ascending=False)
res3: None = s.sort_values(inplace=True, kind='quicksort')
res4: pd.Series = s.sort_values(na_position='last')
res5: pd.Series = s.sort_values(ignore_index=True)
# This was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
def test_types_sort_values_with_key() -> None:
s = pd.Series([1, 2, 3], index=[2, 3, 1])
res: pd.Series = s.sort_values(key=lambda k: -k)
def test_types_shift() -> None:
s = pd.Series([1, 2, 3])
s.shift()
s.shift(axis=0, periods=1)
s.shift(-1, fill_value=0)
def test_types_rank() -> None:
    s = pd.Series([1, 1, 2, 5, 6, np.nan, 'million'])
s.rank()
s.rank(axis=0, na_option='bottom')
s.rank(method="min", pct=True)
s.rank(method="dense", ascending=True)
s.rank(method="first", numeric_only=True)
def test_types_mean() -> None:
s = pd.Series([1, 2, 3, np.nan])
f1: float = s.mean()
s1: pd.Series = s.mean(axis=0, level=0)
f2: float = s.mean(skipna=False)
f3: float = s.mean(numeric_only=False)
def test_types_median() -> None:
s = pd.Series([1, 2, 3, np.nan])
f1: float = s.median()
s1: pd.Series = s.median(axis=0, level=0)
f2: float = s.median(skipna=False)
f3: float = s.median(numeric_only=False)
def test_types_sum() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.sum()
s.sum(axis=0, level=0)
s.sum(skipna=False)
s.sum(numeric_only=False)
s.sum(min_count=4)
def test_types_cumsum() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.cumsum()
s.cumsum(axis=0)
s.cumsum(skipna=False)
def test_types_min() -> None:
    s = pd.Series([1, 2, 3, np.nan])
# price_data_db.py
import datetime
import os
import pandas as pd
import requests_cache
import sqlalchemy
import yfinance as yf
from requests.api import get
from sqlalchemy.sql.expression import table
# setting some global variables
equities_conn_string = str(os.environ["POSTGRES_DB_EQUITIES"])
equities_pricedata_conn_string = str(os.environ["POSTGRES_DB_EQUITIES_PRICEDATA"])
equities_engine = sqlalchemy.create_engine(equities_conn_string)
equities_pricedata_engine = sqlalchemy.create_engine(equities_pricedata_conn_string)
session = requests_cache.CachedSession("yfinance.cache")
def load_data_from_csv():
# load .csv data for the NASDAQ exchange
    nasdaq_data = pd.read_csv("nasdaq_companies.csv")
from src.orm_model import Tweet, Author
import pandas as pd
from sklearn.linear_model import LogisticRegression
# import pickle
def get_most_likely_author(tweet_body, spacy_model):
authors = Author.query.all()
features = pd.DataFrame()
target = pd.Series()
for a in authors:
for t in a.tweets:
            if len(features) == 0:
features = pd.DataFrame(t.vect).T
else:
features = pd.concat([pd.DataFrame(t.vect).T, features])
            target = target.append(pd.Series([a.name]))
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import pandas as pd
import numpy as np
from tqdm import tqdm
import time
import logging
from sklearn.model_selection import StratifiedKFold
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
from keras.optimizers import Adam
import pandas as pd
from sklearn.metrics import mean_absolute_error, accuracy_score, f1_score
from keras.layers import *
from keras.models import Model
import keras.backend as K
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from keras.activations import softmax
learning_rate = 5e-5
min_learning_rate = 1e-5
batch_size = 32
val_batch_size = 512
pred_batch_size = 512
percent_of_epoch = 0.25 * 0.05
num_epochs = 7 // percent_of_epoch
patience = 4
nfolds = 5
model_path = "./model"
bert_path = "/home/mhxia/workspace/BDCI/chinese_wwm_ext_L-12_H-768_A-12/"
config_path = bert_path + 'bert_config.json'
checkpoint_path = bert_path + 'bert_model.ckpt'
dict_path = bert_path + 'vocab.txt'
MAX_LEN = 64
token_dict = {}
with open(dict_path, 'r', encoding='utf-8') as reader:
for line in reader:
token = line.strip()
token_dict[token] = len(token_dict)
tokenizer = Tokenizer(token_dict)
train= pd.read_csv('./data/train_set.csv')
test=pd.read_csv('./data/dev_set.csv',sep='\t')
train_achievements = train['question1'].values
train_requirements = train['question2'].values
labels = train['label'].values
def label_process(x):
if x==0:
return [1,0]
else:
return [0,1]
train['label']=train['label'].apply(label_process)
labels_cat=list(train['label'].values)
labels_cat=np.array(labels_cat)
test_achievements = test['question1'].values
test_requirements = test['question2'].values
print(train.shape,test.shape)
def tokenize_data(X1, X2):
T,T_ = [], []
for i, _ in enumerate(X1):
achievements = X1[i]
requirements = X2[i]
t, t_ = tokenizer.encode(first=achievements, second=requirements, max_len=MAX_LEN)
T.append(t)
T_.append(t_)
T = np.array(T)
T_ = np.array(T_)
return T, T_
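# Returns the BERT token-id and segment-id matrices for each (question1, question2) pair,
# each row padded/truncated to MAX_LEN by tokenizer.encode().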
def apply_multiple(input_, layers):
if not len(layers) > 1:
raise ValueError('Layers list should contain more than 1 layer')
else:
agg_ = []
for layer in layers:
agg_.append(layer(input_))
out_ = Concatenate()(agg_)
return out_
def unchanged_shape(input_shape):
return input_shape
def substract(input_1, input_2):
neg_input_2 = Lambda(lambda x: -x, output_shape=unchanged_shape)(input_2)
out_ = Add()([input_1, neg_input_2])
return out_
def submult(input_1, input_2):
mult = Multiply()([input_1, input_2])
sub = substract(input_1, input_2)
out_ = Concatenate()([sub, mult])
return out_
def soft_attention_alignment(input_1, input_2):
attention = Dot(axes=-1)([input_1, input_2])
    w_att_1 = Lambda(lambda x: softmax(x, axis=1),  # softmax over each column
                     output_shape=unchanged_shape)(attention)
    w_att_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2),  # softmax over each row (axis=2)
                                     output_shape=unchanged_shape)(attention))
in1_aligned = Dot(axes=1)([w_att_1, input_1])
in2_aligned = Dot(axes=1)([w_att_2, input_2])
return in1_aligned, in2_aligned
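# ESIM-style soft alignment: every token of one question is re-expressed as an
# attention-weighted sum of the other question's token encodings, and vice versa.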
def focal_loss(y_true, y_pred, alpha=0.25, gamma=2.):
y_pred = K.clip(y_pred, 1e-8, 1 - 1e-8)
return - alpha * y_true * K.log(y_pred) * (1 - y_pred)**gamma\
- (1 - alpha) * (1 - y_true) * K.log(1 - y_pred) * y_pred**gamma
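# Focal loss: the (1 - p)**gamma modulating factor down-weights easy, well-classified
# examples, while alpha re-balances the positive/negative classes.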
def get_model():
bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path)
# for l in bert_model.layers:
# l.trainable = True
T1 = Input(shape=(None,))
T2 = Input(shape=(None,))
tp1 = Lambda(lambda x: K.zeros_like(x))(T1)
tp2 = Lambda(lambda x: K.zeros_like(x))(T2)
x1 = bert_model([T1, tp1])
x2 = bert_model([T2, tp2])
X1 = Lambda(lambda x: x[:, 0:-1])(x1)
X2 = Lambda(lambda x: x[:, 0:-1])(x2)
encode = Bidirectional(LSTM(200, return_sequences=True))
q1_encoded = encode(X1)
q2_encoded = encode(X2)
q1_aligned, q2_aligned = soft_attention_alignment(q1_encoded, q2_encoded)
q1_combined = Concatenate()([q1_encoded, q2_aligned, submult(q1_encoded, q2_aligned)])
q2_combined = Concatenate()([q2_encoded, q1_aligned, submult(q2_encoded, q1_aligned)])
compose = Bidirectional(GRU(200, return_sequences=True))
q1_compare = compose(q1_combined)
q2_compare = compose(q2_combined)
# Aggregate
q1_rep = apply_multiple(q1_compare, [GlobalAvgPool1D(), GlobalMaxPool1D()])
q2_rep = apply_multiple(q2_compare, [GlobalAvgPool1D(), GlobalMaxPool1D()])
# Classifier
merged = Concatenate()([q1_rep, q2_rep])
dense = BatchNormalization()(merged)
dense = Dense(30, activation='selu')(dense)
dense = BatchNormalization()(dense)
output = Dense(2, activation='softmax')(dense)
model = Model([T1, T2], output)
model.compile(
# loss='categorical_crossentropy',
loss=focal_loss,
        optimizer=Adam(1e-3),  # use a sufficiently small learning rate
metrics=['accuracy']
)
model.summary()
return model
skf = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=42)
oof_train = np.zeros((len(train), 2), dtype=np.float32)
oof_test = np.zeros((len(test), 2), dtype=np.float32)
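# Out-of-fold containers: oof_test accumulates the summed test-set predictions across the
# cross-validation folds (averaged after the loop); oof_train is sized for the
# corresponding validation-fold predictions.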
for fold, (train_index, valid_index) in enumerate(skf.split(train_achievements, labels)):
x1 = train_achievements[train_index]
x2 = train_requirements[train_index]
x1_token, x2_token = tokenize_data(x1, x2)
y = labels_cat[train_index]
val_x1 = train_achievements[valid_index]
val_x2 = train_requirements[valid_index]
val_x1_token, val_x2_token = tokenize_data(val_x1, val_x2)
val_y = labels_cat[valid_index]
early_stopping = EarlyStopping(monitor='val_accuracy', patience=patience, verbose=1)
model_checkpoint = ModelCheckpoint(model_path+"model_%s.w"%fold, monitor='val_accuracy', verbose=1,save_best_only=True, save_weights_only=False, mode='auto')
model = get_model()
model.fit(x=[x1_token, x2_token], y=y,
validation_data= ([val_x1_token, val_x2_token],val_y),
batch_size=batch_size,
epochs=num_epochs,
# steps_per_epoch= (len(x1)+ batch_size -1) // batch_size * percent_of_epoch,
# validation_steps = (len(val_x1)+ batch_size -1) // batch_size * percent_of_epoch ,
verbose=1,
callbacks=[early_stopping, model_checkpoint]
)
# model.load_weights('bert{}.w'.format(fold))
test_x1, test_x2 = tokenize_data(test_achievements, test_requirements)
    oof_test += model.predict([test_x1, test_x2], batch_size=pred_batch_size)
K.clear_session()
oof_test /= nfolds
test=pd.DataFrame(oof_test)
test.to_csv('test_pred.csv',index=False)
train=pd.DataFrame(oof_train)
train.to_csv('train_pred.csv',index=False)
pred=pd.read_csv('test_pred.csv').values
pred=pred.argmax(axis=1)
sub = pd.DataFrame()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `gffpandas` package."""
# standard library imports
import shutil
import time
from pathlib import Path
# first-party imports
import gffpandas.gffpandas as gff3pd
# third-party imports
import pandas as pd
# module imports
from . import print_docstring
# global constants
REFSEQ_URL = (
"https://ftp.ncbi.nih.gov/genomes/refseq/vertebrate_mammalian"
+ "/Homo_sapiens/annotation_releases/109.20191205/GCF_000001405.39_GRCh38.p13/"
)
HUMAN_GFF = "GCF_000001405.39_GRCh38.p13_genomic.gff"
TESTFILELIST = ["test_file.gff"]
written_df = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"region",
1,
4000,
".",
"+",
".",
"Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type="
"genomic DNA;serovar=Typhimurium;strain=SL1344",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
13,
235,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
600,
".",
"-",
".",
"ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
21,
345,
".",
"-",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
61,
195,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
34,
335,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
)
written_header = "##gff-version 3\n" "##sequence-region NC_016810.1 1 20\n"
written_csv = (
"seq_id,source,type,start,end,score,strand,phase,attributes\n"
"NC_016810.1,RefSeq,region,1,4000,.,+,.,Dbxref=taxon:216597;ID="
"id0;gbkey=Src;genome=genomic;mol_type=genomic DNA;serovar="
"Typhimurium;strain=SL1344\n"
"NC_016810.1,RefSeq,gene,1,20,.,+,.,ID=gene1;Name=thrL;gbkey="
"Gene;gene=thrL;locus_tag=SL1344_0001\n"
"NC_016810.1,RefSeq,CDS,13,235,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,1,20,.,+,.,ID=gene2;Name=thrA;gbkey="
"Gene;gene=thrA;locus_tag=SL1344_0002\n"
"NC_016810.1,RefSeq,CDS,341,523,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,1,600,.,-,.,ID=gene3;Name=thrX;gbkey="
"Gene;gene=thrX;locus_tag=SL1344_0003\n"
"NC_016810.1,RefSeq,CDS,21,345,.,-,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene3;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,41,255,.,+,.,ID=gene4;Name=thrB;gbkey="
"Gene;gene=thrB;locus_tag=SL1344_0004\n"
"NC_016810.1,RefSeq,CDS,61,195,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,170,546,.,+,.,ID=gene5;Name=thrC;gbkey"
"=Gene;gene=thrC;locus_tag=SL1344_0005\n"
"NC_016810.1,RefSeq,CDS,34,335,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene5;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
)
written_tsv = (
"seq_id\tsource\ttype\tstart\tend\tscore\tstrand\tphase\t"
"attributes\n"
"NC_016810.1\tRefSeq\tregion\t1\t4000\t.\t+\t.\tDbxref=taxon:21"
"6597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA;"
"serovar=Typhimurium;strain=SL1344\n"
"NC_016810.1\tRefSeq\tgene\t1\t20\t.\t+\t.\tID=gene1;Name=thrL;"
"gbkey=Gene;gene=thrL;locus_tag=SL1344_0001\n"
"NC_016810.1\tRefSeq\tCDS\t13\t235\t.\t+\t0\tDbxref=UniProtKB%2"
"52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051"
"79941.1;Parent=gene1;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t1\t20\t.\t+\t.\tID=gene2;Name=thrA;"
"gbkey=Gene;gene=thrA;locus_tag=SL1344_0002\n"
"NC_016810.1\tRefSeq\tCDS\t341\t523\t.\t+\t0\tDbxref=UniProtKB%"
"252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_005"
"179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t1\t600\t.\t-\t.\tID=gene3;Name=thrX"
";gbkey=Gene;gene=thrX;locus_tag=SL1344_0003\n"
"NC_016810.1\tRefSeq\tCDS\t21\t345\t.\t-\t0\tDbxref=UniProtKB%2"
"52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051"
"79941.1;Parent=gene3;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t41\t255\t.\t+\t.\tID=gene4;Name="
"thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004\n"
"NC_016810.1\tRefSeq\tCDS\t61\t195\t.\t+\t0\tDbxref=UniProtKB%2"
"52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051"
"79941.1;Parent=gene4;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t170\t546\t.\t+\t.\tID=gene5;Name="
"thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005\n"
"NC_016810.1\tRefSeq\tCDS\t34\t335\t.\t+\t0\tDbxref=UniProt"
"KB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name="
"YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon "
"leader peptide;protein_id=YP_005179941.1;transl_table=11\n"
)
written_gff = (
"##gff-version 3\n"
"##sequence-region NC_016810.1 1 20\n"
"NC_016810.1 RefSeq region 1 4000 . +"
" . Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=ge"
"nomic;mol_type=genomic DNA;serovar=Typhimurium;strain=SL1344\n"
"NC_016810.1 RefSeq gene 1 20 . +"
" . ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_"
"tag=SL1344_0001\n"
"NC_016810.1 RefSeq CDS 13 235 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene1;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 1 20 . +"
" . ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_"
"tag=SL1344_0002\n"
"NC_016810.1 RefSeq CDS 341 523 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene2;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 1 600 . -"
" . ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_"
"tag=SL1344_0003\n"
"NC_016810.1 RefSeq CDS 21 345 . -"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene3;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 41 255 . +"
" . ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_"
"tag=SL1344_0004\n"
"NC_016810.1 RefSeq CDS 61 195 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene4;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 170 546 . +"
" . ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_"
"tag=SL1344_0005\n"
"NC_016810.1 RefSeq CDS 34 335 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene5;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
)
written_filtered_length = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
13,
235,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
61,
195,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[1, 2, 3, 4, 7, 8],
)
compare_get_feature_by_attribute = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
600,
".",
"-",
".",
"ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[1, 3, 5, 7, 9],
)
compare_get_feature_by_attribute2 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
21,
345,
".",
"-",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
61,
195,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[4, 6, 8],
)
written_attribute_df = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"region",
1,
4000,
".",
"+",
".",
"Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic"
" DNA;serovar=Typhimurium;strain=SL1344",
"taxon:216597",
"id0",
None,
None,
"Src",
None,
"genomic",
None,
"genomic DNA",
None,
None,
"Typhimurium",
"SL1344",
None,
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
None,
"gene1",
"thrL",
None,
"Gene",
"thrL",
None,
"SL1344_0001",
None,
None,
None,
None,
None,
None,
],
[
"NC_016810.1",
"RefSeq",
"CDS",
13,
235,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader"
" peptide;protein_id=YP_005179941.1;transl_table=11",
"UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
"cds0",
"YP_005179941.1",
"gene1",
"CDS",
None,
None,
None,
None,
"thr operon leader peptide",
"YP_005179941.1",
None,
None,
"11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
None,
"gene2",
"thrA",
None,
"Gene",
"thrA",
None,
"SL1344_0002",
None,
None,
None,
None,
None,
None,
],
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader"
" peptide;protein_id=YP_005179941.1;transl_table=11",
"UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
"cds0",
"YP_005179941.1",
"gene2",
"CDS",
None,
None,
None,
None,
"thr operon leader peptide",
"YP_005179941.1",
None,
None,
"11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
600,
".",
"-",
".",
"ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
None,
"gene3",
"thrX",
None,
"Gene",
"thrX",
None,
"SL1344_0003",
None,
None,
None,
None,
None,
None,
],
[
"NC_016810.1",
"RefSeq",
"CDS",
21,
345,
".",
"-",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon leader"
" peptide;protein_id=YP_005179941.1;transl_table=11",
"UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
"cds0",
"YP_005179941.1",
"gene3",
"CDS",
None,
None,
None,
None,
"thr operon leader peptide",
"YP_005179941.1",
None,
None,
"11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
None,
"gene4",
"thrB",
None,
"Gene",
"thrB",
None,
"SL1344_0004",
None,
None,
None,
None,
None,
None,
],
[
"NC_016810.1",
"RefSeq",
"CDS",
61,
195,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader"
" peptide;protein_id=YP_005179941.1;transl_table=11",
"UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
"cds0",
"YP_005179941.1",
"gene4",
"CDS",
None,
None,
None,
None,
"thr operon leader peptide",
"YP_005179941.1",
None,
None,
"11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
None,
"gene5",
"thrC",
None,
"Gene",
"thrC",
None,
"SL1344_0005",
None,
None,
None,
None,
None,
None,
],
[
"NC_016810.1",
"RefSeq",
"CDS",
34,
335,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon leader"
" peptide;protein_id=YP_005179941.1;transl_table=11",
"UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
"cds0",
"YP_005179941.1",
"gene5",
"CDS",
None,
None,
None,
None,
"thr operon leader peptide",
"YP_005179941.1",
None,
None,
"11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
"Dbxref",
"ID",
"Name",
"Parent",
"gbkey",
"gene",
"genome",
"locus_tag",
"mol_type",
"product",
"protein_id",
"serovar",
"strain",
"transl_table",
],
)
strand_counts = written_df["strand"].value_counts().to_dict()
type_counts = written_df["type"].value_counts().to_dict()
compare_stats_dic = {
"Maximal_bp_length": 599,
"Minimal_bp_length": 19,
"Counted_strands": strand_counts,
"Counted_feature_types": type_counts,
}
df_empty = pd.DataFrame(
{},
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[],
)
redundant_entry = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[3],
)
compare_filter_feature_df = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
600,
".",
"-",
".",
"ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[1, 3, 5, 7, 9],
)
compare_overlap_gene_1_40 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[1, 3],
)
compare_overlap_40_300 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"region",
1,
4000,
".",
"+",
".",
"Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA"
";serovar=Typhimurium;strain=SL1344",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
13,
235,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
61,
195,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
34,
335,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[0, 2, 7, 8, 9, 10],
)
compare_overlap_170_171 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
600,
".",
"-",
".",
"ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
21,
345,
".",
"-",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[5, 6],
)
compare_overlap_525_545 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"region",
1,
4000,
".",
"+",
".",
"Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA"
";serovar=Typhimurium;strain=SL1344",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[0, 9],
)
compare_overlap_341_500 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"region",
1,
4000,
".",
"+",
".",
"Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA"
";serovar=Typhimurium;strain=SL1344",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader pep"
"tide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[0, 4, 9],
)
compare_complement = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[1, 3, 4],
)
def generate_gff3_df():
read_in_file = gff3pd.read_gff3("test_file.gff")
return read_in_file
@print_docstring()
def test_clean_datadir(request):
"""Clean up datadir."""
testdir = Path(request.fspath.dirpath())
datadir = testdir / "data"
if datadir.exists():
shutil.rmtree(datadir) # remove anything left in data directory
@print_docstring()
def test_setup_datadir(request, datadir_mgr, capsys):
"""Copy in and download static data."""
testdir = Path(request.fspath.dirpath())
datadir = testdir / "data"
filesdir = testdir / "testdata"
shutil.copytree(filesdir, datadir)
with capsys.disabled():
datadir_mgr.download(
download_url=REFSEQ_URL,
files=[HUMAN_GFF],
scope="global",
md5_check=False,
gunzip=True,
progressbar=True,
)
@print_docstring()
def test_read_gff3_if_df_type(datadir_mgr):
"""Test basic gff3dataframe creation."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
assert type(gff3_df) == gff3pd.Gff3DataFrame
pd.testing.assert_frame_equal(gff3_df.df, written_df)
@print_docstring()
def test_generate_gff_header(datadir_mgr):
"""Test GFF header generation."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
object_header = generate_gff3_df()
generate_header = object_header._read_gff_header()
assert type(object_header) == gff3pd.Gff3DataFrame
assert object_header.header == written_header
assert generate_header == written_header
@print_docstring()
def test_if_df_values_equal_gff_values(datadir_mgr):
"""Testing whether dataframe values equal input GFF values."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
test_df_object = generate_gff3_df()
test_df = test_df_object._read_gff3_to_df()
assert type(test_df_object) == gff3pd.Gff3DataFrame
pd.testing.assert_frame_equal(test_df, written_df)
@print_docstring()
def test_to_csv(datadir_mgr):
"""Test CSV file creation."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
gff3_df.to_csv("temp.csv")
csv_content = open("temp.csv").read()
assert csv_content == written_csv
@print_docstring()
def test_to_tsv(datadir_mgr):
"""Test TSV file creation."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
gff3_df.to_tsv("temp.tsv")
tsv_content = open("temp.tsv").read()
assert tsv_content == written_tsv
@print_docstring()
def test_to_gff3(datadir_mgr):
"""Test GFF file creation and rereading."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
gff3_df.to_gff3("temp.gff")
gff_content = open("temp.gff").read()
assert gff_content == written_gff
read_gff_output = gff3pd.read_gff3("temp.gff")
read_in_file = gff3pd.read_gff3("test_file.gff")
pd.testing.assert_frame_equal(read_in_file.df, read_gff_output.df)
@print_docstring()
def test_filter_feature_of_type(datadir_mgr):
"""Test feature filtering."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
object_type_df = gff3_df.filter_feature_of_type(["gene"])
assert type(object_type_df) == gff3pd.Gff3DataFrame
assert object_type_df.df.empty == compare_filter_feature_df.empty
pd.testing.assert_frame_equal(object_type_df.df, compare_filter_feature_df)
assert object_type_df.header == written_header
@print_docstring()
def test_filter_by_length(datadir_mgr):
"""Test filtering by length."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
filtered_length = gff3_df.filter_by_length(min_length=10, max_length=300)
assert type(filtered_length) == gff3pd.Gff3DataFrame
pd.testing.assert_frame_equal(filtered_length.df, written_filtered_length)
assert filtered_length.header == written_header
@print_docstring()
def test_get_feature_by_attribute(datadir_mgr):
"""Test get feature by attibute."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
filtered_gff3_df = gff3_df.get_feature_by_attribute("gbkey", ["Gene"])
filtered_gff3_df2 = gff3_df.get_feature_by_attribute(
"Parent", ["gene2", "gene3", "gene4"]
)
filtered_gff3_df3 = gff3_df.get_feature_by_attribute(
"locus_tag", ["SL1344_0006"]
)
assert type(filtered_gff3_df) == gff3pd.Gff3DataFrame
assert type(filtered_gff3_df2) == gff3pd.Gff3DataFrame
assert type(filtered_gff3_df3) == gff3pd.Gff3DataFrame
assert filtered_gff3_df.df.shape == (5, 9)
pd.testing.assert_frame_equal(
filtered_gff3_df.df, compare_get_feature_by_attribute
)
pd.testing.assert_frame_equal(
filtered_gff3_df2.df, compare_get_feature_by_attribute2
)
assert filtered_gff3_df3.df.shape == df_empty.shape
@print_docstring()
def test_attributes_to_columns(datadir_mgr):
"""Test attributes to columns."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
gff3_df_with_attr_columns = gff3_df.attributes_to_columns()
assert gff3_df_with_attr_columns.shape == (11, 23)
assert gff3_df_with_attr_columns.shape == written_attribute_df.shape
assert type(gff3_df_with_attr_columns) == type(written_attribute_df)
pd.testing.assert_frame_equal(gff3_df_with_attr_columns, written_attribute_df)
@print_docstring()
def test_stats_dic(datadir_mgr):
"""Test stats dictionary."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
stats_dict = gff3_df.stats_dic()
assert type(stats_dict) == type(compare_stats_dic)
assert stats_dict.keys() == compare_stats_dic.keys()
assert stats_dict["Maximal_bp_length"] == compare_stats_dic["Maximal_bp_length"]
assert stats_dict["Minimal_bp_length"] == compare_stats_dic["Minimal_bp_length"]
assert stats_dict["Counted_strands"] == compare_stats_dic["Counted_strands"]
assert (
stats_dict["Counted_feature_types"]
== compare_stats_dic["Counted_feature_types"]
)
@print_docstring()
def test_overlaps_with(datadir_mgr):
"""Test finding overlaps."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
overlap_gene_1_40 = gff3_df.overlaps_with(
seq_id="NC_016810.1", type="gene", start=1, end=40, strand="+"
)
overlap_40_300 = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=40, end=300, strand="+"
)
overlap_170_171 = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=170, end=171, strand="-"
)
overlap_525_545 = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=525, end=545, strand="+"
)
overlap_341_500 = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=341, end=500, strand="+"
)
complement_test = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=40, end=300, strand="+", complement=True
)
out_of_region = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=1, end=4000, strand="+", complement=True
)
assert type(overlap_gene_1_40) == gff3pd.Gff3DataFrame
assert type(overlap_40_300) == gff3pd.Gff3DataFrame
assert type(overlap_170_171) == gff3pd.Gff3DataFrame
assert type(overlap_525_545) == gff3pd.Gff3DataFrame
assert type(overlap_341_500) == gff3pd.Gff3DataFrame
assert type(complement_test) == gff3pd.Gff3DataFrame
assert type(out_of_region) == gff3pd.Gff3DataFrame
pd.testing.assert_frame_equal(overlap_gene_1_40.df, compare_overlap_gene_1_40)
pd.testing.assert_frame_equal(overlap_40_300.df, compare_overlap_40_300)
pd.testing.assert_frame_equal(overlap_170_171.df, compare_overlap_170_171)
pd.testing.assert_frame_equal(overlap_525_545.df, compare_overlap_525_545)
pd.testing.assert_frame_equal(overlap_341_500.df, compare_overlap_341_500)
pd.testing.assert_frame_equal(complement_test.df, compare_complement)
assert out_of_region.df.shape == df_empty.shape
@print_docstring()
def test_find_duplicated_entries(datadir_mgr):
"""Test finding duplicated entries."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
redundant_df = gff3_df.find_duplicated_entries(
seq_id="NC_016810.1", type="gene"
)
redundant_df2 = gff3_df.find_duplicated_entries(
seq_id="NC_016810.1", type="CDS"
)
assert type(redundant_df) == gff3pd.Gff3DataFrame
assert type(redundant_df2) == gff3pd.Gff3DataFrame
        pd.testing.assert_frame_equal(redundant_df.df, redundant_entry)
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
        # This test validates that both the Python and C engines raise
        # UnicodeDecodeError instead of the C engine raising ParserError
        # and swallowing the exception that caused the read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
            # Only check the type for the forward op: with ops.radd the
            # expression is datetime + Timedelta, which does _not_ call
            # Timedelta.__radd__, so that path returns a plain datetime
            # rather than a Timestamp.
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
with pytest.raises(TypeError):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types"
r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
with pytest.raises(TypeError):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
with pytest.raises(TypeError):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5.0
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
td = Timedelta(hours=3, minutes=4)
assert td // offsets.Hour(1) == 3
assert td // offsets.Minute(2) == 92
def test_td_floordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
with pytest.raises(TypeError):
td // np.datetime64("2016-01-01", dtype="datetime64[us]")
def test_td_floordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
expected = Timedelta(hours=1, minutes=32)
assert td // 2 == expected
assert td // 2.0 == expected
assert td // np.float64(2.0) == expected
assert td // np.int32(2.0) == expected
assert td // np.uint8(2.0) == expected
def test_td_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
# Array-like others
assert td // np.array(scalar.to_timedelta64()) == 1
res = (3 * td) // np.array([scalar.to_timedelta64()])
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")])
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
ser = pd.Series([1], dtype=np.int64)
res = td // ser
assert res.dtype.kind == "m"
# ---------------------------------------------------------------
# Timedelta.__rfloordiv__
def test_td_rfloordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# scalar others
# x // Timedelta is defined only for timedelta-like x. int-like,
# float-like, and date-like, in particular, should all either
# a) raise TypeError directly or
# b) return NotImplemented, following which the reversed
# operation will raise TypeError.
assert td.__rfloordiv__(scalar) == 1
assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2
assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0
def test_td_rfloordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert np.isnan(td.__rfloordiv__(NaT))
assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT")))
def test_td_rfloordiv_offsets(self):
# GH#19738
assert offsets.Hour(1) // Timedelta(minutes=25) == 2
def test_td_rfloordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
dt64 = np.datetime64("2016-01-01", "us")
with pytest.raises(TypeError):
td.__rfloordiv__(dt64)
def test_td_rfloordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert td.__rfloordiv__(np.nan) is NotImplemented
assert td.__rfloordiv__(3.5) is NotImplemented
assert td.__rfloordiv__(2) is NotImplemented
with pytest.raises(TypeError):
td.__rfloordiv__(np.float64(2.0))
with pytest.raises(TypeError):
td.__rfloordiv__(np.uint8(9))
with pytest.raises(TypeError, match="Invalid dtype"):
# deprecated GH#19761, enforced GH#29797
td.__rfloordiv__(np.int32(2.0))
def test_td_rfloordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# Array-like others
assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1
res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()]))
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
arr = np.array([(10 * scalar).to_timedelta64(), np.timedelta64("NaT")])
res = td.__rfloordiv__(arr)
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_rfloordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
ser = pd.Series([1], dtype=np.int64)
res = td.__rfloordiv__(ser)
assert res is NotImplemented
with pytest.raises(TypeError, match="Invalid dtype"):
            # Deprecated GH#19761, enforced GH#29797 (now raises TypeError)
ser // td
# ----------------------------------------------------------------
# Timedelta.__mod__, __rmod__
def test_mod_timedeltalike(self):
# GH#19365
td = Timedelta(hours=37)
# Timedelta-like others
result = td % Timedelta(hours=6)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
result = td % timedelta(minutes=60)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % NaT
assert result is NaT
def test_mod_timedelta64_nat(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64("NaT", "ns")
assert result is NaT
def test_mod_timedelta64(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64(2, "h")
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
def test_mod_offset(self):
# GH#19365
td = Timedelta(hours=37)
result = td % offsets.Hour(5)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=2)
def test_mod_numeric(self):
# GH#19365
td = Timedelta(hours=37)
# Numeric Others
result = td % 2
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % 1e12
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
result = td % int(1e12)
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
def test_mod_invalid(self):
# GH#19365
td = Timedelta(hours=37)
with pytest.raises(TypeError):
td % Timestamp("2018-01-22")
with pytest.raises(TypeError):
td % []
def test_rmod_pytimedelta(self):
# GH#19365
td = Timedelta(minutes=3)
result = timedelta(minutes=4) % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=1)
def test_rmod_timedelta64(self):
# GH#19365
td = Timedelta(minutes=3)
result = np.timedelta64(5, "m") % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=2)
def test_rmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
with pytest.raises(TypeError):
Timestamp("2018-01-22") % td
with pytest.raises(TypeError):
15 % td
with pytest.raises(TypeError):
16.0 % td
with pytest.raises(TypeError):
np.array([22, 24]) % td
# ----------------------------------------------------------------
# Timedelta.__divmod__, __rdivmod__
def test_divmod_numeric(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, 53 * 3600 * 1e9)
assert result[0] == Timedelta(1, unit="ns")
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=1)
assert result
result = divmod(td, np.nan)
assert result[0] is NaT
assert result[1] is NaT
def test_divmod(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
result = divmod(td, 54)
assert result[0] == Timedelta(hours=1)
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(0)
result = divmod(td, NaT)
assert np.isnan(result[0])
assert result[1] is NaT
def test_divmod_offset(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, offsets.Hour(-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_divmod_invalid(self):
# GH#19365
td = Timedelta(days=2, hours=6)
with pytest.raises(TypeError):
divmod(td, Timestamp("2018-01-22"))
def test_rdivmod_pytimedelta(self):
# GH#19365
result = divmod(timedelta(days=2, hours=6), Timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
def test_rdivmod_offset(self):
result = divmod(offsets.Hour(54), | Timedelta(hours=-4) | pandas.Timedelta |
import os
import pandas as pd
import numpy as np
from collections import Counter
from imblearn.datasets import make_imbalance
from imblearn.over_sampling import SMOTE, ADASYN
from sklearn.utils import shuffle
os.chdir('/content/gdrive/My Drive/training_testing_data/')
train = pd.read_csv('train_data_rp_3_IMBALANCED.csv')
X_train = train.iloc[:, :-1]
X_train = X_train.values
Y_train = train.iloc[:, -1:]
Y_train = Y_train.values
oversample = SMOTE()
X_train_SMOTE, Y_train_SMOTE = oversample.fit_resample(X_train, Y_train)
print('SMOTE:', sorted(Counter(Y_train_SMOTE).items()))
X_train_SMOTE, Y_train_SMOTE = shuffle(X_train_SMOTE, Y_train_SMOTE, random_state=42)
X_train_SMOTE = pd.DataFrame(X_train_SMOTE)
Y_train_SMOTE = pd.DataFrame(Y_train_SMOTE)
train_SMOTE = | pd.concat([X_train_SMOTE, Y_train_SMOTE], axis=1, ignore_index=True) | pandas.concat |
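# ADASYN is imported above but never used. A minimal sketch of the analogous
# oversampling step, assuming the same X_train/Y_train as the SMOTE branch
# (illustrative only -- not part of the original script):
oversample_adasyn = ADASYN()
X_train_ADASYN, Y_train_ADASYN = oversample_adasyn.fit_resample(X_train, Y_train)
print('ADASYN:', sorted(Counter(Y_train_ADASYN).items()))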
import asyncio
import itertools
import pandas as pd
import math
import numbers
def series_function():
"""
Wrap a standard function to allow for a Series of values to
be applied instead.
"""
def decorator(f):
def handler(*xs):
is_series = any(isinstance(x, pd.Series) for x in xs)
if is_series:
args = zip(*(x.array if isinstance(x, pd.Series) else itertools.repeat(x) for x in xs))
# return a series of the results
return pd.Series(f(*xs) for xs in args)
# just return the scalar of the function
return f(*xs)
return handler
return decorator
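# Usage sketch for series_function (illustrative; ``_example_add`` is not part
# of the original module): a decorated scalar function transparently
# broadcasts element-wise whenever any argument is a pandas Series.
@series_function()
def _example_add(a, b):
    return a + b

# _example_add(1, 2)                      -> 3
# _example_add(pd.Series([1, 2, 3]), 10)  -> pd.Series([11, 12, 13])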
def parseries_function():
"""
Similar to series_function, except that the wrapped function
is asynchronous and will be executed in parallel using an
executor.
It is assumed that the first argument to the wrapped function
is the executor to use.
"""
def decorator(f):
async def handler(*xs):
is_series = any(isinstance(x, pd.Series) for x in xs)
loop = asyncio.get_event_loop()
if is_series:
args = zip(*(x.array if isinstance(x, pd.Series) else itertools.repeat(x) for x in xs))
# submit the jobs to the executor
jobs = [loop.run_in_executor(xs[0], f, *xs[1:]) for xs in args]
# wait for all the jobs to complete
data = [await job for job in jobs]
# union the results together in a series
return pd.Series(rs for rs in data)
# just run a single job
return await loop.run_in_executor(xs[0], f, *xs[1:])
return handler
return decorator
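# Usage sketch for parseries_function (illustrative; the names below are
# assumptions): the wrapped function stays synchronous, and the first argument
# at call time is the executor the decorated coroutine fans work out to.
from concurrent.futures import ThreadPoolExecutor

@parseries_function()
def _example_square(x):
    return x * x

async def _example_parallel_squares():
    with ThreadPoolExecutor() as pool:
        # evaluates to pd.Series([1, 4, 9])
        return await _example_square(pool, pd.Series([1, 2, 3]))

# asyncio.run(_example_parallel_squares()) would drive the example to completion.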
def unknown_function(f):
"""
Simple handler for an unknown function.
"""
def handler(*args):
raise RuntimeError(f'Unknown function {f}')
return handler
def is_na(x):
"""
Helper function, acts like a unary operator.is_na.
"""
return x.isna() if isinstance(x, pd.Series) else x is None or math.isnan(x)
def is_not_na(x):
"""
Helper function, acts like a unary operator.is_not_na.
"""
return x.notna() if isinstance(x, pd.Series) else not (x is None or math.isnan(x))
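# Quick examples (illustrative): both helpers mirror pandas' NA semantics for
# scalars and Series alike.
#
#     is_na(float("nan"))             # -> True
#     is_na(pd.Series([1.0, None]))   # -> pd.Series([False, True])
#     is_not_na(None)                 # -> False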
def is_in(a, b, **kwargs):
"""
Test if an element is within the series.
"""
if not isinstance(b, pd.Series):
b = | pd.Series([b]) | pandas.Series |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import os
from collections import Counter
desired_width = 320
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns', 10)
# import the red onion (bawang merah) dataset
path =r'C:\Users\fikri\Desktop\pyproj\komoditas bawang\komoditas'
filenames = glob.glob(os.path.join(path, "*.csv"))
dfs = []
for csv in filenames:
frame = | pd.read_csv(csv) | pandas.read_csv |