# %% [markdown]
# # Models and Ensembling Methods
# %% [markdown]
# ## Import dependencies
import numpy
from gensim.models import word2vec
from gensim.models import KeyedVectors
import pandas
from nltk import WordPunctTokenizer
from sklearn.preprocessing import label_binarize
import sqlite3
from sklearn.multiclass import OneVsRestClassifier
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn import svm
from itertools import cycle
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate
from sklearn.metrics import precision_score, recall_score, roc_auc_score
from sklearn.metrics import multilabel_confusion_matrix, confusion_matrix
from sklearn.metrics import make_scorer
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import GridSearchCV
from mlxtend.plotting import plot_learning_curves
import lime
import lime.lime_tabular
# %% [markdown]
# ## Define Constants
W2V_FEATURE_SIZE = 300
N_CLASSES = 4
RANDOM_STATE = 123
N_FOLDS = 5
# %% [markdown]
# ## Read in the data
# %% [markdown]
# ### Load raw train and test data
# %% [markdown]
# #### Load in the data from the database
# %%
dbconn = sqlite3.connect('./data/cleanedtraintest_v2.db')
train_data_df = pandas.read_sql_query(
'SELECT category, content_cleaned FROM train_data', dbconn)
test_data_df = pandas.read_sql_query(
'SELECT category, content_cleaned FROM test_data', dbconn)
dbconn.commit()
dbconn.close()
# %% [markdown]
# #### Check if the data was loaded correctly
# %%
train_data_df
# %%
test_data_df
# %% [markdown]
# #### Train & test data: x holds the predictor features, y the predicted category
x_train = train_data_df.content_cleaned
y_train = label_binarize(train_data_df.category, classes=range(1, N_CLASSES + 1))
x_test = test_data_df.content_cleaned
y_test = label_binarize(test_data_df.category, classes=range(1, N_CLASSES + 1))
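# %% [markdown]
# #### Quick sanity check (illustrative addition, not part of the original notebook):
# #### label_binarize maps the 1-4 category labels onto an (n_samples, N_CLASSES) indicator matrix,
# #### which is the layout the One-vs-Rest classifiers and the per-class PR curves below expect.
# %%
print(y_train.shape)  # expected: (n_train_samples, N_CLASSES)
print(y_train[0])     # e.g. a category-2 article becomes [0, 1, 0, 0]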
# %% [markdown]
# ### Load word2vec data
# %% [markdown]
# #### Load word2vec feature arrays from .npz files
# load dict of arrays
w2v_train_features_array_dict = numpy.load(
'./data/word2vec-train-features-120000-min5dim300.npz')
w2v_test_features_array_dict = numpy.load(
'./data/word2vec-test-features-120000-min5dim300.npz')
# extract the first array from train
data = w2v_train_features_array_dict['arr_0']
# print the array
print(data)
# extract the first array from test
data = w2v_test_features_array_dict['arr_0']
# print the array
print(data)
# %% [markdown]
# #### Load word2vec model trained key vectors
w2v_model_train = KeyedVectors.load(
'./data/custom-trained-word2vec-120000-min5dim300.kv')
# %% [markdown]
# #### Get the word2vec data back into usable form
wpt = WordPunctTokenizer()
tokenized_corpus_train = [wpt.tokenize(document) for document in x_train]
tokenized_corpus_test = [wpt.tokenize(document) for document in x_test]
# %%
def average_word_vectors(words, model, vocabulary, num_features):
feature_vector = numpy.zeros((num_features,), dtype="float32")
nwords = 0.
for word in words:
if word in vocabulary:
nwords = nwords + 1.
feature_vector = numpy.add(feature_vector, model[word])
if nwords:
feature_vector = numpy.divide(feature_vector, nwords)
return feature_vector
def averaged_word_vectorizer(corpus, model, num_features):
vocabulary = set(model.wv.index2word)
features = [average_word_vectors(tokenized_sentence, model, vocabulary, num_features)
for tokenized_sentence in corpus]
return numpy.array(features)
# %% [markdown]
# #### Obtain document level embeddings
# %%
w2v_feature_array_train = averaged_word_vectorizer(corpus=tokenized_corpus_train,
model=w2v_model_train, num_features=W2V_FEATURE_SIZE)
w2v_feature_array_test = averaged_word_vectorizer(corpus=tokenized_corpus_test,
model=w2v_model_train, num_features=W2V_FEATURE_SIZE)
x_train_w2v = pandas.DataFrame(w2v_feature_array_train)
x_test_w2v = pandas.DataFrame(w2v_feature_array_test)
# %% [markdown]
# #### Sample down for speed, for now.
x_train_w2v = x_train_w2v.sample(
n = 3000, replace = False, random_state = RANDOM_STATE
)
y_train = train_data_df.category.sample(
n = 3000, replace = False, random_state = RANDOM_STATE
)
y_train = label_binarize(y_train, classes=range(1, N_CLASSES + 1))
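# %% [markdown]
# #### Note: both .sample() calls above use the same RANDOM_STATE on same-length, default-indexed
# #### data, so the same row positions are drawn and the sampled features and labels stay aligned.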
# %% [markdown]
# #### Delete variables we don't need anymore to save memory
# del(w2v_feature_array_test)
# del(w2v_feature_array_train)
# del(w2v_test_features_array_dict)
# del(w2v_train_features_array_dict)
# del(tokenized_corpus_test)
# del(tokenized_corpus_train)
# del(wpt)
# del(train_data_df)
# del(test_data_df)
# del(x_train)
# del(x_test)
# del(data)
# %% [markdown]
# ## Build Models
# %% [markdown]
# ### SVM Model Building Function
def run_svm(x_train, y_train):
classifier = OneVsRestClassifier(svm.LinearSVC(random_state=RANDOM_STATE))
classifier.fit(x_train, y_train)
return classifier
# %% [markdown]
# ### Logistic Regression Model Building Function
def run_logreg(x_train, y_train):
classifier = OneVsRestClassifier(LogisticRegression(random_state=RANDOM_STATE))
classifier.fit(x_train, y_train)
return classifier
# %% [markdown]
# ### Naive Bayes Function
def run_nb(x_train, y_train):
classifier = OneVsRestClassifier(GaussianNB())
classifier.fit(x_train, y_train)
return classifier
# %% [markdown]
# ### Decision Trees Function
def run_dectree(x_train, y_train):
classifier = OneVsRestClassifier(tree.DecisionTreeClassifier())
classifier.fit(x_train, y_train)
return classifier
# %% [markdown]
# ### Functions to calculate scores and to plot them
# Calculate, then plot the Precision, Recall, Average Precision, F1
def prf1_calc(classifier, algo_name, n_classes, x_test, y_test):
# Get the decision function from the classifier
if algo_name == 'SVM':
y_score = classifier.decision_function(x_test)
else:
y_score = classifier.predict_proba(x_test)
y_pred = classifier.predict(x_test)
# The average precision score in multi-label settings
# For each class
precision = dict()
recall = dict()
average_f1 = dict()
average_precision = dict()
mcc = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
average_f1[i] = f1_score(y_test[:, i], y_pred[:, i])
mcc[i] = matthews_corrcoef(y_test[:, i], y_pred[:, i])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
average_f1['micro'] = f1_score(y_test, y_pred, average='micro')
mcc['micro'] = sum(mcc.values()) / n_classes  # mean of the per-class MCCs
# Plot the data
prf1_plot(precision, recall, average_precision, algo_name, n_classes)
# Return all metrics
results = pandas.DataFrame()
for k in average_precision.keys():
results.at[algo_name, f'P-R {k}'] = numpy.round(average_precision[k], 3)
results.at[algo_name, f'F1 {k}'] = numpy.round(average_f1[k], 3)
results.at[algo_name, f'MCC {k}'] = numpy.round(mcc[k], 3)
return results
# Function to Plot Precision, Recall, F1
def prf1_plot(precision, recall, average_precision, algo_name, n_classes):
print(algo_name)
print('Average precision score, micro-averaged over all classes: {0:0.2f}'
.format(average_precision["micro"]))
# Plot the micro-averaged Precision-Recall curve
plt.figure()
plt.step(recall['micro'], precision['micro'], where='post')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title(
'Average precision score, micro-averaged over all classes: AP={0:0.2f}'
.format(average_precision["micro"]))
# Plot Precision-Recall curve for each class and iso-f1 curves
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
plt.figure(figsize=(7, 8))
f_scores = numpy.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = numpy.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
lines.append(l)
labels.append('micro-average Precision-recall (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(lines, labels, loc=(0, -.5), prop=dict(size=14))
plt.show()
# %% [markdown]
# ## Run the Base Models
# %%
# Run SVM Model
svm_model = run_svm(x_train_w2v, y_train)
# %%
# Run Logistic Regression Model
logreg_model = run_logreg(x_train_w2v, y_train)
# %%
# Run Naive Bayes Classifier
nb_model = run_nb(x_train_w2v, y_train)
# %%
# Run Decision Trees Classifier
dectree_model = run_dectree(x_train_w2v, y_train)
# %% [markdown]
# ## Get the scores
# %%
# Initialize the dataframe to keep track of the scores
scores = pandas.DataFrame()
# %%
# Precision, Recall, Avg. Precision for SVM
scores = scores.append(prf1_calc(svm_model, 'SVM', N_CLASSES, x_test_w2v, y_test))
# %%
# Precision, Recall, Avg. Precision for LOG REG
scores = scores.append(prf1_calc(logreg_model, 'LOGREG', N_CLASSES, x_test_w2v, y_test))
# %%
# Precision, Recall, Avg. Precision for Naive Bayes
scores = scores.append(prf1_calc(nb_model, 'NB', N_CLASSES, x_test_w2v, y_test))
# %%
# Precision, Recall, Avg. Precision for Decision Trees
scores = scores.append(prf1_calc(dectree_model, 'DT', N_CLASSES, x_test_w2v, y_test))
# %% [markdown]
# ## Look at Cross-Validation
# %% Create model list to iterate through for cross validation
gnb = OneVsRestClassifier(GaussianNB())
sv = OneVsRestClassifier(svm.LinearSVC(random_state=RANDOM_STATE))
lreg = OneVsRestClassifier(LogisticRegression(random_state=RANDOM_STATE))
dtree = OneVsRestClassifier(tree.DecisionTreeClassifier())
model_list = [gnb, sv, lreg, dtree]
model_namelist = ['Gaussian Naive Bayes',
'SVM/Linear SVC',
'Logistic Regression',
'Decision Tree']
# %% Make scoring metrics to pass cv function through
scoring = {'precision': make_scorer(precision_score, average='micro'),
'recall': make_scorer(recall_score, average='micro'),
'f1': make_scorer(f1_score, average='micro'),
'roc_auc': make_scorer(roc_auc_score, average='micro'),
# 'mcc': make_scorer(matthews_corrcoef) <- cannot support multi-label
}
cv_result_entries = []
i = 0
# %% Loop cross validation through various models and generate results
for mod in model_list:
metrics = cross_validate(
mod,
x_train_w2v,
y_train,
cv=N_FOLDS,
scoring = scoring,
return_train_score=False,
n_jobs=-1
)
for key in metrics.keys():
for fold_index, score in enumerate(metrics[key]):
cv_result_entries.append((model_namelist[i], fold_index, key, score))
i += 1
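# For reference: with the scoring dict above, cross_validate returns the keys
# 'fit_time', 'score_time', 'test_precision', 'test_recall', 'test_f1' and 'test_roc_auc',
# each holding one value per fold -- these are the metric names used for the boxplots below.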
# %%
#cv_result_entries = pandas.read_csv('./data/cv-results.csv')
cv_results_df = pandas.DataFrame(cv_result_entries)
#cv_results_df.drop('Unnamed: 0', axis=1, inplace=True)
cv_results_df.columns = ['algo', 'cv fold', 'metric', 'value']
#test_df = pandas.DataFrame((cv_results_df[cv_results_df.metric.eq('fit_time')]))
# %% Plot cv results
for metric_name, metric in zip(['fit_time',
'test_precision',
'test_recall',
'test_f1',
'test_roc_auc'],
['Fit Time',
'Precision',
'Recall',
'F1 Score',
'ROC AUC']):
sns.boxplot(x='algo', y='value', #hue='algo',
data=cv_results_df[cv_results_df.metric.eq(f'{metric_name}')])
sns.stripplot(x='algo', y = 'value',
data = cv_results_df[cv_results_df.metric.eq(f'{metric_name}')],
size = 5, linewidth = 1)
plt.title(f'{metric} Algo Comparison', fontsize=12)
plt.xlabel('Algorithm', fontsize=12)
plt.ylabel(f'{metric}', fontsize=12)
plt.xticks(rotation=45)
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# %% Misclassification Errors
i=0
for model in model_list:
plt.figure()
plot_learning_curves(x_train_w2v, y_train, x_test_w2v, y_test, model)
plt.title('Learning Curve for ' + model_namelist[i], fontsize=14)
plt.xlabel('Training Set Size (%)', fontsize=12)
plt.ylabel('Misclassification Error', fontsize=12)
plt.show()
i += 1
# %% Get predictions
y_test_pred = []
for model in model_list:
y_test_pred.append(model.predict(x_test_w2v))
# %% Confusion Matrix
CLASSES = ['World', 'Sports', 'Business', 'Sci/Tech']
i=0
for _ in model_list:
cm = confusion_matrix(numpy.argmax(y_test, axis=1),
numpy.argmax(y_test_pred[i], axis=1))
cm_df = pandas.DataFrame(cm, index = CLASSES, columns = CLASSES)
cm_df.index.name = 'Actual'
cm_df.columns.name = 'Predicted'
plt.title('Confusion Matrix for ' + model_namelist[i], fontsize=14)
sns.heatmap(cm_df, annot=True, fmt='.6g', annot_kws={"size": 10}, cmap='Reds')
plt.show()
i += 1
# %% HYPER PARAMETER TUNING BY HYPEROPT (not working)
'''from hyperopt import STATUS_OK
N_FOLDS = 5
#%%
#Objective Function
def objective(params, n_folds = N_FOLDS):
cv_results = cross_validate(OneVsRestClassifier(GaussianNB()),
x_train_w2v,
y_train,
cv = n_folds,
fit_params= params,
scoring = {'f1': make_scorer(f1_score, average='micro')},
return_train_score=False,
n_jobs=-1
)
# Extract the best score
best_score = max(cv_results['test_f1'])
# Loss must be minimized
loss = 1 - best_score
# Dictionary with information for evaluation
return {'loss': loss, 'params': params, 'status': STATUS_OK}
# %%
#Domain Space
from hyperopt import hp
space = {'estimator__var_smoothing': hp.uniform('estimator__var_smoothing',
1.e-09, 1.e+00)}
#%%
# Optimization Algorithm
from hyperopt import tpe
tpe_algo = tpe.suggest
#%%
# Results History
from hyperopt import Trials
bayes_trials = Trials()
#%%
# Run the optimization
from hyperopt import fmin
from hyperopt import rand
MAX_EVALS = 500
params = space
# Optimize
best = fmin(fn = objective, space = space, algo = tpe.suggest,
max_evals = 100, trials = bayes_trials)
print(best)'''
# %% [markdown]
# ## Hyper-parameter tuning with exhaustive Grid Search
# ### Tune hyperparameters for Gaussian Naive-Bayes
params_gnb = {'estimator__var_smoothing': [1.e-09, 1.e-08, 1.e-07, 1.e-06, 1.e-05,
1.e-04, 1.e-03, 1.e-02, 1.e-01, 1.e+00]
}
clf = GridSearchCV(estimator=gnb,
param_grid=params_gnb,
scoring='f1_micro',
n_jobs=-1,
cv=N_FOLDS,
return_train_score=True
)
clf_res = clf.fit(x_train_w2v, y_train)
print('Best Score: ', clf_res.best_score_)
print('Best Params: ', clf_res.best_params_)
# %%
# ### Tune hyperparameters for Logistic Regression
params_lreg = {
"estimator__penalty": ['l1', 'l2'],
"estimator__C": [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000],
#"estimator__class_weight":[{1:0.5, 0:0.5}, {1:0.4, 0:0.6},
# {1:0.6, 0:0.4}, {1:0.7, 0:0.3}],
"estimator__solver": ["newton-cg", "sag", "saga", "lbfgs"]
}
clf = GridSearchCV(estimator=lreg,
param_grid=params_lreg,
scoring='f1_micro',
n_jobs=-1,
cv=N_FOLDS,
return_train_score=True
)
clf_res = clf.fit(x_train_w2v, y_train)
print('Best score:', clf_res.best_score_)
print('Best Params:', clf_res.best_params_)
# %%
# ### Tune hyperparameters for SVM (Linear SVC)
params_sv = {
"estimator__penalty":['l1', 'l2'],
"estimator__tol": [1.e-08, 1.e-07, 1.e-06, 1.e-05,
1.e-04, 1.e-03, 1.e-02, 1.e-01, 1.e+00],
"estimator__loss":['hinge','squared_hinge'],
"estimator__C": [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000]
#"estimator__class_weight":['None',{1:0.5, 0:0.5},
# {1:0.4, 0:0.6}, {1:0.6, 0:0.4}, {1:0.7, 0:0.3}],
}
clf = GridSearchCV(estimator=sv,
param_grid=params_sv,
scoring='f1_micro',
n_jobs=-1,
cv=N_FOLDS,
return_train_score=False
)
clf_res = clf.fit(x_train_w2v, y_train)
print('Best score:', clf_res.best_score_)
print('Best Params:', clf_res.best_params_)
# %%
# ### Tune hyperparameters for Decision Trees
params_dtree = {
"estimator__splitter":["best", "random"],
"estimator__min_samples_split":range(1, 20, 1)
}
clf = GridSearchCV(estimator=dtree,
param_grid=params_dtree,
scoring='f1_micro',
n_jobs=-1,
cv=N_FOLDS,
return_train_score=False
)
clf_res = clf.fit(x_train_w2v, y_train)
print('Best score:', clf_res.best_score_)
print('Best Params:', clf_res.best_params_)
# %% [markdown]
# ## Ensemble Methods
# %% [markdown]
# ### Stacking
estimators = [
('nb', GaussianNB()),
('svm', svm.LinearSVC())
]
sclf = OneVsRestClassifier(StackingClassifier(
estimators=estimators, final_estimator=LogisticRegression())
)
metrics = cross_validate(
sclf,
x_train_w2v,
y_train,
cv=N_FOLDS,
scoring = scoring,
return_train_score=False,
n_jobs=-1
)
# %%
res = []
for key in metrics.keys():
for fold_index, score in enumerate(metrics[key]):
res.append(('Stacking', fold_index, key, score))
# %%
res_df = pandas.DataFrame.from_dict(res)
res_df.columns = ['algo', 'cv fold', 'metric', 'value']
cv_results_inc_ens = pandas.concat([cv_results_df, res_df])
# %% [markdown]
# ### Bagging
sclf = OneVsRestClassifier(BaggingClassifier(
base_estimator=LogisticRegression())
)
metrics = cross_validate(
sclf,
x_train_w2v,
y_train,
cv=N_FOLDS,
scoring = scoring,
return_train_score=False,
n_jobs=-1
)
# %%
res = []
for key in metrics.keys():
for fold_index, score in enumerate(metrics[key]):
res.append(('Bagging', fold_index, key, score))
# %%
res_df = pandas.DataFrame.from_dict(res)
res_df.columns = ['algo', 'cv fold', 'metric', 'value']
cv_results_inc_ens = pandas.concat([cv_results_inc_ens, res_df])
# %% [markdown]
# ### Boosting
from sklearn.ensemble import AdaBoostClassifier
sclf = OneVsRestClassifier(AdaBoostClassifier(
random_state=RANDOM_STATE)
)
metrics = cross_validate(
sclf,
x_train_w2v,
y_train,
cv=N_FOLDS,
scoring = scoring,
return_train_score=False,
n_jobs=-1
)
# %%
res = []
for key in metrics.keys():
for fold_index, score in enumerate(metrics[key]):
res.append(('AdaBoost', fold_index, key, score))
# %%
res_df = pandas.DataFrame.from_dict(res)
import itertools
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from solarforecastarbiter.reference_forecasts import forecast
def assert_none_or_series(out, expected):
assert len(out) == len(expected)
for o, e in zip(out, expected):
if e is None:
assert o is None
else:
assert_series_equal(o, e)
def test_resample():
index = pd.date_range(start='20190101', freq='15min', periods=5)
arg = pd.Series([1, 0, 0, 0, 2], index=index)
idx_exp = pd.date_range(start='20190101', freq='1h', periods=2)
expected = pd.Series([0.25, 2.], index=idx_exp)
out = forecast.resample(arg)
assert_series_equal(out, expected)
assert forecast.resample(None) is None
@pytest.fixture
def rfs_series():
return pd.Series([1, 2],
index=pd.DatetimeIndex(['20190101 01', '20190101 02']))
@pytest.mark.parametrize(
'start,end,start_slice,end_slice,fill_method,exp_val,exp_idx', [
(None, None, None, None, 'interpolate', [1, 1.5, 2],
['20190101 01', '20190101 0130', '20190101 02']),
('20190101', '20190101 0230', None, None, 'interpolate',
[1, 1, 1, 1.5, 2, 2],
['20190101', '20190101 0030', '20190101 01', '20190101 0130',
'20190101 02', '20190101 0230']),
('20190101', '20190101 02', '20190101 0030', '20190101 0130', 'bfill',
[1., 1, 2], ['20190101 0030', '20190101 01', '20190101 0130'])
]
)
def test_reindex_fill_slice(rfs_series, start, end, start_slice, end_slice,
fill_method, exp_val, exp_idx):
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_some_nan():
rfs_series = pd.Series([1, 2, None, 4], index=pd.DatetimeIndex([
'20190101 01', '20190101 02', '20190101 03', '20190101 04',
]))
start, end, start_slice, end_slice, fill_method = \
None, None, None, None, 'interpolate'
exp_val = [1, 1.5, 2, 2.5, 3, 3.5, 4]
exp_idx = [
'20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03', '20190101 0330', '20190101 04']
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_all_nan():
arg = pd.Series([None]*3, index=pd.DatetimeIndex(
['20190101 01', '20190101 02', '20190101 03']))
out = forecast.reindex_fill_slice(arg, freq='30min')
exp = pd.Series([None]*5, index=pd.DatetimeIndex(
['20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03']))
assert_series_equal(out, exp)
def test_reindex_fill_slice_empty():
out = forecast.reindex_fill_slice(pd.Series(dtype=float), freq='30min')
assert_series_equal(out, pd.Series(dtype=float))
def test_reindex_fill_slice_none():
out = forecast.reindex_fill_slice(None, freq='30min')
assert out is None
def test_cloud_cover_to_ghi_linear():
cloud_cover = pd.Series([0, 50, 100.])
ghi_clear = pd.Series([1000, 1000, 1000.])
out = forecast.cloud_cover_to_ghi_linear(cloud_cover, ghi_clear)
expected = pd.Series([1000, 675, 350.])
assert_series_equal(out, expected)
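# The expected values above are consistent with a linear model of the form
#   ghi = ghi_clear * (offset + (1 - offset) * (1 - cloud_cover / 100))
# with an offset of 0.35 (0% cloud -> 1000, 50% -> 675, 100% -> 350).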
from nsetools import Nse
import requests
from datetime import datetime as dt, timedelta
import plotly.graph_objects as go
import pandas as pd
nse = Nse()
headers = {
"Connection": "keep-alive",
"Cache-Control": "max-age=0",
"DNT": "1",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36",
"Sec-Fetch-User": "?1",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp, \
image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Site": "none",
"Sec-Fetch-Mode": "navigate",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.9,hi;q=0.8",
}
def fetch_data(stock, start_date, end_date):
series = "EQ"
start_date = start_date.strftime("%d-%m-%Y")
end_date = end_date.strftime("%d-%m-%Y")
url = (
"https://www.nseindia.com/api/historical/cm/equity?symbol="
+ stock
+ "&series=[%22"
+ series
+ "%22]&from="
+ str(start_date)
+ "&to="
+ str(end_date)
+ ""
)
try:
data = requests.get(url, headers=headers).json()
except ValueError:
s = requests.Session()
data = s.get("http://nseindia.com", headers=headers)
data = s.get(url, headers=headers).json()
return pd.DataFrame.from_records(data["data"])
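# Illustrative usage sketch -- the symbol and date range below are hypothetical examples:
if __name__ == "__main__":
    end = dt.now()
    start = end - timedelta(days=30)
    print(fetch_data("RELIANCE", start, end).head())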
#edited by ryanrozewski
import numpy as np
import pandas as pd
import quandl
from datetime import date
import pickle, os
from Scheduler.Scheduler import Scheduler
from parameters import WORKING_DIR
from MonteCarloSimulators.Vasicek.vasicekMCSim import MC_Vasicek_Sim
from fredapi import Fred
from dateutil import relativedelta
QUANDL_API_KEY = '<KEY>'
FRED_API_KEY = "bcd1d85077e14239f5678a9fd38f4a59"
class CorporateRates(object):
def __init__(self):
self.OIS = []
self.filename = WORKING_DIR + '/CorpData.dat'
self.corporates = []
self.ratings = {'AAA':"BAMLC0A1CAAA",
'AA':"BAMLC0A2CAA",
'A':"BAMLC0A3CA",
'BBB':"BAMLC0A4CBBB",
'BB':"BAMLH0A1HYBB",
'B':"BAMLH0A2HYB",
'CCC':"BAMLH0A3HYC"}
self.corpSpreads = {}
self.corporates = | pd.DataFrame() | pandas.DataFrame |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import uuid
from datetime import datetime
import logging
from math import nan
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import tests.test_app
import superset.viz as viz
from superset import app
from superset.constants import NULL_STRING
from superset.exceptions import SpatialException
from superset.utils.core import DTTM_ALIAS
from .base_tests import SupersetTestCase
from .utils import load_fixture
logger = logging.getLogger(__name__)
class BaseVizTestCase(SupersetTestCase):
def test_constructor_exception_no_datasource(self):
form_data = {}
datasource = None
with self.assertRaises(Exception):
viz.BaseViz(datasource, form_data)
def test_process_metrics(self):
# test TableViz metrics in correct order
form_data = {
"url_params": {},
"row_limit": 500,
"metric": "sum__SP_POP_TOTL",
"entity": "country_code",
"secondary_metric": "sum__SP_POP_TOTL",
"granularity_sqla": "year",
"page_length": 0,
"all_columns": [],
"viz_type": "table",
"since": "2014-01-01",
"until": "2014-01-02",
"metrics": ["sum__SP_POP_TOTL", "SUM(SE_PRM_NENR_MA)", "SUM(SP_URB_TOTL)"],
"country_fieldtype": "cca3",
"percent_metrics": ["count"],
"slice_id": 74,
"time_grain_sqla": None,
"order_by_cols": [],
"groupby": ["country_name"],
"compare_lag": "10",
"limit": "25",
"datasource": "2__table",
"table_timestamp_format": "%Y-%m-%d %H:%M:%S",
"markup_type": "markdown",
"where": "",
"compare_suffix": "o10Y",
}
datasource = Mock()
datasource.type = "table"
test_viz = viz.BaseViz(datasource, form_data)
expect_metric_labels = [
u"sum__SP_POP_TOTL",
u"SUM(SE_PRM_NENR_MA)",
u"SUM(SP_URB_TOTL)",
u"count",
]
self.assertEqual(test_viz.metric_labels, expect_metric_labels)
self.assertEqual(test_viz.all_metrics, expect_metric_labels)
def test_get_df_returns_empty_df(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
datasource = self.get_datasource_mock()
test_viz = viz.BaseViz(datasource, form_data)
result = test_viz.get_df(query_obj)
self.assertEqual(type(result), pd.DataFrame)
self.assertTrue(result.empty)
def test_get_df_handles_dttm_col(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
results = Mock()
results.query = Mock()
results.status = Mock()
results.error_message = Mock()
datasource = Mock()
datasource.type = "table"
datasource.query = Mock(return_value=results)
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
test_viz = viz.BaseViz(datasource, form_data)
test_viz.df_metrics_to_num = Mock()
test_viz.get_fillna_for_columns = Mock(return_value=0)
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01 05:00:00"]})
datasource.offset = 0
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
mock_dttm_col.python_date_format = "epoch_ms"
result = test_viz.get_df(query_obj)
import logging
logger.info(result)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
mock_dttm_col.python_date_format = None
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
datasource.offset = 1
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 6, 0)], name=DTTM_ALIAS)
)
datasource.offset = 0
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01"]})
# -*- coding: utf-8 -*-
import pandas as pd
import glob
#Variable Type Label
#Communes Char NAME OF THE COMMUNE
#Codes_Insee Char COMMUNE NUMBER, INSEE CODE
#NB_enfant_AEEH Num NUMBER OF CHILDREN RECEIVING A PAYABLE AEEH
#AEEH_0A2 Num NUMBER OF CHILDREN AGED 0 TO 2 RECEIVING A PAYABLE AEEH
#AEEH_3A5 Num NUMBER OF CHILDREN AGED 3 TO 5 RECEIVING A PAYABLE AEEH
#AEEH_6A11 Num NUMBER OF CHILDREN AGED 6 TO 11 RECEIVING A PAYABLE AEEH
#AEEH_12A15 Num NUMBER OF CHILDREN AGED 12 TO 15 RECEIVING A PAYABLE AEEH
#AEEH_16A17 Num NUMBER OF CHILDREN AGED 16 TO 17 RECEIVING A PAYABLE AEEH
#AEEH_18A20 Num NUMBER OF CHILDREN AGED 18 TO 20 RECEIVING A PAYABLE AEEH
#
#
#***********REMARKS***********
#
#1) The claimant household ("foyer allocataire") is the administrative unit to which the Caf pays at least one benefit. It consists of the claimant
#(the person who receives at least one benefit given their family and/or financial situation), their spouse/partner
#if any, and the dependent children and other dependants within the meaning of the regulations in force. A claimant household may therefore
#comprise one or several people.
#
#2) A payable entitlement means that the claimant household meets every condition to actually be paid for the observation month.
#In particular, this excludes beneficiaries who have not supplied all of their supporting documents,
#and those whose benefit amount is below the payment threshold.
#
#3) The geographic scope of this dataset is the commune of residence of the claimant household as recorded
#in the statistical file of claimants extracted at the start of year N+1, whatever the managing Caf.
#The first row of the file, with commune number XXXXX, covers two possible cases: either an unknown commune code or a
#claimant residing abroad.
#From 2014 onward, residents abroad and unknown commune codes are split into two separate rows.
#
#4) A blank ' ' appears in two situations: either the information is missing, or statistical disclosure control was applied.
#Disclosure control is applied to every value below 5. In addition, to prevent some missing values from being deduced from
#other values by cross-referencing (for example, by difference from the total when a single value is missing), disclosure
#control is also applied to other values.
#
#5) A child is entitled to the AEEH if they meet the following conditions:
#be under 20 years of age;
#- have a permanent disability of at least 80%. The rate may also be between 50% and 80%
#if the child attends a specialised institution or if their condition requires a special-education
#or home-care service;
#
#- not reside in a boarding facility where the costs of stay are fully covered by health insurance,
#the State or social assistance. The commission for the rights and autonomy of disabled persons (Cdaph)
#assesses the child's state of health and proposes awarding the AEEH for a period of
#1 to 5 years, unless the disability rate worsens.
#
#7) The age used is the age reached during the year (age by generation). It is the difference between the observation year and
#the individual's year of birth. The age-band intervals are closed intervals.
#
#***********FILE TITLES***********
#
#AEEH_Enf_Com_XXXX.csv
#where XXXX is the reference year
#
#***********ADDITIONAL INFORMATION***********
#
#Source: Cnaf, FILEAS file and BASE COMMUNALE ALLOCATAIRES (BCA)
#Release frequency: Annual
#Temporal granularity: Month
#Unit: Number of children
#Scope: France, general scheme + agricultural scheme in the overseas departments (Dom)
#Geographic level: Commune
#
#
#***********LINKS***********
#
#More information is available on the Caf family-branch website: http://www.caf.fr/
df = pd.read_csv('source/EnfantAEEH2009.csv', sep=";")
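# Illustrative sketch (assumed file layout): the notes above describe one file per year, so the
# imported glob module could be used to load and stack all available years, e.g.:
# frames = [pd.read_csv(path, sep=";") for path in sorted(glob.glob('source/EnfantAEEH*.csv'))]
# all_years = pd.concat(frames, ignore_index=True)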
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 08:46:30 2021
@author: niven
"""
import os
import glob
from pathlib import Path
import shutil
import datetime
import imageio
import pandas as pd
import geopandas as gpd
import numpy as np
from scipy import stats
from shapely.geometry import Point, Polygon, LineString
from shapely import speedups
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.patches as mpatches
from matplotlib.collections import LineCollection
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from src.definitions import ROOT_DIR, filter_Prot, dict_Sale2Yr, cols_prebid, cols_bid, cols_company2
from src.definitions import cols_borehole_pos, cols_borehole
from src.definitions import dict_Brent
from src.data.utils import concatenate_files, df_col2dict, dryhole_label
# %% bid data directories
path_bid_data = ROOT_DIR /'src/data'
assert path_bid_data.exists()
path_scra = ROOT_DIR/'src/data/scra'
if path_scra.exists() == False:
path_scra.mkdir()
# %% block geometry
path_activelease = ROOT_DIR/'src/data/shape_files/activelease'
path_protractions = ROOT_DIR/'src/data/shape_files/protclip'
path_blocks = ROOT_DIR/'src/data/shape_files/blocks'
path_bath = ROOT_DIR/'src/data/shape_files/bathymetry'
gpd_ActiveLease = gpd.read_file(path_activelease/'al_20210201.shp')
gpd_Prot = gpd.read_file(path_protractions/'protclip.shp')
gpd_Blocks = gpd.read_file(path_blocks/'blocks.shp')
gpd_Bath500ft = gpd.read_file(path_bath/'contours_noaa_500ft.shp')
gpd_Blocks['Lon'] = gpd_Blocks.centroid.x
gpd_Blocks['Lat'] = gpd_Blocks.centroid.y
# %% List of Ewave
filter_Ewave = ['KC330','GB740','GB830','GC57','GC66','EW951','EW957','GC31','GC76','GC164','GC390','WR23','WR24',
'WR112','SE129','SE116','KC903','KC560','KC556','KC330']
df_filter = gpd_Blocks.loc[gpd_Blocks['AC_LAB'].isin(filter_Ewave) ].copy()
sorterIndex = dict(zip(filter_Ewave, range(len(filter_Ewave))))
df_filter['Order'] = df_filter['AC_LAB'].map(sorterIndex)
# % Order polygon points
df_filter.sort_values(by=['Order'], ascending = [True], inplace = True)
df_filter.set_index('Order', inplace = True)
df_filter = pd.concat([df_filter, df_filter.iloc[0:1] ], ignore_index=True )
# % extract centroid (block corner estimate)
df_filter['X'] = df_filter.centroid.x
df_filter['Y'] = df_filter.centroid.y + .02 # .02 about 1/2 block shift
poly = Polygon(list(zip(df_filter.X, df_filter.Y))) # in Python 3.x
d = {'Survey': 'Ewave', 'geometry':poly}
#
gdf_Ewave = gpd.GeoDataFrame(d, index=[0], crs=4267)
del d, df_filter, poly, filter_Ewave
# %% Borehole data
path_boreholes = ROOT_DIR / 'src/data/5010.DAT'
assert path_boreholes.exists()
df_Boreholes = pd.read_fwf(path_boreholes, colspecs=cols_borehole_pos, header=None)
df_Boreholes.columns = cols_borehole
# Clean Borehole Dataframe
df_Boreholes['BottomLon'] = df_Boreholes['BottomLon'].apply(lambda x: pd.to_numeric(x, errors='coerce'))
df_Boreholes['BottomLat'] = df_Boreholes['BottomLat'].apply(lambda x: pd.to_numeric(x, errors='coerce'))
df_Boreholes['MD'] = df_Boreholes['MD'].apply(lambda x: pd.to_numeric(x, errors='coerce'))
df_Boreholes['TVD'] = df_Boreholes['TVD'].apply(lambda x: pd.to_numeric(x, errors='coerce'))
# %% filter to DW protractions
df_Boreholes = df_Boreholes.loc[df_Boreholes['BottomArea'].isin(filter_Prot)]
# Filter for only "exploration wells and filter out canceled wells
df_Boreholes = df_Boreholes[df_Boreholes.StatCode != 'CNL']
#df_Boreholes = df_Boreholes[df_Boreholes.TypeCode == 'E']
df_Boreholes.dropna(axis=0, how='any', thresh=None, subset=['BottomLon','BottomLat'], inplace=True)
df_Boreholes['SpudYear']= df_Boreholes['Spud'].apply(lambda x: int(str(x)[0:4]))
# %% AC_LAB
df_Boreholes['BottomBlock'] = df_Boreholes['BottomBlock'].apply(lambda x: str(pd.to_numeric(x, errors='coerce')))
df_Boreholes['SurfBlock'] = df_Boreholes['SurfBlock'].apply(lambda x: str(pd.to_numeric(x, errors='coerce')))
df_Boreholes['AC_LAB'] = df_Boreholes['BottomArea'].str.cat(df_Boreholes['BottomBlock'],sep="")
df_Boreholes['Surf_LAB'] = df_Boreholes['SurfArea'].str.cat(df_Boreholes['SurfBlock'],sep="")
# %% Offshore Magazine DW Field Summary
path_OffshoreMag = ROOT_DIR / 'src/data/OffshoreMagFields.csv'
assert path_OffshoreMag.exists()
path_BOEM_Fields = ROOT_DIR/'src/data/BOEM_YE2018_Fields.csv'
assert path_BOEM_Fields.exists()
df_OffshoreMag = pd.read_csv(path_OffshoreMag)
df_BOEM_Fields = pd.read_csv(path_BOEM_Fields)
dict_OffshoreMag = df_OffshoreMag.set_index('AC_LAB')
# %% Map Boreholes to Offshore Mag list
#I. Assume Offshore Mag list is bottom hole block
df_bb = pd.merge(df_OffshoreMag, df_Boreholes, on='AC_LAB', how='outer')
#check spud is withing +- 1 year
mask1 = df_bb[abs(df_bb['YrDisc'] - df_bb['SpudYear']) <= 1 ]
#II. Assume Offshore Mag list is surface block
df_OffshoreMagSurf = df_OffshoreMag.copy().rename(columns={'AC_LAB':'Surf_LAB'})
df_sb = pd.merge(df_OffshoreMagSurf, df_Boreholes, on='Surf_LAB', how='outer')
mask2 = df_sb[abs(df_sb['YrDisc'] - df_sb['SpudYear']) <=1 ]
#IIb. Flip lables back
mask2.rename(columns={'Surf_LAB':'AC_LAB','AC_LAB':'Surf_LAB'}, inplace=True)
#III append and drop duplicate information
mask3 = mask1.append(mask2).drop_duplicates()
#IV group and get average point
mask4 = mask3.groupby(['AC_LAB'])[['BottomLon', 'BottomLat']].agg('mean')
del df_OffshoreMagSurf, df_sb, mask1, mask2, mask3
# %% Fillout Lat, Lon coordinates
df_OffshoreMag = pd.merge(df_OffshoreMag, mask4, on='AC_LAB', how='outer')
from PyInvestor.utils import IEX_URL, timerange_split, timerange_chart , _endpoint, _correctdate
import requests
import pandas as pd
import collections
"""
TODOs
- implement a proper way to deal with out of connection requests
"""
class Stock:
"""Gathers data from the IEX endpoints for only one stock
"""
def __init__(self, symbol):
"""Initialization of the class Stock
"""
self.symbol = symbol.upper()
self.key = 'stock'
def Company(self):
""" returns information related to the company
"""
response = _endpoint(self.key, self.symbol, 'company')
df = pd.DataFrame(response)
df = df.drop(['tags'], axis=1)
return df.drop_duplicates()
def DelayedQuote(self):
""" returns the 15 minute delayed market quote
"""
response = _endpoint(self.key, self.symbol, 'delayed-quote')
df = pd.Series(response).to_frame().T
_correctdate(df)
return df
def Dividends(self, timerange):
""" returns the historical dividends based on the historical market data
"""
if timerange not in timerange_split:
raise ValueError('%s not a valid value. Please select: "5y","2y","1y","ytd","6m","3m","1m"')
else:
response = _endpoint(self.key, self.symbol, 'dividends/%s' %timerange)
return pd.DataFrame(response)
def Earnings(self):
""" returns data from the four most recent reported quarters
"""
response = _endpoint(self.key, self.symbol, 'earnings')
return pd.DataFrame(response['earnings'])
def EffectiveSpread(self):
""" returns an array of effective spread, eligible volume, and price
improvement of a stock, by market. Effective spread is designed to
measure marketable orders executable in relation to the market
center's quoted spread and takes into account hidden and midpoint
liquidity available at each market center.
"""
response = _endpoint(self.key, self.symbol, 'effective-spread')
return pd.DataFrame(response)
def Financials(self):
""" returns income statement, balance sheet, and cash flow data from the four
most recent reported quarters.
"""
response = _endpoint(self.key, self.symbol, 'financials')
return pd.DataFrame(response['financials'])
def Stats(self):
""" returns certain important number in relation with a stock
"""
response = _endpoint(self.key, self.symbol, 'stats')
return pd.Series(response).to_frame().T
def LargestTrades(self):
""" returns 15 minute delayed, last sale eligible trades
"""
response = _endpoint(self.key, self.symbol, 'largest-trades')
df = pd.DataFrame(response)
_correctdate(df)
return df
def News(self, lastndays=10):
if (lastndays > 50) or (lastndays < 1):
raise ValueError('Value of last is not correct. It must in between [1,50]')
else:
response = _endpoint(self.key, self.symbol, 'news/last/%s' %lastndays)
df = pd.DataFrame(response)
return df
def OHLC(self):
""" returns the official open, high, low and close for a given symbol with open and/or close official listing time
"""
response = _endpoint(self.key, self.symbol, 'ohlc')
dic = collections.defaultdict()
dic[self.symbol] = {}
dic[self.symbol]['open'] = response['open']['price']
dic[self.symbol]['close'] = response['close']['price']
dic[self.symbol]['high'] = response['high']
dic[self.symbol]['low'] = response['low']
dic[self.symbol]['close_time'] = response['close']['time']
dic[self.symbol]['open_time'] = response['open']['time']
df = pd.DataFrame(dic)
_correctdate(df)
return df
def Previous(self):
""" returns previous day adjusted price data for a single stock
"""
response = _endpoint(self.key, self.symbol, 'previous')
return pd.DataFrame(response, index=[response['symbol']])
def Price(self):
""" returns a single number, corresponding to the IEX real time price, the 15 minute delayed market price,
or the previous close price
"""
return _endpoint(self.key, self.symbol, 'price')
def Quote(self, displayPercent=False):
""" returns several quoting prices such as calculationPrice, latestPrice, delayedPrice
Option: displayPercent -- all percentage values will be multiplied by a factor 100
"""
if displayPercent == False:
response = _endpoint(self.key, self.symbol, 'quote')
else:
response = _endpoint(self.key, self.symbol, 'quote?displayPercent=true')
df = pd.Series(response).to_frame().T
_correctdate(df)
return df
def Relevant(self):
""" similar to peers endpoint, except this will return most active market symbols when peers
are not available.
"""
response = _endpoint(self.key, self.symbol, 'relevant')
return response['symbols']
def Splits(self, timerange):
""" returns the different splits that occured for a particular range of dates "timerange"
"""
if timerange not in timerange_split:
raise ValueError('%s not a valid value, please enter: "5y", "2y", "1y", "ytd", "6m", "3m", "1m"' %timerange)
else:
response = _endpoint(self.key, self.symbol, 'splits/%s' %timerange)
return pd.DataFrame(response)
def Tags(self):
response = _endpoint(self.key, self.symbol, 'company')
return response['tags']
def TimeSeries(self, timerange='1m'):
""" returns the historically adjusted market-wide data based on the timerange.
this turns out to be the same as the chart endpoint of IEX API.
"""
if timerange not in timerange_chart:
raise ValueError('%s not a valid value, please enter: "5y", "2y", "1y", "ytd", "6m", "3m", "1m", "1d", "date", "dynamic"' %timerange)
else:
response = _endpoint(self.key, self.symbol, 'time-series/%s' %timerange) # still to check if kwargs work
return pd.DataFrame(response)
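# Illustrative usage sketch (hypothetical ticker; assumes the IEX endpoints are reachable):
if __name__ == "__main__":
    aapl = Stock("AAPL")
    print(aapl.Price())
    print(aapl.Quote(displayPercent=True))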
import time # 引入time模块
import pandas as pd
import re
import sqlparse
attributeNameArray = ['tableName', 'createTime', 'lastModifyTime', 'owner', 'rowNumber', 'columnNumber',
'primaryKey', 'uniqueKey', 'foreignKey', 'notNullColumn', 'indexColumn', 'columnDataType']
remarksList = ['表名', '创建时间', '最后修改时间', '所有者', '数据行数', '字段数', '主键',
'唯一键', '外键', '不能为空字段', '索引字段', '数据类型']
# Custom string-join helper; called by str2TableClass below
def myConcat(array: list, separator: str):
temp = ""
for i in range(0, len(array)):
temp += array[i] + separator
temp = temp[:-1]
return temp
# Parses the incoming CREATE TABLE statement with regexes and breaks out its parts; called by tableInit
def str2TableClass(tempStr: str, tableName: str):
tempStr = re.search(r"[(](.*)[)]", tempStr).group(1) # 拿到括号里的内容
primaryKey = ""
uniqueKey = ""
foreignKey = ""
# primary key部分
p1 = re.search(r"primary key(.*?)[(](.*?)[)]", tempStr)
# print(p1.group(0))
# print(p1.group(2) + " 主键值")
if p1 is not None:
primaryKey = p1.group(2).strip()
primaryKeyList = primaryKey.split(",")
for index, ele in enumerate(primaryKeyList):
primaryKeyList[index] = ele.strip()
primaryKey = myConcat(primaryKeyList, ",")
tempStr = re.sub(r"primary key(.*?)[(](.*?)[)]", "", tempStr) # 删除primary key 防止影响到后边内容
# unique key部分
p2 = re.search(r"unique key(.*?)[(](.*?)[)]", tempStr)
# print(p2.group(0))
# print(p2.group(2) + " 唯一键值")
if p2 is not None:
uniqueKey = p2.group(2)
tempStr = re.sub(r"unique key(.*?)[(](.*?)[)]", "", tempStr)
# foreign key部分 这里其实有bug foreign key 可以有多个 但是我这里 search方法只能找到一个
p3 = re.search(r"foreign key(.*?)[(](.*?)[)](.*?)references(.*?)[(](.*?)[)]", tempStr)
# print(p2.group(0))
# print(p2.group(2) + " 当前表中值")
# print(p2.group(4).strip() + " 被参考的表名")
# print(p2.group(5).strip() + " 外表的键")
if p3 is not None:
foreignKey = p3.group(2) + "|" + p3.group(4).strip() + "|" + p3.group(5).strip()
tempStr = re.sub(r"foreign key(.*?)[(](.*?)[)](.*?)references(.*?)[(](.*?)[)]", "", tempStr)
# 分解 剩下的 这样里边全都是类似 school varchar not null 、 age int 或者是空格 的字符串
array = tempStr.split(",")
tempArray = [] # 用于临时记录去除空格的形如 school varchar not null 这样的
columnCount = 0 # 用来计数有多少个字段 因为存在全是空格的字符串
for ele in array:
if not ele.isspace(): # 自带函数 当全是空格的时候 为 true
columnCount += 1 # 用来计数有多少个字段 因为存在全是空格的字符串
tempArray.append(ele.strip()) # 去除前后两边的空格
columnNameArray = [] # 字段名数组
columnDataTypeArray = [] # 字段类型数组
notNullColumn = [] # 设置了不空的字段
for ele in tempArray:
p = re.search(r"(.*?)not( +)null", ele)
if p is None:
arrayAA = re.split(r" +", ele.strip())
else:
arrayAA = re.split(r" +", p.group(1).strip())
notNullColumn.append(arrayAA[0])
# 将提取出来的 字段名 和 字段类型 添加进去
columnNameArray.append(arrayAA[0])
columnDataTypeArray.append(arrayAA[1])
uniqueKeyList = uniqueKey.strip().split(",")
uniqueKey = myConcat(uniqueKeyList, ",")
# myConcat是自己写的函数 将notNull的column拼接起来 形如 school,home
notNullColumnStr = myConcat(notNullColumn, ",")
notNullColumnStr += "," + primaryKey + "," +uniqueKey # 加上主键也不能为空
# 拼接成形如 id#int,name#varchar,age#int,school#varchar,home#varchar,aad#varchar 的字符串
# 前边是 字段名称 后边是字段类型 两者用#分割 不同字段之间用, 分割
temp = ""
for i in range(0, len(columnNameArray)):
temp += columnNameArray[i] + "#" + columnDataTypeArray[i] + ","
columnDataTypeArrayStr = temp[:-1]
# 构造一个类 很好用
print(tempStr)
tableTemp = Table(tableName=tableName,
createTime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
lastModifyTime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
owner="root", rowNumber=0, columnNumber=columnCount,
primaryKey=primaryKey, uniqueKey=uniqueKey, foreignKey=foreignKey,
notNullColumn=notNullColumnStr, indexColumn="", columnDataType=columnDataTypeArrayStr)
# 将一些信息存入类中 后边还会用
tableTemp.columnNameArray = columnNameArray
tableTemp.columnDataTypeArray = columnDataTypeArray
return tableTemp
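# Illustrative example of the kind of statement str2TableClass is written to handle
# (hypothetical schema, shown as a comment only):
# create table student (
#     id int, name varchar not null, age int,
#     primary key (id),
#     unique key (name)
# )
# i.e. the function receives the full statement string plus the table name "student".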
# Initializes a table: extracts the parsed metadata and writes it into the table's Excel workbook
def tableInit(databaseLocation: str, databaseName: str, currentIndex: int, tokens):
for index in range(currentIndex, len(tokens)):
while str(tokens[index].ttype) != "None":
index += 1
tableName = str(tokens[index].tokens[0])
tempStr = str(tokens[index])
break
# 引入writer 防止覆盖 这样可以向两个工作表(sheet)中写入信息
src = databaseLocation + "\\" + databaseName.upper() + "\\" + tableName + ".xlsx"
writer = pd.ExcelWriter(src, engine='openpyxl')
initTableAttributeObject = str2TableClass(tempStr, tableName)
tempArray = list(range(1, len(attributeNameArray) + 1)) # 索引列需要
s1 = pd.Series(tempArray, index=tempArray, name="index") # 索引列 一共需要12个属性
s2 = pd.Series(attributeNameArray, index=tempArray, name="attribute") # 属性名列
s3 = pd.Series(initTableAttributeObject.toArray(), index=tempArray, name="value") # 这个是最麻烦的 注意调用了 Table类的toArray方法
s4 = pd.Series(remarksList, index=tempArray, name="备注") # 备注列 这个是写死的
attributeDf = pd.DataFrame({s1.name: s1, s2.name: s2, s3.name: s3, s4.name: s4}) # 插入4列
attributeDf = attributeDf.set_index("index") # 设置索引
dataDf = pd.DataFrame(columns=initTableAttributeObject.columnNameArray)
# 将内容写回excel表格
attributeDf.to_excel(writer, sheet_name="attribute")
dataDf.to_excel(writer, sheet_name="data", index=False)
writer.save()
writer.close()
return tableName # 返回创建表的名字
def checkSafety(attributeDf, dataDf, aa: list, dic):
primaryKeyList: list = attributeDf["value"].at[6].strip().split(",")
uniqueKeyList: list = attributeDf["value"].at[7].strip().split(",")
notNullStrArray: list = attributeDf["value"].at[9].strip().split(",")
error: str = ""
# 检查 非空约束 primary key
# print(notNullStrArray)
for ele in notNullStrArray:
if ele not in aa:
# print("字段 " + ele + " 不能为空,插入失败")
return "字段 " + ele + " 不能为空,插入失败"
# 主键不能重复
for ele in primaryKeyList:
dataDf = dataDf.loc[dataDf[ele].apply(lambda xx: str(xx) == dic[ele])]
# print(dataDf)
if dataDf.empty is False:
# print("主键重复,请重试")
return "主键重复,请重试"
return error
# 唯一键不能重复
# for ele in uniqueKeyList:
# temp = dataDf.loc[dataDf[ele].apply(lambda xx: str(xx) == dic[ele])]
# After the integrity checks pass, writes the new row into the Excel sheet; called by tableInsert
def judgeAndInsert(src: str, aa: list, bb: list, all: list):
# 注意这里的地址 还是相对于main.py 这个文件而言的 而不是相对于 本文件Table.py
# print(aa)
# print(bb)
# aa 是需要插入列表字段列表 bb是值
writer = pd.ExcelWriter(src)
dic = {}
for index, ele in enumerate(bb):
dic[aa[index]] = ele
attributeDf = pd.read_excel(writer, sheet_name="attribute")
# print(attributeDf)
dataDf = pd.read_excel(writer, sheet_name="data", usecols=all)
# print(dataDf)
error = checkSafety(attributeDf, dataDf, aa, dic)
if error != "":
print(error)
return
dataDf = dataDf.append(dic, ignore_index=True)
attributeDf["value"].at[2] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) # 更新时间
attributeDf["value"].at[4] += 1 # 增加行数
attributeDf.to_excel(writer, sheet_name="attribute", index=False)
dataDf.to_excel(writer, sheet_name="data", index=False)
writer.save()
writer.close()
print("插入成功")
# Filters a dataframe on one condition, e.g. for "id > 20": key is "id", algebraicSymbol is ">", value is "20"
def getDataframeByRequirement(key, value, algebraicSymbol, dataframe: pd.DataFrame):
#print(key)
#print(value)
#print(algebraicSymbol)
tempDataFrame = None
if algebraicSymbol == ">":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: xx > int(value))]
if algebraicSymbol == ">=":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: xx >= int(value))]
if algebraicSymbol == "<":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: xx < int(value))]
if algebraicSymbol == "<=":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: xx <= int(value))]
if algebraicSymbol == "=":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: str(xx) == str(value))]
if algebraicSymbol == "!=":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: str(xx) != str(value))]
return tempDataFrame
# Parses an expression into a string list tempList = [key, value, algebraicSymbol]
def getKeyValueAndAlgebraicSymbol(expression: str):
key = ""
value = ""
algebraicSymbol = ""
if "=" in expression:
equalIndex = expression.index("=")
if expression[equalIndex - 1] == "!":
algebraicSymbol = "!="
elif expression[equalIndex - 1] == ">":
algebraicSymbol = ">="
elif expression[equalIndex - 1] == "<":
algebraicSymbol = "<="
else:
algebraicSymbol = "="
else:
if ">" in expression:
algebraicSymbol = ">"
elif "<" in expression:
algebraicSymbol = "<"
key = (expression.split(algebraicSymbol))[0].strip()
value = (expression.split(algebraicSymbol))[1].strip()
tempList = [key, value, algebraicSymbol]
return tempList
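# For example, getKeyValueAndAlgebraicSymbol("sno >= 10") returns ['sno', '10', '>='].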
# Returns the dataframe rows that satisfy a where condition
def parseWhereGetDf(src: str, whereStr: str):
dataDf = pd.read_excel(src, sheet_name="data")
# strTemp3 = "sno< 20 and sno > 5 and sno >=10 and sno > 17 or sno < 12"
# strTemp4 = "sno > 17 or sno < 12 "
noOrDataDf = dataDf
if whereStr == "":
# print(dataDf)
return dataDf
else:
andSplitStrArray = re.split(r" and ", whereStr)
orList = []
for ele in andSplitStrArray:
if " or " in ele:
orSplitStrArray = re.split(r" or ", ele)
orDfList = []
# 拿到所有的or 中的表达式 做一个交集
for factor in orSplitStrArray:
tempArray = getKeyValueAndAlgebraicSymbol(factor)
OrDataDf = getDataframeByRequirement(tempArray[0], tempArray[1], tempArray[2], dataDf)
orDfList.append(OrDataDf)
oneTempOrDf = orDfList[0]
# 取所有的并集 用or隔开的表达式的并集
for element in orDfList:
oneTempOrDf = pd.merge(oneTempOrDf, element, how="outer") # 取并集
orList.append(oneTempOrDf)
else:
tempArray = getKeyValueAndAlgebraicSymbol(ele)
key = tempArray[0]
value = tempArray[1]
algebraicSymbol = tempArray[2]
noOrDataDf = getDataframeByRequirement(key, value, algebraicSymbol, noOrDataDf)
finallyDf = noOrDataDf
# 举个例子 sno< 20 and sno > 5 and sno >=10 and sno > 17 or sno < 12 and sno > 17 or sno < 12
# orlist中有 2个元素 最终下方函数是对三个dataframe做交集
for ele in orList:
finallyDf = pd.merge(finallyDf, ele, how="inner")
# print(finallyDeleteDf)
return finallyDf
# Module-level entry point for INSERT statements, called from outside
def tableInsert(currentDatabase, token):
# print(token) # INSERT INTO student (name, age) value('jack', 30)
tokenStr = "" # 直接提取出来所有的sql指令 进行正则匹配
for ele in token:
tokenStr += ele.normalized
columnNameArray = [] #
valueArray = []
allArray = []
src = ""
p1 = re.search(r'INSERT( +)INTO( +)(.*?)( +)[(](.*?)[)]( +)value[(](.*?)[)]', tokenStr)
if p1 is not None:
# print(p1.group(0)) # INSERT INTO student (name, age) value('jack', 30)
# print(p1.group(3)) # student
tableName = p1.group(3)
src = "databases/" + currentDatabase.upper() + "/" + tableName + ".xlsx"
# 求出所有属性 会用到
attributeDf = pd.read_excel(src, sheet_name="attribute")
array = str(attributeDf["value"].at[11]).split(",")
for ele in array:
allArray.append(ele.split("#")[0])
# print(p1.group(5)) # name, age
columnNameArray = p1.group(5).strip().split(",")
for index in range(0, len(columnNameArray)):
columnNameArray[index] = columnNameArray[index].strip()
# print(p1.group(7)) # 'jack', 30
valueArray = p1.group(7).strip().split(",")
for index in range(0, len(valueArray)):
valueArray[index] = valueArray[index].strip().strip("'")
print(valueArray)
# print("p1")
p2 = re.search(r'INSERT( +)INTO( +)(.*?)( +)values[(](.*?)[)]', tokenStr)
if p2 is not None:
# print(p2.group(0)) # INSERT INTO my_teacher values('lilei',28)
# print(p2.group(3)) # student
tableName = p2.group(3)
src = "databases/" + currentDatabase.upper() + "/" + tableName + ".xlsx"
attributeDf = pd.read_excel(src, sheet_name="attribute")
array = str(attributeDf["value"].at[11]).split(",")
for ele in array:
allArray.append(ele.split("#")[0])
columnNameArray = allArray
valueArray = p2.group(5).strip().split(",")
for index in range(0, len(valueArray)):
valueArray[index] = valueArray[index].strip().strip("'")
# 调用插入函数 传入 表的路径 字段名称数组 值数组 所有字段数组
judgeAndInsert(src, columnNameArray, valueArray, allArray)
def handleDeleteInExcel(src: str, whereStr: str):
# print(src)
# print(whereStr)
# 读取数据
writer = pd.ExcelWriter(src)
attributeDf = pd.read_excel(writer, sheet_name="attribute")
dataDf = pd.read_excel(writer, sheet_name="data")
# print(attributeDf)
# print(dataDf)
if whereStr == "":
# 修改数据
dataDf.drop(dataDf.index, inplace=True) # 删除所有数据
attributeDf["value"].at[4] = 0 # 把rowNumber数据行改成0 代表里面没有数据
else:
# print(whereStr)
# 提取出关键信息 进行筛选
tempDf = parseWhereGetDf(src=src, whereStr=whereStr)
# print(dataDf)
print("删除了{}行".format(len(tempDf)))
# print(tempDf)
dataDf = dataDf.append(tempDf)
dataDf = dataDf.drop_duplicates(subset=dataDf.columns, keep=False)
# print(dataDf)
attributeDf["value"].at[4] -= len(tempDf) # 减少行数
# 写回数据
attributeDf.to_excel(writer, sheet_name="attribute", index=False)
dataDf.to_excel(writer, sheet_name="data", index=False)
writer.save()
writer.close()
print("删除成功")
def tableDelete(currentDatabase: str, token):
tokenStr = "" # 直接提取出来所有的sql指令 进行正则匹配
for ele in token:
tokenStr += ele.normalized
# print(tokenStr)
# 去除多余的空格
tokenStr = re.sub(r" +", " ", tokenStr)
tableName: str = ""
src: str = ""
whereStr: str = ""
# 两个分支 如果存在
if "where" in tokenStr:
p1 = re.search(r'DELETE FROM (.*?) where (.*)', tokenStr)
# print(p1.group(0)) # 全语句 DELETE FROM student where home != 'shandong' or id = 30
# print(p1.group(1)) # 表名 student
# print(p1.group(2)) # 条件 home != 'shandong' or id = 30
tableName = p1.group(1).strip()
whereStr = p1.group(2).strip()
else:
p2 = re.search(r'DELETE FROM (.*)', tokenStr)
# print(p2.group(0)) # DELETE FROM student
# print(p2.group(1)) # student
tableName = p2.group(1).strip()
whereStr = ""
print("你真的想要删除 {} 表中所有数据吗(yes/no)".format(tableName))
if "n" in input():
return
src = "databases/" + currentDatabase.upper() + "/" + tableName + ".xlsx"
handleDeleteInExcel(src, whereStr)
# Parses an ORDER BY clause such as "order by id asc, name desc"; returns e.g. [['id', 'name'], [True, False]]
def getListOfOrderBy(orderByStr: str):
# print(orderByStr)
orderByKeyList = []
orderByValueList = []
tempArray1 = orderByStr.split(",")
for ele in tempArray1:
tempArray2 = ele.split()
orderByKeyList.append(tempArray2[0].strip())
if "asc" == tempArray2[1].strip():
orderByValueList.append(True)
else:
orderByValueList.append(False)
return [orderByKeyList, orderByValueList]
def tableSelect(currentDatabase: str, token):
tokenStr = "" # 直接提取出来所有的sql指令 进行正则匹配
for ele in token:
tokenStr += ele.normalized
    # collapse extra whitespace
tokenStr = re.sub(r" +", " ", tokenStr)
tableName: str = ""
src: str = ""
whereStr: str = ""
orderByList = None
columnStr = ""
columnStrList = []
    # handle the ORDER BY clause
if "ORDER BY" in tokenStr:
p3 = re.search("ORDER BY (.*)", tokenStr)
# print(p3.group(0))
# print(p3.group(1))
orderByStr = p3.group(1).strip()
orderByList = getListOfOrderBy(orderByStr)
# print(orderByList)
tokenStr = re.sub(r" ORDER BY (.*)", "", tokenStr)
    # use a regex to extract the table name
if "where" not in tokenStr:
p1 = re.search(r'SELECT (.*?) FROM (.*)', tokenStr)
# print(p1.group(0)) # SELECT * FROM student
# print(p1.group(1)) # *
columnStr = p1.group(1)
# print(p1.group(2)) # student
tableName = p1.group(2)
else:
p2 = re.search(r'SELECT (.*?) FROM (.*?) where (.*)', tokenStr)
# print(p2.group(0)) # SELECT * FROM student where sno< 20 and sno > 5 and sno >=10 and sno > 17 or sno < 12
# print(p2.group(1)) # *
columnStr = p2.group(1)
# print(p2.group(2)) # student
# print(p2.group(3)) # sno< 20 and sno > 5 and sno >=10 and sno > 17 or sno < 12
tableName = p2.group(2)
whereStr = p2.group(3)
    # build the list of columns to display
if columnStr != "*":
for ele in columnStr.split(","):
columnStrList.append(ele.strip())
# print(columnStrList)
src = "databases/" + currentDatabase.upper() + "/" + tableName + ".xlsx"
targetDataframe = parseWhereGetDf(src, whereStr)
if orderByList is not None:
targetDataframe.sort_values(by=orderByList[0], inplace=True, ascending=orderByList[1])
print(targetDataframe[columnStrList if columnStr!="*" else targetDataframe.columns])
# Parses SET clauses of the form: name=TestName,id=1
def getListOfUpdateSet(updateStr: str):
# print(updateStr)
updateKeyList = []
updateValueList = []
tempArray1 = updateStr.split(",")
for ele in tempArray1:
tempArray2 = ele.split("=")
updateKeyList.append(tempArray2[0].strip())
updateValueList.append(tempArray2[1].strip())
return [updateKeyList, updateValueList]
def handleUpdateInExcel(src: str, whereStr: str, modifyStr: str):
writer = pd.ExcelWriter(src)
attributeDf = pd.read_excel(writer, sheet_name="attribute")
    # delete the matching rows first, then re-insert the updated ones
tempDataframe: pd.DataFrame = parseWhereGetDf(src, whereStr)
# print(tempDataframe)
    handleDeleteInExcel(src, whereStr)  # the delete must finish before re-reading the sheet
dataDf: pd.DataFrame = pd.read_excel(writer, sheet_name="data")
updateList = getListOfUpdateSet(modifyStr)
# print(updateList) # [['name', 'id'], ['姓名测试', '1']]
    primaryKeyStr: str = attributeDf["value"].at[6].strip()  # read out the primary key
primaryKeyList = primaryKeyStr.strip().split(",")
for index, ele in enumerate(primaryKeyList):
primaryKeyList[index] = ele.strip()
backUpTempDataframe = tempDataframe.copy(deep=True)
    # print(primaryKeyList)  # list of primary key columns
for index, ele in enumerate(updateList[0]):
tempDataframe[ele] = updateList[1][index]
    dataTempDf = pd.concat([tempDataframe, dataDf], join="outer", ignore_index=True)  # take the union
dataTempDf.to_excel("./temp.xlsx", index=False)
    dataTempDf = pd.read_excel("./temp.xlsx")
import random
import time
import plotly
import plotly.graph_objs as go
import pandas as pd
import json
from bs4 import BeautifulSoup
import requests
colors = ['lightseagreen', 'lightsalmon', 'lightsteelblue',
'lightcoral', 'lightgoldenrodyellow', 'lime']
URL = "http://api.scraperapi.com/"
API_KEY = "<KEY>"
def create_plot_pie(c):
labels = []
values = []
for element in c:
labels.append(element)
values.append(int(c[element]))
data = [
go.Pie(
labels=labels,
values=values
)
]
graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON
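# Usage sketch: `c` is expected to map labels to counts, e.g. a dict or a collections.Counter
# (the sample counts below are assumptions, not data from this app).
# example_counts = {'positive': 120, 'negative': 45, 'neutral': 30}
# pie_json = create_plot_pie(example_counts)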
def create_plot_violin(dic_data, message='total'):
data = []
counter = 0
for key, piece_of_data in dic_data.items():
        df = pd.DataFrame({'y': piece_of_data})
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Library with methods for handling bounding boxes in images
License_info:
# ==============================================================================
# ISC License (ISC)
# Copyright 2020 <NAME> Laboratory for Embedded Machine Learning
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# The following script uses several method fragments from Tensorflow
https://github.com/tensorflow/models/blob/master/research/object_detection/dataset_tools/create_pascal_tf_record.py
Tensorflow has the following licence:
# ==============================================================================
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
# Futures
from __future__ import print_function
# Built-in/Generic Imports
import os
import time
# Libs
import argparse
import numpy as np
import glob
import xml.etree.ElementTree as ET
from multiprocessing import Pool
import matplotlib
from six import BytesIO
import re
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
import tkinter
# Own modules
import image_utils as im
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Christian Doppler Laboratory for ' \
'Embedded Machine Learning'
__credits__ = ['']
__license__ = 'ISC'
__version__ = '0.2.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Experimental'
def xml_to_csv(path, filter=None):
"""Iterates through all .xml files (generated by labelImg) in a given directory and combines
them in a single Pandas dataframe.
Parameters:
----------
path : str
The path containing the .xml files
filter: list of image file names. Default None. If no filter is given, all xml files are used
Returns
-------
Pandas DataFrame
The produced dataframe
"""
xml_file_list=[]
if filter is not None:
print("Filter available. Using only xml files with corresponding image files")
#xml_filename = os.path.join(xml_source, os.path.splitext(filename)[0] + '.xml')
xml_file_list = [os.path.join(path, os.path.splitext(image_name)[0] + '.xml') for image_name in filter]
else:
print("Filter not used. Select all xml files of the folder")
xml_file_list = glob.glob(path + '/*.xml')
xml_list = []
for xml_file in xml_file_list: #glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
#Check if detection or ground truth
if isinstance(member.find("score"), ET.Element):
score = float(member.find("score").text)
else:
score = None
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member.find("name").text,
int(member.find("bndbox")[0].text),
int(member.find("bndbox")[1].text),
int(member.find("bndbox")[2].text),
int(member.find("bndbox")[3].text),
score
)
xml_list.append(value)
column_name = ['filename', 'width', 'height',
'class', 'xmin', 'ymin', 'xmax', 'ymax', 'score']
    xml_df = pd.DataFrame(xml_list, columns=column_name)
    return xml_df
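# Hypothetical usage (the directory and image names below are assumptions, not part of this library):
# labels_df = xml_to_csv('annotations/xmls', filter=['img_0001.jpg', 'img_0002.jpg'])
# labels_df.to_csv('annotations/labels.csv', index=False)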
import unittest
import logging
import summer2020py.setup_logger as setup_logger
import summer2020py.make_genebody_coverage_graphs.make_genebody_coverage_graphs as mgcg
import pandas
import tempfile
import os
temp_wkdir_prefix = "TestMakeGeneBodyCoverageGraphs"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
# Some notes on testing conventions (more in cuppers convention doc):
# (1) Use "self.assert..." over "assert"
# - self.assert* methods: https://docs.python.org/2.7/library/unittest.html#assert-methods
# - This will ensure that if one assertion fails inside a test method,
# exectution won't halt and the rest of the test method will be executed
# and other assertions are also verified in the same run.
# (2) For testing exceptions use:
# with self.assertRaises(some_exception) as context:
# [call method that should raise some_exception]
# self.assertEqual(str(context.exception), "expected exception message")
#
# self.assertAlmostEquals(...) for comparing floats
class TestMakeGeneBodyCoverageGraphs(unittest.TestCase):
def test_main(self):
logger.debug("\n \n \n test_main \n \n ")
input_dir = os.path.join("assets", "notebook_inputs", "output_gbdy_cov")
logger.debug("input_dir: {}".format(input_dir))
with tempfile.TemporaryDirectory(prefix=temp_wkdir_prefix) as wkdir:
logger.debug("wkdir: {}".format(wkdir))
args = mgcg.build_parser().parse_args([
#"-s", source_dir,
"-i", input_dir,
"-o", wkdir,
"-of", "MYEXPERIMENTID"
])
mgcg.main(args)
#check that html files were outputted
self.assertTrue(os.path.exists(os.path.join(wkdir, "MYEXPERIMENTID_genebody_histogram_coverage_diff.html")))
self.assertTrue(os.path.exists(os.path.join(wkdir, "MYEXPERIMENTID_genebody_histogram_cov_diff_pct.html")))
self.assertTrue(os.path.exists(os.path.join(wkdir, "MYEXPERIMENTID_genebody_coverage_percentile.html")))
self.assertTrue(os.path.exists(os.path.join(wkdir, "MYEXPERIMENTID_genebody_coverage_counts.html")))
#check that the text files are the same as example outputss
#doesn't work for html files
outputted_files = [
os.path.join(wkdir, "MYEXPERIMENTID_all_genebody_coverage_r1200x6.txt"),
os.path.join(wkdir, "MYEXPERIMENTID_asymmetry_compare_80_20_r12x6.txt")
]
expected_files = [
os.path.join("assets", "example_notebook_outputs", "MYEXPERIMENTID_all_genebody_coverage_r1200x6.txt"),
os.path.join("assets", "example_notebook_outputs", "MYEXPERIMENTID_asymmetry_compare_80_20_r12x6.txt")
]
for i in range(0, len(outputted_files)):
opened_output = open(outputted_files[i], "r")
opened_expected = open(expected_files[i], "r")
logger.debug("checking {} against expected".format(outputted_files[i]))
self.assertEqual(opened_output.read(), opened_expected.read())
opened_output.close()
opened_expected.close()
def test_input_file_search(self):
logger.debug("\n \n \n test_input_file_search\n \n ")
input_dir = os.path.join("assets","notebook_inputs", "output_gbdy_cov")
logger.debug("input_dir: {}".format(input_dir))
input_files = mgcg.input_file_search(input_dir)
self.assertEqual(len(input_files), 12)
#check that the first 3 files are the correct ones
self.assertEqual(
os.path.join('assets','notebook_inputs','output_gbdy_cov','D121','D121.geneBodyCoverage.txt'),
input_files[0]
)
self.assertEqual(
os.path.join('assets','notebook_inputs','output_gbdy_cov','D122','D122.geneBodyCoverage.txt'),
input_files[1]
)
self.assertEqual(
os.path.join('assets','notebook_inputs','output_gbdy_cov','D123','D123.geneBodyCoverage.txt'),
input_files[2]
)
def test_load_genebody_coverage_data(self):
input_files = [
os.path.join("assets", "notebook_inputs", "output_gbdy_cov", "D121", "D121.geneBodyCoverage.txt"),
os.path.join("assets", "notebook_inputs", "output_gbdy_cov", "D122", "D122.geneBodyCoverage.txt")
]
inp_df_list = mgcg.load_genebody_coverage_data(input_files)
#check that there are two data frames
self.assertEqual(len(inp_df_list), 2)
#check that first df is the right shape
self.assertEqual(inp_df_list[0].shape[0], 100)
self.assertEqual(inp_df_list[0].shape[1], 2)
#check that second df is the right shape
self.assertEqual(inp_df_list[1].shape[0], 100)
self.assertEqual(inp_df_list[1].shape[1], 2)
#check that sample id are the right ones
self.assertEqual(inp_df_list[0].sample_id[0], "D121")
self.assertEqual(inp_df_list[1].sample_id[0], "D122")
def test_merge_dfs_into_one(self):
logger.debug("\n \n \n test_merge_dfs_into_one\n \n ")
#create first fake data frame
df = pandas.DataFrame({"coverage_counts":range(100000,500000, 4000), "sample_id":"FAKE"})
df.index.name = "genebody_pct"
df.index += 1
#create second fake data frame
df2 = pandas.DataFrame({"coverage_counts":range(120000,520000, 4000), "sample_id":"FACE"})
df2.index.name = "genebody_pct"
df2.index += 1
counts_df = mgcg.merge_dfs_into_one([df, df2])
logger.debug("counts_df: {}".format(counts_df))
#check that df is the right shape
self.assertEqual(counts_df.shape[0], 200)
self.assertEqual(counts_df.shape[1], 3)
#check that first sample id is fake and that 11th is face
self.assertEqual(counts_df.sample_id[0], "FAKE")
self.assertEqual(counts_df.sample_id[100], "FACE")
def test_sum_counts(self):
logger.debug("\n \n \n test_sum_counts\n \n ")
sample_ids =[]
for i in range(0, 200):
if i < 100:
sample_ids.append("FAKE")
else:
sample_ids.append("FACE")
#create fake data frame
counts_df = pandas.DataFrame({"coverage_counts":list(range(100000,500000, 4000)) + list(range(120000,520000, 4000)), "sample_id":sample_ids})
sum_counts_df = mgcg.sum_counts(counts_df)
logger.debug("counts_df: {}".format(counts_df))
logger.debug("sum_counts_df: {}".format(sum_counts_df))
#check that df is the right shape
self.assertEqual(sum_counts_df.shape[0], 2)
self.assertEqual(sum_counts_df.shape[1], 1)
#check that the sums are correct
self.assertEqual(sum_counts_df.total_coverage_counts[0], 31800000)
self.assertEqual(sum_counts_df.total_coverage_counts[1], 29800000)
def test_calculate_percentile_df(self):
logger.debug("\n \n \n test_jcalculate_percentile_df\n \n ")
sample_ids =[]
for i in range(0, 200):
if i < 100:
sample_ids.append("FAKE")
else:
sample_ids.append("FACE")
counts_df = pandas.DataFrame({"coverage_counts":list(range(100000,500000, 4000)) + list(range(120000,520000, 4000)), "sample_id":sample_ids})
sum_counts_df = pandas.DataFrame(data = {"total_coverage_counts":[31800000, 29800000]}, index = ["FACE", "FAKE"])
sum_counts_df.index.name = "sample_id"
percentile_df = mgcg.calculate_percentile_df(counts_df, sum_counts_df)
#check that df is the right shape
self.assertEqual(percentile_df.shape[0], 200)
self.assertEqual(percentile_df.shape[1], 4)
#check that first sample id is fake and that 11th is face
self.assertEqual(percentile_df.sample_id[0], "FAKE")
self.assertEqual(percentile_df.sample_id[100], "FACE")
#check that FAKE coveragecounts are 2.8 mil and FACE are 3 mil
self.assertEqual(percentile_df.total_coverage_counts[0], 29800000)
self.assertEqual(percentile_df.total_coverage_counts[100], 31800000)
#check first twenty percentiles to make sure they are correct
for i in range(0, 20):
self.assertEqual(percentile_df.coverage_percentile[i], percentile_df.coverage_counts[i] / percentile_df.total_coverage_counts[i])
def test_create_pct_df_list(self):
logger.debug("\n \n \n test_create_pct_df_list\n \n ")
sample_ids =[]
for i in range(0, 200):
if i < 100:
sample_ids.append("FAKE")
else:
sample_ids.append("FACE")
coverage_percentile = list(range(3356,16756,134)) + list(range(2726,16226, 135))
for num in range(len(coverage_percentile)):
coverage_percentile[num] = coverage_percentile[num] / 1000000
percentile_df = pandas.DataFrame({"coverage_percentile":coverage_percentile, "sample_id":sample_ids, "genebody_pct":list(range(1,101))+ list(range(1,101))})
pct_df_list = mgcg.create_pct_df_list(percentile_df)
logger.debug("pct_df_list: {}".format(pct_df_list))
#checking 20th
self.assertEqual(pct_df_list[0].coverage_20pct[0], 0.005902)
self.assertEqual(pct_df_list[0].coverage_20pct[1], 0.005291)
#checking 50th
self.assertEqual(pct_df_list[1].coverage_50pct[0], 0.009922)
self.assertEqual(pct_df_list[1].coverage_50pct[1], 0.009341)
#checking 80th
self.assertEqual(pct_df_list[2].coverage_80pct[0], 0.013942)
self.assertEqual(pct_df_list[2].coverage_80pct[1], 0.013391)
def test_create_pct_comp_df(self):
logger.debug("\n \n \n test_create_pct_comp_df\n \n ")
df20 = pandas.DataFrame(data = {"coverage_20pct":[0.005902,0.005291]}, index = ["FAKE", "FACE"])
df20.index.name = "sample_id"
df50 = pandas.DataFrame(data = {"coverage_50pct":[0.009922,0.009341]}, index = ["FAKE", "FACE"])
df50.index.name = "sample_id"
df80 = pandas.DataFrame(data = {"coverage_80pct":[0.013942,0.013391]}, index = ["FAKE", "FACE"])
df80.index.name = "sample_id"
pct_comp_df = mgcg.create_pct_comp_df([df20, df50, df80])
logger.debug("pct_comp_df: {}".format(pct_comp_df))
self.assertAlmostEqual(pct_comp_df.cov_diff_pct[0], 0.810320, places=5)
self.assertAlmostEqual(pct_comp_df.cov_diff_pct[1], 0.867145, places=5)
def test_add_label_col(self):
logger.debug("\n \n \n test_add_label_col\n \n ")
pct_comp_df = pandas.DataFrame(data = {"cov_diff_pct":[0.810320,0.867145]}, index = ["FAKE", "FACE"])
pct_comp_df.index.name = "sample_id"
pct_comp_df = mgcg.add_label_col(pct_comp_df)
logger.debug("pct_comp_df: {}".format(pct_comp_df))
self.assertEqual(pct_comp_df.label[0], "FAKE 0.81")
self.assertEqual(pct_comp_df.label[1], "FACE 0.87")
def test_add_labels_based_on_sample_id(self):
logger.debug("\n \n \n test_add_labels_based_on_sample_id\n \n ")
sample_ids =[]
for i in range(0, 200):
if i < 100:
sample_ids.append("FAKE")
else:
sample_ids.append("FACE")
pct_comp_df = pandas.DataFrame(data = {"cov_diff_pct":[0.810320,0.867145], "label":["FAKE 0.81", "FACE 0.87"]}, index = ["FAKE", "FACE"])
percentile_df = pandas.DataFrame({"sample_id":sample_ids})
percentile_df = mgcg.add_labels_based_on_sample_id(percentile_df, pct_comp_df)
self.assertEqual(percentile_df.label[0], "FAKE 0.81")
self.assertEqual(percentile_df.label[100], "FACE 0.87")
def test_save_to_tsv(self):
with tempfile.TemporaryDirectory(prefix=temp_wkdir_prefix) as wkdir:
logger.debug("\n \n \n test_save_to_tsv: {}\n \n ".format(wkdir))
output_all_pct_template = "{exp_id}_all_genebody_coverage_r{{}}x{{}}.txt".format(exp_id="MYEXPERIMENTID")
logger.debug("output_all_pct_template: {}".format(output_all_pct_template))
output_compare_80_20_template = "{exp_id}_asymmetry_compare_80_20_r{{}}x{{}}.txt".format(exp_id="MYEXPERIMENTID")
logger.debug("output_compare_80_20_template: {}".format(output_compare_80_20_template))
pct_comp_df = pandas.DataFrame(data = {"cov_diff_pct":[0.810320,0.867145], "label":["FAKE 0.81", "FACE 0.87"]}, index = ["FAKE", "FACE"])
sample_ids =[]
for i in range(0, 200):
if i < 100:
sample_ids.append("FAKE")
else:
sample_ids.append("FACE")
coverage_percentile = list(range(3356,16756,134)) + list(range(2726,16226, 135))
for num in range(len(coverage_percentile)):
coverage_percentile[num] = coverage_percentile[num] / 1000000
percentile_df = pandas.DataFrame({"coverage_percentile":coverage_percentile, "sample_id":sample_ids, "genebody_pct":list(range(1,101))+ list(range(1,101))})
out_f_pct = mgcg.save_to_tsv(wkdir, output_compare_80_20_template, pct_comp_df)
out_f_percentile = mgcg.save_to_tsv(wkdir, output_all_pct_template, percentile_df)
logger.debug("out_f_pct: {}".format(out_f_pct))
logger.debug("out_f_percentile: {}".format(out_f_percentile))
self.assertTrue(os.path.exists(out_f_pct))
self.assertTrue(os.path.exists(out_f_percentile))
def test_create_and_save_genebody_coverage_graph(self):
with tempfile.TemporaryDirectory(prefix=temp_wkdir_prefix) as wkdir:
logger.debug("\n \n \n test_create_and_save_genebody_coverage_graph: {}\n \n ".format(wkdir))
output_line_html_template = "{exp_id}_genebody_{{}}.html".format(exp_id="MYEXPERIMENTID")
logger.debug("output_line_html_template: {}".format(output_line_html_template))
sample_ids =[]
labels = []
for i in range(0, 200):
if i < 100:
sample_ids.append("FAKE")
labels.append("FAKE 0.81")
else:
sample_ids.append("FACE")
labels.append("FAcE 0.87")
coverage_percentile = list(range(3356,16756,134)) + list(range(2726,16226, 135))
for num in range(len(coverage_percentile)):
coverage_percentile[num] = coverage_percentile[num] / 1000000
percentile_df = pandas.DataFrame({"coverage_percentile":coverage_percentile, "sample_id":sample_ids, "genebody_pct":list(range(1,101))+ list(range(1,101)), "label":labels})
output_filepath = mgcg.create_and_save_genebody_coverage_graph("coverage_percentile", wkdir, percentile_df, output_line_html_template)
self.assertTrue(os.path.exists(output_filepath))
def test_create_and_save_histograms(self):
with tempfile.TemporaryDirectory(prefix=temp_wkdir_prefix) as wkdir:
logger.debug("\n \n \n test_create_and_save_histograms: {}\n \n ".format(wkdir))
output_histogram_html_template = "{exp_id}_genebody_histogram_{{}}.html".format(exp_id="MYEXPERIMENTID")
logger.debug("output_histogram_html_template: {}".format(output_histogram_html_template))
            pct_comp_df = pandas.DataFrame(data = {"cov_diff_pct":[0.810320,0.867145], "label":["FAKE 0.81", "FACE 0.87"]}, index = ["FAKE", "FACE"])
import pandas as pd
import numpy as np
import os
# summarize panel data by group and time period
def adjmeans(df, byvar, var, period, changeexclude=None, excludetype=None):
df = df.sort_values([byvar, period])
df = df.dropna(subset=[var])
# iterate using numpy arrays
prevbyvar = 'ZZZ'
prevvarvalue = 0
rowlist = []
varvalues = df[[byvar, var]].values
# convert exclusion ratio to absolute number
if (excludetype == 'ratio' and changeexclude is not None):
changeexclude = df[var].mean() * changeexclude
# loop through variable values
for j in range(len(varvalues)):
byvar = varvalues[j][0]
varvalue = varvalues[j][1]
if prevbyvar != byvar:
if prevbyvar != 'ZZZ':
rowlist.append({'byvar': prevbyvar, 'avgvar': varsum / byvarcount, 'sumvar': varsum, 'byvarcount': byvarcount})
varsum = 0
byvarcount = 0
prevbyvar = byvar
# exclude extreme changes in variable value
if ((changeexclude is None) or (0 <= abs(varvalue-prevvarvalue) <= changeexclude) or (byvarcount == 0)):
varsum += varvalue
byvarcount += 1
prevvarvalue = varvalue
rowlist.append({'byvar' : prevbyvar, 'avgvar' : varsum / byvarcount, 'sumvar':varsum, 'byvarcount': byvarcount})
return pd.DataFrame(rowlist)
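# Usage sketch for adjmeans (the demo panel below is an assumption, not part of the original module):
# demo = pd.DataFrame({'personid': [1, 1, 1, 2, 2],
#                      'year': [2019, 2020, 2021, 2020, 2021],
#                      'income': [50.0, 52.0, 990.0, 41.0, 43.0]})
# adjmeans(demo, 'personid', 'income', 'year', changeexclude=2, excludetype='ratio')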
# check matches of merge-by values
def checkmerge(dfleft, dfright, mergebyleft, mergebyright):
dfleft['inleft'] = 'Y'
dfright['inright'] = 'Y'
dfboth = pd.merge(dfleft[[mergebyleft, 'inleft']],
dfright[[mergebyright, 'inright']],
left_on=[mergebyleft],
right_on=[mergebyright], how='outer')
dfboth.fillna('N', inplace=True)
print(pd.crosstab(dfboth.inleft, dfboth.inright))
print(dfboth.loc[(dfboth.inleft == 'N') |
(dfboth.inright == 'N')].head(20))
# concatenate all pickle files in a folder, assuming they have the same structure
def addfiles(directory):
    dfout = pd.DataFrame()
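    # Assumed completion sketch: the rest of this helper was cut off above; the loop below and the
    # '.pkl' suffix check are assumptions about how the concatenation was meant to work.
    for filename in os.listdir(directory):
        if filename.endswith('.pkl'):
            dfout = pd.concat([dfout, pd.read_pickle(os.path.join(directory, filename))])
    return dfout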
import time
import sqlite3
import os
import hashlib
import traceback
import pandas as pd
from flask import Flask, request, json, render_template, send_from_directory, abort, g
from passlib.apps import custom_app_context as pwd_context
from flask_httpauth import HTTPBasicAuth, HTTPTokenAuth
auth = HTTPBasicAuth()
tokenauth = HTTPTokenAuth()
working_directory = os.path.dirname(__file__)
app = Flask(__name__)
tokens = dict()
master_database = 'master.db'
def create_heatmap(schedules):
"""Represents upcoming tasks as a calendar heatmap."""
total = []
for item, schedule in schedules:
for day in schedule:
total.append((day, 1, item))
schedule_df = pd.DataFrame(total, columns=['date', 'check', 'item'])
schedule_df.index = schedule_df['date']
schedule_df = schedule_df.drop(columns=['date'])
resampled = schedule_df.resample('D').agg({'check': 'sum', 'item': list})
resampled = resampled[resampled['check'] > 0].reset_index()
return resampled
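# Input sketch: `schedules` is an iterable of (item, pandas.DatetimeIndex) pairs, typically the
# output of generate_upcoming_tasks below (the sample item name here is an assumption).
# create_heatmap([('bandages', pd.date_range('2021-01-04', periods=3, freq='7D'))])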
def generate_upcoming_tasks(merged, exclude_past=True):
"""Generates upcoming tasks given information about last checked dates and master data."""
today = pd.Timestamp.today()
schedules = []
for _, row in merged.iterrows():
schedule = pd.date_range(row['date_checked'], today+pd.Timedelta(13, 'W'), freq=f'{row["frequency"]*7}D')
schedule = schedule[1:]
if len(schedule) == 0:
continue
if exclude_past:
schedule = schedule[schedule >= today]
schedules.append((row['item'], schedule))
return schedules
def get_user_database_name(username):
connection = sqlite3.connect(os.path.join(working_directory, master_database))
df = pd.read_sql('select database from user_to_database where username = :username',
con = connection, params = {"username": username})
return df['database'].iloc[0]
def inspect_inventory_log(username):
"""Gathers observations and master data."""
today = pd.Timestamp.today()
user_database = get_user_database_name(username)
connection = sqlite3.connect(os.path.join(working_directory, user_database))
checks = pd.read_sql('SELECT * from inventory_log', con = connection)
checks['date'] = pd.to_datetime(checks['date'])
checks = checks.sort_values('date')
last_checked = checks.groupby(['item']).last().reset_index()
master_data = pd.read_sql('SELECT * from master_data', con = connection)
recent_master_data = master_data.sort_values('date_added').groupby('item').last().reset_index()
merged = recent_master_data.merge(last_checked, on='item', suffixes=('_initial','_checked'))
merged['week_difference'] = (today - merged['date_checked']).dt.days/7
merged['need_to_check'] = merged['week_difference'] > merged['frequency']
return merged
@auth.verify_password
def verify_password(username, password):
connection = sqlite3.connect(os.path.join(working_directory, master_database))
users = pd.read_sql('select * from users where username=:username',
con = connection,
params={"username": username})
if len(users) == 0:
return False
encrypted_password = users['password'].iloc[0]
g.user = username
return pwd_context.verify(password, encrypted_password)
@tokenauth.verify_token
def verify_token(token):
today = pd.Timestamp.today()
if token in tokens:
if tokens[token]['expiry'] > today:
g.user = tokens[token]['username']
return True
else:
tokens.pop(token, None)
return False
def create_user(username, password):
"""Creates a user in the database including its own set of tables."""
connection = sqlite3.connect(os.path.join(working_directory, master_database))
try:
existing_users = pd.read_sql('select * from users', con = connection)
except:
existing_users = []
current_id = len(existing_users) + 1 # we don't depend on id input anywhere so it's fine to not use better UUIDs
if len(existing_users) > 0:
if username in set(existing_users['username']):
return False
user = pd.DataFrame()
user['username'] = [username]
user['password'] = [pwd_context.hash(password)] # encryption
user['active'] = [True]
user['id'] = [current_id]
user.to_sql('users', con = connection, if_exists='append')
new_db = f'user{current_id}.db'
user_db_mapping = pd.DataFrame()
user_db_mapping['username'] = [username]
user_db_mapping['database'] = [new_db]
user_db_mapping.to_sql('user_to_database', con = connection, if_exists='append')
return True
@app.route('/')
def hello_world():
return render_template("index.html")
@app.route('/users/login', methods=['GET'])
@auth.login_required
def login_user():
today = pd.Timestamp.today()
current_tokens = list(tokens.keys())
for token in current_tokens:
if tokens[token]['expiry'] < today:
tokens.pop(token, None)
expiry = today + pd.Timedelta(11, 'H')
frontend_expiry = int((time.time() + (60*60*11)) * 1000)
token_string = hashlib.sha256((g.user+str(today)).encode()).hexdigest()
token = {'username': g.user, 'expiry': expiry}
print(token)
tokens[token_string] = token
return json.jsonify({'token_created': token_string, 'username': g.user, 'token_expiry': frontend_expiry})
@app.route('/users/register', methods=['POST'])
def register_user():
print(request)
username = request.form.get('username')
password = request.form.get('password')
if username is None or password is None:
abort(400)
created = create_user(username, password)
return json.jsonify({ 'username_created': created })
@app.route('/suggestion')
@tokenauth.login_required
def rx_suggestion():
"""Queries the DB for all rx names and return them to be used as suggestions"""
user_database = get_user_database_name(g.user)
try:
connection = sqlite3.connect(os.path.join(working_directory, user_database))
inventory = pd.read_sql('SELECT DISTINCT item from inventory_log', con = connection)
suggestions_dict = inventory.to_dict(orient='list')
print(suggestions_dict)
except:
suggestions_dict = {'item': []}
return json.jsonify(suggestions_dict)
@app.route('/search/<name>')
@tokenauth.login_required
def search_rx(name):
"""Queries the DB for the relevant rows, based on search bar"""
user_database = get_user_database_name(g.user)
try:
connection = sqlite3.connect(os.path.join(working_directory, user_database))
inventory = pd.read_sql('SELECT * from inventory_log', con = connection)
low_name = name.lower()
sub_inventory = inventory[inventory['item'].str.lower() == low_name]
actual_name = sub_inventory['item'].iloc[0]
checks_count = len(sub_inventory.index)
print(checks_count)
search_return_dict = {"checks_count": checks_count}
# What else should we return when someone asks for information about an item?
# TODO: next_check
search_return_dict["item"] = [actual_name]
search_return_dict["last_checked"] = sub_inventory["date"].max()
merged = inspect_inventory_log(username = g.user)
need_to_check = merged[merged['item'] == actual_name].iloc[0]['need_to_check'].astype(str)
search_return_dict["need_to_check"] = need_to_check
# Maybe also add the median time between checks
except:
search_return_dict = {'item': []}
return json.jsonify(search_return_dict)
@app.route('/add_item', methods=['POST'])
@tokenauth.login_required
def add_item(inventory_checked=True):
today = pd.Timestamp.today()
username = g.user
df = pd.DataFrame()
df['item'] = [request.form.get('name')]
df['date'] = pd.to_datetime([request.form.get('date')])
df['frequency'] = [int(request.form.get('frequency'))]
df['date_added'] = [today]
print(df)
user_database = get_user_database_name(username)
connection = sqlite3.connect(os.path.join(working_directory, user_database))
df.to_sql('master_data', con=connection, if_exists='append', index=False)
if inventory_checked:
df[['date', 'item']].to_sql('inventory_log', con=connection, if_exists='append', index=False)
return df.to_json(orient='split', index=False)
@app.route('/upload_master_data', methods=['POST'])
@tokenauth.login_required
def upload_master_data(inventory_checked=True):
"""Updates a master table from an input file."""
    today = pd.Timestamp.today()
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
self.assertRaises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
# this is NOT equal and cannot be roundtriped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_fields(self):
def check(value):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_nat_converters(self):
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), iNaT)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual(ct(10, unit='s'), np.timedelta64(
10, 's').astype('m8[ns]'))
self.assertEqual(ct(10, unit='d'), np.timedelta64(
10, 'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)),
np.timedelta64(1, 's').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)),
np.timedelta64(1, 'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)),
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
                               ('12T', Timedelta('1 days 02:36:00')
# -*- coding: utf-8 -*-
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import pandas as pd
from keras import utils, callbacks
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Flatten, Embedding, Dropout, Concatenate, Dot,Reshape,Merge
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam, SGD, RMSprop,Adamax
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import matplotlib.pyplot as plt
import math
from sklearn import preprocessing
from sklearn.ensemble import ExtraTreesClassifier,ExtraTreesRegressor,RandomForestClassifier,RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso
nbatch_size = 512
def rebuild_data():
"""
    Clean the raw data and select the features used by the model
"""
user_header = ['user_id','gender', 'age', 'job']
user_df = pd.read_csv('./data/ml-1m/users.dat', sep='::', names=user_header, usecols=[0, 1, 2, 3], engine = 'python')
user_df.set_index(['user_id'], inplace = False)
movie_header = ['movie_id', 'title','category']
movie_df = pd.read_csv('./data/ml-1m/movies.dat', sep='::', names=movie_header, usecols=[0, 1, 2], engine = 'python')
movie_df.set_index(['movie_id'], inplace = False)
rating_header = ['user_id', 'movie_id', 'rating', 'timestamp']
rating_df = pd.read_csv('./data/ml-1m/ratings.dat',sep='::', names=rating_header, engine = 'python')[:100000]
    # look up each rating's user row by user_id
    rating_user = [user_df[user_df['user_id'] == uid].values[0] for uid, mid, r, _ in rating_df.values]
rating_movie = [movie_df[movie_df['movie_id'] == mid].values[0] for uid, mid, r, _ in rating_df.values]
user_df = pd.DataFrame(rating_user, index=None, columns=['user_id', 'gender', 'age', 'job'])
movie_df = pd.DataFrame(rating_movie, index=None, columns=['movie_id', 'title', 'category'])
rating_df = rating_df.rating
pd.to_pickle(user_df, './data/ml-1m/user_pick')
pd.to_pickle(movie_df, './data/ml-1m/movie_pick')
    pd.to_pickle(rating_df, './data/ml-1m/rating_pick')
import pandas as pd
import numpy as np
import snscrape.modules.twitter as sntwitter # Ensure snscrape (dev. build) is installed
import os
import itertools
import time
from src.config import *
def crawler(keywords=KEYWORDS, countriesDict=COUNTRIES_DICT, num_tweets_per_tag=NUM_TWEETS_PER_TAG,
start_date=START_DATE, end_date=END_DATE):
'''
Arguments:
keywords - keywords to search for. If None, then default list of
keywords is used (Biden vs. Trump - US Presidential Elections 2020).
countriesDict - Python Dictionary of countries to scrape tweets from. Keys of
dictionary must be the country names, and values must be the
major cities in the respective countries. Defaults to a pre-defined
list of countries around the world.
num_tweets_per_tag - maximum number of tweets that can be scraped per tag (keyword).
Default - 5000.
start_date - beginning of date range for tweets to be scraped. Default - 2020-09-01.
end_date - end of date range for tweets to be scraped. Default - 2020-12-31.
Returns:
df_v2 - the Pandas DataFrame of tweets scraped using snscrape, which have been cleaned
to remove duplicates and generally improve quality of scraped tweets.
'''
if len(keywords) < 1:
raise RuntimeError("Keywords list is empty. Please enter keywords to scrape tweets in config.py.")
if len(countriesDict.keys()) < 1:
raise RuntimeError("Countries dictionary is empty. Please fill the dictionary in config.py.")
# Initializing Dictionary of DataFrames for Each of the 23 Countries
countriesDf = {}
# This code block scrapes data for each country in the countriesDict dictionary.
# For some countries, the range parameter for SNScrape has been specified.
for country in countriesDict.keys():
if country in countriesDf.keys():
continue
if country in ['Russia']:
withinRange=1000
elif country in ['Mexico']:
withinRange=500
elif country in ['Canada']:
withinRange=100
elif country in ['Singapore']:
withinRange=50
else:
withinRange=800
countriesDf[country] = scrape_data(keywords, country, start_date, end_date,
countriesDict, num_tweets_per_tag, withinRange)
# To check the Number of Tweets found for each Country
for country, countryDf in countriesDf.items():
print(f"{country}: {len(countryDf)}")
# To create the main DataFrame of tweets
df = pd.DataFrame()
for countryDf in countriesDf.values():
df = df.append(countryDf)
print("Shape of DataFrame before Cleaning:", df.shape)
# Cleaning Data
df_indexes_v2 = []
user_dict = {}
for i in range(len(df)):
tweet = df["content"].iloc[i]
# To remove tweets that have more hashtags than normal text
word_list = tweet.lower().split()
num_normal = 0
num_tags = 0
for j in range(len(word_list)):
temp = word_list[j]
if temp[0] == '#':
num_tags += 1
else:
num_normal += 1
if num_tags > num_normal:
continue
# To choose only the latest tweet from a user to prevent multiple tweets from same user
user = df["username"].iloc[i]
user_dict[user] = i
for value in user_dict.values():
df_indexes_v2.append(value)
df_v2 = df.iloc[df_indexes_v2]
print(f'Shape of DataFrame after cleaning: {df_v2.shape}')
# Shuffling tweets in version 2 of the dataframe, and saving to a CSV file
df_v2 = df_v2.drop_duplicates(subset='content')
df_v2 = df_v2.sample(frac=1).reset_index(drop=True)
print(df_v2.shape)
# To print the number of tweets for each country
print(f"Number of tweets per country:\n{df_v2.groupby('country')['content'].nunique()}")
# Save Scraped Data to Current Working Directory
cwd = os.getcwd()
df_v2.to_csv(f"{cwd}/scraped_data.csv", encoding = "utf-8-sig", index=False)
return df_v2
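# Minimal usage sketch: calling crawler() with no arguments falls back to the defaults imported
# from src.config; it performs real scraping, so it is shown here commented out
# (the variable name is an assumption).
# tweets_df = crawler()
# print(tweets_df.shape)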
# Data Scraping (Crawling) Method
def scrape_data(keywords, countryName, start_date, end_date, countriesDict, num_tweets_per_tag, withinRange=1000):
start = time.time()
    df = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Same as the 2x2 layout, but the graphs now refresh on an interval.
Shows a multi-output callback decorator and uses a weather API.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Output, Input
import pandas as pd
import numpy as np
import datetime as dt
import requests
import os
import plotly.offline as pyo
import plotly.graph_objs as go
from model_build_smooth import BuildModel
# =============================================================================
# FUNCTIONS
# =============================================================================
def getData():
"""API import. Return last 30 days of weather in a DataFrame"""
#load entrant (for Heroku)
entrant = 'VFJ5W4L3FNJLNDEMWN6JZSEWB'
#form url
url = ''.join(['https://weather.visualcrossing.com/',
'VisualCrossingWebServices/rest/services/timeline/',
'London%2C%20ENG%2C%20GB/last30days?unitGroup=uk&key={}',
'&include=obs'])\
.format(entrant)
#make request
req = requests.get(url)
req_json = req.json()
#convert to pandas dataframe
wdays = pd.DataFrame(req_json['days'])
#ensure its 30 days long
df = wdays.iloc[(len(wdays)-30):]
#get temp and time
df['datetime'] = pd.to_datetime(df['datetime'], format='%Y-%m-%d')
df = df.set_index('datetime')
return df
def loadModels():
"""Instantiates model class and then loads h5 model"""
#file names
files = os.listdir('./Colab_Models')
files = ['./Colab_Models/' + file for file in files]
#put temperature first
files[0] , files[2] = files[2] , files[0]
#assert we have 4 h5 models
assert len(files) == 4
assert files[0][-3:] == '.h5'
#instantiate model classes
model_dict = \
{key : BuildModel(model_name=key, length=30) for key in files}
#load h5 models
for key in model_dict.keys():
model_dict[key].loadModel()
return model_dict
def plotlyData(name: str, hist, fc):
"""plots history and forecast"""
trace1 = go.Scatter(x=hist.index,
y=hist.values,
name='History',
mode='lines+markers+text',
marker=dict(color='rgba(0,128,0, 1)', size=10,\
symbol=1, line={'width':1}),
line=dict(width=3),
text=hist.values,
textposition="top center",
texttemplate='%{text:.0f}',
textfont_size=12)
trace2 = go.Scatter(x=fc.index,
y=fc.values,
name='Forecast',
mode='lines+markers+text',
marker=dict(color = 'rgba(0,0,255, 0.8)', size=15,\
symbol=5, line = {'width':1}),
line=dict(width=2, dash='longdash'),
text=fc.values,
textposition="top center",
texttemplate='%{text:.0f}',
textfont_size=12)
return [trace1, trace2]
def plotlyLayout(title, y_label):
layout = go.Layout(
title={'text':title,
'x':0.5},
xaxis={'title':'Date',
'showgrid':True,
'gridwidth':1,
'gridcolor':'rgba(0,0,0,0.05)'},
yaxis={'title':y_label,
'showgrid':True,
'gridwidth':1.5,
'gridcolor':'rgba(0,0,0,0.15)'},
legend={'x':0.025, 'y':0.95,
'bgcolor':'rgba(255,255,255,1)',
'borderwidth':0.5},
plot_bgcolor='rgba(227,248,251,0)'
)
return layout
# =============================================================================
# DASH
# =============================================================================
app = dash.Dash()
server = app.server
colors = {'background': '#fff', 'text': '#1E1E1E'}
window_style = {'backgroundColor': colors['background'],
'width':'49%',
'display':'inline-block',
'border-color':'#1e1e1e',
'border-width':'1px',
'border-style':'solid'}
flex_grid_col = {'display':'flex',
'justify-content':'space-evenly',
'margin':'15px 26px'}
p_style = {'textAlign': 'center','color': colors['text'],
'font-family':'sans-serif',
'padding-bottom':'0px'}
model_dict = loadModels()
app.layout =\
html.Div(children=[
html.Div(children=[
html.H1(children='Neural Network Weather Forecasts for London (UK)',
style={'textAlign': 'center','color': colors['text'],
'font-family':'sans-serif'}),
html.P(children="All forecasts were generated by LSTM networks that were built using Python's Tensorflow 2.0",
style= p_style),
html.P(children="Models are fed by the visualcrossing.com weather API",
style= p_style),
html.P(children=[html.A(href='https://github.com/Joseph-Foley/neural_net_weather_forecasts_on_cloud/', children='Github for this dashboard and tensorflow models')],
style=p_style),
html.P(children=[html.A(href='https://www.linkedin.com/in/joseph-foley-b9a39058/', children='My LinkedIn Profile')],
style=p_style)],
style= {'margin':'15px 40px',
'padding':'20px 60px'}),
html.Div(children=[
html.Div(dcc.Graph(id='graph1'),
style=window_style),
html.Div(dcc.Graph(id='graph2'),
style=window_style)],
style=flex_grid_col),
html.Div(children=[
html.Div(dcc.Graph(id='graph3'),
style=window_style),
html.Div(dcc.Graph(id='graph4'),
style=window_style)],
style=flex_grid_col),
dcc.Interval(
id='interval-component',
interval=1*60*60*1000, # 1hr * 60mins * 60secs * 1000milisecs
n_intervals=0)
],
style={'backgroundColor': colors['background']})
@app.callback([Output('graph1','figure'),
Output('graph2','figure'),
Output('graph3','figure'),
Output('graph4','figure')],
[Input('interval-component', 'n_intervals')])
def updateGraphs(n):
#refresh data from the api
df = getData()
#make predictions
model_dict = loadModels()
dict_keys = list(model_dict.keys())
metrics = ['temp', 'precip', 'humidity', 'windspeed']
preds = \
[model_dict[dict_keys[i]].predAhead(7, df[metrics[i]]) for i in range(4)]
#format predictions
    preds = pd.DataFrame(preds, index=metrics)
from IPython.core.display import display, HTML
import pandas as pd
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
import os
import gc
from joblib import Parallel, delayed
from sklearn import preprocessing, model_selection
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import QuantileTransformer
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import numpy.matlib
path_submissions = '/'
target_name = 'target'
scores_folds = {}
import pandas as pd
print(pd.__version__)
# data directory
# data_dir = '../input/optiver-realized-volatility-prediction/'
data_dir = '/home/data/optiver-realized-volatility-prediction/'
# Function to calculate first WAP
def calc_wap1(df):
wap = (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (
df['bid_size1'] + df['ask_size1'])
return wap
# Function to calculate second WAP
def calc_wap2(df):
wap = (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (
df['bid_size2'] + df['ask_size2'])
return wap
def calc_wap3(df):
wap = (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (
df['bid_size1'] + df['ask_size1'])
return wap
def calc_wap4(df):
wap = (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (
df['bid_size2'] + df['ask_size2'])
return wap
# Function to calculate the log of the return
# Remember that logb(x / y) = logb(x) - logb(y)
def log_return(series):
return np.log(series).diff()
# Calculate the realized volatility
def realized_volatility(series):
return np.sqrt(np.sum(series ** 2))
# Function to count unique elements of a series
def count_unique(series):
return len(np.unique(series))
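# Quick numeric illustration (the price series below is made up, not Optiver data):
# prices = pd.Series([100.0, 101.0, 100.5])
# realized_volatility(log_return(prices).dropna())  # sqrt(log(101/100)**2 + log(100.5/101)**2)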
# Function to read our base train and test set
def read_train_test():
train = pd.read_csv(data_dir + 'train.csv')
test = pd.read_csv(data_dir + '/test.csv')
# Create a key to merge with book and trade data
train['row_id'] = train['stock_id'].astype(str) + '-' + train['time_id'].astype(str)
test['row_id'] = test['stock_id'].astype(str) + '-' + test['time_id'].astype(str)
print(f'Our training set has {train.shape[0]} rows')
train.head()
return train, test
# Function to preprocess book data (for each stock id)
def book_preprocessor(file_path):
df = | pd.read_parquet(file_path) | pandas.read_parquet |
# Define a custom model which takes 34 data points and output 8 classes
import numpy as np
import pandas as pd
from natsort import natsorted
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report, plot_confusion_matrix
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# set seed
np.random.seed(42)
torch.manual_seed(42)
EPOCHS = 250
LEARNING_RATE = 0.001
DROPOUT = 0.5
# Load data
test = pd.read_csv("./data/test_data.csv")
data = pd.read_csv("./data/train_data.csv")
labels = pd.read_csv("./data/train_labels.csv")
train_data_df = data.drop(['id'], axis=1)
train_labels = labels.drop(['id'], axis=1)
test_data = test.drop(['id'], axis=1)
# Split the data into training and validation data (80%-20%)
train_data, val_data, train_labels, val_labels = train_test_split(train_data_df, train_labels, test_size=0.2)
# Convert the data into tensors
train_data = torch.from_numpy(train_data.values).float()
val_data = torch.from_numpy(val_data.values).float()
train_labels = torch.from_numpy(train_labels.values).long()
val_labels = torch.from_numpy(val_labels.values).long()
test_data = torch.from_numpy(test_data.values).float()
# model with dropout
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(34, 128)
self.fc2 = nn.Linear(128, 256)
self.fc3 = nn.Linear(256, 512)
self.fc4 = nn.Linear(512, 1024)
self.fc5 = nn.Linear(1024, 512)
self.fc6 = nn.Linear(512, 256)
self.fc7 = nn.Linear(256, 128)
self.fc8 = nn.Linear(128, 8)
self.dropout = nn.Dropout(p=DROPOUT)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = F.relu(self.fc2(x))
x = self.dropout(x)
x = F.relu(self.fc3(x))
x = self.dropout(x)
x = F.relu(self.fc4(x))
x = self.dropout(x)
x = F.relu(self.fc5(x))
x = self.dropout(x)
x = F.relu(self.fc6(x))
x = self.dropout(x)
x = F.relu(self.fc7(x))
x = self.dropout(x)
        # return raw logits; nn.CrossEntropyLoss applies log-softmax internally,
        # so an explicit softmax here would be applied twice
        x = self.fc8(x)
return x
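# Shape sanity check for the network above (a minimal sketch, separate from training):
#   dummy = torch.randn(4, 34)   # batch of 4 samples, 34 features each
#   Net()(dummy).shape           # -> torch.Size([4, 8]), one logit per class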
# Create model
# model = Net()
model = Net()
# Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
for epoch in range(EPOCHS):
optimizer.zero_grad()
output = model(train_data)
loss = criterion(output, train_labels.squeeze())
loss.backward()
optimizer.step()
print("Epoch: {}, Training Loss: {}".format(epoch, loss.item()))
# Evaluate the model on the validation set
model.eval()  # disable dropout for evaluation
with torch.no_grad():
test_output = model(val_data)
_, predicted = torch.max(test_output.data, 1)
# print(predicted)
# print(val_labels)
# print(test_output)
correct = 0
total = 0
for i in range(0, val_labels.size()[0]):
total += 1
if predicted[i] == val_labels.data[i]:
correct += 1
print("Accuracy: {} %".format(100 * correct / total))
# Plot the confusion matrix
class_names = ['Class 0', 'Class 1', 'Class 2', 'Class 3', 'Class 4', 'Class 5', 'Class 6', 'Class 7']
# Compute and plot the confusion matrix
cm = confusion_matrix(val_labels.numpy(), predicted.numpy())
np.set_printoptions(precision=2)
print(cm)
# Save the model
torch.save(model.state_dict(), "./weights.pth")
# Load the model
model.load_state_dict(torch.load("./weights.pth"))
# Test the model and save the predictions with ids
with torch.no_grad():
test_output = model(test_data)
_, predicted = torch.max(test_output.data, 1)
# Create a dataframe with two columns: `id` and `label`
submission = | pd.DataFrame({'id': test['id'], 'label': predicted}) | pandas.DataFrame |
import numpy as np
import pandas as pd
# Set global constants
KT = 1.38e-2 * 298
ETA = 0.89e-9
RHO = 0.99823e-21 # Water
NU = ETA / RHO # kinematic viscosity
def load_db477x() -> pd.core.frame.DataFrame: # Load filter data for NI447x filter
return pd.read_csv("dB447x.txt", sep="\t", index_col=0, header=None, names=None) # ni447x filter
def make_filter_params(db447x, n_downsample: int = 1, n_0: int = 4096, n_poles: int = 1, f_cutoff: int = 10000,
n_avg: int = 1, factor: int = 1):
"""
Function to turn filter parameters into a dictionary that is later read by the parse_filter function
:param db447x: dataframe of ni447x filter data
:param n_downsample: downsampling factor
    :param n_0: number of PSD points before resampling
    :param n_poles: number of filter poles
    :param f_cutoff: cutoff frequency in Hz
    :param n_avg: number of samples averaged by the boxcar filter
    :param factor: resampling factor
    :return: dictionary of filter parameters consumed by parse_filter
"""
return {
"n_downsample": n_downsample,
"n_0": n_0,
"n_poles": n_poles,
"f_cutoff": f_cutoff,
"n_avg": n_avg,
"factor": factor,
"db447x": db447x
}
def parse_filter(psd, parameters: dict, filter_dict: dict, filter_string: str = "", sep=";"):
filters = filter_string.split(sep=sep)
    if filters[0] != '':  # apply every named filter (also handles a single filter name)
for filter_name in filters:
psd = filter_dict[filter_name](psd, parameters)
return np.sqrt(np.abs(np.trapz(psd, axis=0, x=psd.index)))
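# Minimal sketch of how parse_filter is meant to be driven; the filter_dict keys below are
# assumptions (any names may be used, as long as they match the names in filter_string),
# and `psd` is assumed to be a one-column DataFrame indexed by frequency in Hz:
#   params = make_filter_params(load_db477x(), f_cutoff=10000, n_avg=4)
#   filter_dict = {'butterworth': butterworth, 'boxcar': boxcar, 'ni447x': ni447x}
#   rms = parse_filter(psd, params, filter_dict, filter_string='butterworth;boxcar')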
def apply_ni447x(row, db447x):
if row.name < db447x.index[0]:
row[0] = 1
elif row.name > db447x.index[-1]:
row[0] = 0
else:
res = 10 ** (db447x.iloc[db447x.index.get_loc(row.name, method='nearest')] / 20)
row[0] = res.values[0]
def ni447x(psd, parameters):
db447x = parameters['db447x']
coefs_mag = psd.copy()
coefs_mag.apply(apply_ni447x, axis=1, args=[db447x])
psd_filtered = psd * coefs_mag ** 2
return psd_filtered
def bessel8(psd, parameters):
f_cutoff = parameters['f_cutoff']
f_mod = f_cutoff / 3.17962
coefs_mag = psd.copy()
div = [coef / f_mod for coef in coefs_mag.index]
coefs = []
for i in range(len(div)):
coefs.append(2027025 / np.sqrt(81 * (-225225 * div[i] + 30030 * div[i] ** 3 - 770 *
div[i] ** 5 + 4 * div[i] ** 7) ** 2 + (
2027025 - 945945 * div[i] ** 2 + 51975 * div[i] ** 4 - 630 * div[
i] ** 6 +
div[i] ** 8) ** 2))
coefs_mag = [coef ** 2 for coef in coefs]
psd_filtered = psd.copy(deep=True)
psd_filtered.iloc[:, 0] *= coefs_mag
return psd_filtered
def boxcar(psd, parameters):
n_avg = parameters['n_avg']
psd_filtered = psd.copy(deep=True)
coefs_mag = psd.copy(deep=True)
max_freq = psd.index[-1]
for x, val in coefs_mag.iterrows():
try:
coefs_mag.iloc[coefs_mag.index.get_loc(x, method='nearest')] = 1 / n_avg * np.abs(
np.sin(x / max_freq * np.pi * n_avg / 2) / np.sin(x / max_freq * np.pi / 2))
except FloatingPointError: # To deal with case x = 0
pass
coefs_mag.iloc[coefs_mag.index.get_loc(coefs_mag.index[0], method='nearest')] = 1
psd_filtered = psd * coefs_mag ** 2
return psd_filtered
def butterworth(psd, parameters):
f_cutoff = parameters['f_cutoff']
coefs_mag = [1 / np.sqrt(1 + (coef / f_cutoff) ** 2) for coef in psd.index]
psd_filtered = psd.copy(deep=True)
psd_filtered.iloc[:, 0] *= coefs_mag
return psd_filtered
def qpd(psd, parameters):
gam = 0.44
f_0 = 11.1e3
coefs_mag = [gam ** 2 + ((1 - gam ** 2) / (1 + (coef / f_0) ** 2)) for coef in psd.index]
psd_filtered = psd.copy(deep=True)
psd_filtered.iloc[:, 0] *= coefs_mag
return psd_filtered
def interpolate_psd(psd, n_downsample: int, n_0: int):
"""
Helper function for psd_subsample()
"""
n_downsample = int(n_downsample)
# Make placeholders
indices = [np.NaN] * ((n_0 * n_downsample) - n_downsample + 1)
values = [np.NaN] * ((n_0 * n_downsample) - n_downsample + 1)
# Fill data
i = 0
for x, val in psd.iterrows():
indices[i * n_downsample] = x
values[i * n_downsample] = val[0]
i += 1
# Make temp dataframe to interpolate indices
temp_indices = pd.DataFrame(data=indices)
# Interpolate indices
temp_indices = temp_indices.interpolate()
# Make dataframe for interpolating values
temp_data = pd.DataFrame(data=values, index=temp_indices[0].to_list())
temp_data = temp_data.interpolate('index')
temp_data['f'] = temp_data.index
temp_data.index = range(len(temp_data))
return temp_data
def psd_resample_down(psd, parameters):
factor = parameters['factor']
coefs_mag = psd.copy(deep=True)
max_freq = psd.index[-1]
freq_sample = max_freq * 2
rs_coefs = load_resample_coefs(factor)
midpoint_num = int((len(rs_coefs) - 1) / 2)
x = [(-midpoint_num / freq_sample) + i * (1 / freq_sample) for i in range(len(rs_coefs))]
rs_coefs.index = x
length = int(len(psd) / 2) * 2
rs_coefs_ser = rs_coefs.squeeze() # Turn into a series so numpy's rfft works
rs_coefs_fft = np.abs(np.fft.rfft(rs_coefs_ser, n=length)) # FFT + 0-padding
indices_new = np.linspace(psd.index[0], psd.index[-1], int(length / 2) + 1)
rs_coefs_fft = | pd.DataFrame(data=rs_coefs_fft, index=indices_new) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script structures the dashboard streamlit in three applications. Its main role is to display results of
evaluation which have already been performed. This module is independent from others.
"""
import streamlit as st
import glob
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from statistics import mean
import pandas as pd
from math import nan
import os
import json
from dataset_helper import records, sampling_frequency
from algo_helper import algorithms_list
'''
# Benchmark of QRS detectors
'''
def get_layout(title: str) -> go.Layout:
"""
    Used when displaying a graph: creates a Layout object.
:param title: title of the displayed graph
:type title: str
:return: model of layout for graphs
:rtype: Layout
"""
return go.Layout(title=title, margin=dict(l=20, r=20, t=30, b=20))
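# Typical use of get_layout (sketch): attach the layout when building a figure for a graph.
#   fig = go.Figure(data=[go.Scatter(x=[0, 1], y=[0, 1])], layout=get_layout('F1 score'))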
# choose application of interest
applications = ['Comparison of different algorithms', 'Evaluation of one algorithm', 'Noise robustness']
application = st.sidebar.selectbox('What would you like to study ?', applications)
def print_error_no_evaluation(ds: str = '"#check --help#"', alg: str = '"#check --help#"', t: str = '#int(ms)#') \
-> None:
"""
    Display an error message when the result of a specific evaluation is requested but that evaluation has not been
    performed. Also display the make command needed to run the evaluation of interest.
:param ds: name of the dataset of interest
:type ds: str
:param alg: name of the algorithm of interest
:type alg: str
:param t: tolerance's value of interest
:type t: str
"""
    st.write('The evaluation of interest has not been performed yet. You probably did not execute the '
             'evaluation. Please run the following command:')
st.write(f'\t make evaluation --DATASET="{ds}" --ALGO="{alg}" --TOLERANCE={t}')
# list of datasets used in the first two applications (comparison of performances on the entire dataset or on each
# record)
datasets_list = ['mit-bih-arrhythmia', 'mit-bih-supraventricular-arrhythmia', 'mit-bih-long-term-ecg', 'european-stt']
# colors used for graphs for each algorithm
colormap = {
'Pan-Tompkins-ecg-detector': 'rgb(41,58,143)',
'Hamilton-ecg-detector': 'rgb(215,48,39)',
'Christov-ecg-detector': 'rgb(26,152,80)',
'Engelse-Zeelenberg-ecg-detector': '#440154',
'SWT-ecg-detector': 'rgb(255,111,0)',
'Matched-filter-ecg-detector': 'rgb(179,88,6)',
'Two-average-ecg-detector': 'rgb(212,103,128)',
'Hamilton-biosppy': 'rgb(184,225,134)',
'Christov-biosppy': 'rgb(255,234,0)',
'Engelse-Zeelenberg-biosppy': 'rgb(197,27,125)',
'Gamboa-biosppy': 'rgb(153,204,255)',
'mne-ecg': 'rgb(61,89,65)',
'heartpy': 'rgb(44,255,150)',
'gqrs-wfdb': 'rgb(254,224,139)',
'xqrs-wfdb': 'rgb(10,136,186)'
}
# first application
if application == 'Comparison of different algorithms':
st.write('\n\n')
'''
## Comparison of performances of some algorithms on a dataset
'''
st.write('\n\n')
dataset = st.selectbox('Please choose a dataset:', datasets_list)
csv_files_dataset = glob.glob(f'output/perf/*_{dataset}_*.csv')
tolerance_list = []
for file in csv_files_dataset:
eval_tolerance = file[:-4].split('_')[-1]
tolerance_list.append(eval_tolerance)
tolerance = st.selectbox('Please choose tolerance of the evaluation (in ms):', list(set(tolerance_list)))
csv_files = [csv_file for csv_file in csv_files_dataset if csv_file[:-4].split('_')[-1] == tolerance]
# table of comparison
if len(csv_files) == 0:
print_error_no_evaluation(ds=dataset)
else:
comparison_df = pd.DataFrame(columns=['FP', 'FN', 'F', 'F(%)', 'P+(%)', 'Se(%)', 'F1(%)'])
number_of_beats = ''
st.write('Please select algorithms you would like to compare:')
if st.checkbox('Pan-Tompkins-ecg-detector'):
if not os.path.exists(f'output/perf/Pan-Tompkins-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Pan-Tompkins-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Pan-Tompkins-ecg-detector_{dataset}_{tolerance}.csv',
delimiter=',', index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Pan-Tompkins-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Hamilton-ecg-detector'):
if not os.path.exists(f'output/perf/Hamilton-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Hamilton-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Hamilton-ecg-detector_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Hamilton-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Christov-ecg-detector'):
if not os.path.exists(f'output/perf/Christov-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Christov-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Christov-ecg-detector_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Christov-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Engelse-Zeelenberg-ecg-detector'):
if not os.path.exists(f'output/perf/Engelse-Zeelenberg-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Engelse-Zeelenberg-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Engelse-Zeelenberg-ecg-detector_{dataset}_{tolerance}.csv',
delimiter=',', index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Engelse-Zeelenberg-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('SWT-ecg-detector'):
if not os.path.exists(f'output/perf/SWT-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='SWT-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/SWT-ecg-detector_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'SWT-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Matched-filter-ecg-detector'):
if not os.path.exists(f'output/perf/Matched-filter-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Matched-filter-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Matched-filter-ecg-detector_{dataset}_{tolerance}.csv',
delimiter=',', index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Matched-filter-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Two-average-ecg-detector'):
if not os.path.exists(f'output/perf/Two-average-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Two-average-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Two-average-ecg-detector_{dataset}_{tolerance}.csv',
delimiter=',', index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Two-average-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Hamilton-biosppy'):
if not os.path.exists(f'output/perf/Hamilton-biosppy_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Hamilton-biosppy', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Hamilton-biosppy_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Hamilton-biosppy'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Christov-biosppy'):
if not os.path.exists(f'output/perf/Christov-biosppy_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Christov-biosppy', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Christov-biosppy_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Christov-biosppy'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Engelse-Zeelenberg-biosppy'):
if not os.path.exists(f'output/perf/Engelse-Zeelenberg-biosppy_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Engelse-Zeelenberg-biosppy', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Engelse-Zeelenberg-biosppy_{dataset}_{tolerance}.csv',
delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Engelse-Zeelenberg-biosppy'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Gamboa-biosppy'):
if not os.path.exists(f'output/perf/Gamboa-biosppy_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Gamboa-biosppy', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Gamboa-biosppy_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Gamboa-biosppy'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('mne-ecg'):
if not os.path.exists(f'output/perf/mne-ecg_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='mne-ecg', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/mne-ecg_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'mne-ecg'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('heartpy'):
if not os.path.exists(f'output/perf/heartpy_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='heartpy', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/heartpy_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'heartpy'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('gqrs-wfdb'):
if not os.path.exists(f'output/perf/gqrs-wfdb_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='gqrs-wfdb', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/gqrs-wfdb_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'gqrs-wfdb'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('xqrs-wfdb'):
if not os.path.exists(f'output/perf/xqrs-wfdb_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='xqrs-wfdb', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/xqrs-wfdb_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'xqrs-wfdb'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
st.write(f"Comparative table of global performances: ")
st.write(comparison_df)
st.write(f'Total number of beats for this dataset is : {number_of_beats}')
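        # The checkbox blocks above all follow the same pattern; an equivalent loop-based
        # sketch (same behaviour, assuming each algorithm name matches its CSV file name):
        #   for algo in algorithms_list:
        #       if st.checkbox(algo):
        #           csv_path = f'output/perf/{algo}_{dataset}_{tolerance}.csv'
        #           if not os.path.exists(csv_path):
        #               print_error_no_evaluation(ds=dataset, alg=algo, t=tolerance)
        #           else:
        #               results_df = pd.read_csv(csv_path, delimiter=',', index_col=0)
        #               global_eval = results_df.iloc[-1, 1:]
        #               global_eval.name = algo
        #               comparison_df = comparison_df.append(global_eval)
        #               number_of_beats = results_df.iloc[-1, 0]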
# graphs of comparison
'''
## Comparison of performances of algorithms on different datasets
'''
results_F1 = pd.DataFrame(columns=datasets_list, index=algorithms_list)
results_Fp = pd.DataFrame(columns=datasets_list, index=algorithms_list)
for algo in algorithms_list:
for dataset in datasets_list:
if os.path.exists(f'output/perf/{algo}_{dataset}_{tolerance}.csv'):
results_df = | pd.read_csv(f'output/perf/{algo}_{dataset}_{tolerance}.csv', delimiter=',') | pandas.read_csv |
from datetime import timedelta

import numpy as np
import pandas as pd
def twdb_dot(df_row, drop_dcp_metadata=True):
"""Parser for twdb DOT dataloggers."""
return _twdb_stevens_or_dot(df_row, reverse=False, drop_dcp_metadata=drop_dcp_metadata)
def twdb_fts(df_row, drop_dcp_metadata=True):
"""Parser for twdb fts dataloggers
format examples:
C510D20018036133614G39-0NN170WXW00097 :WL 31 #60 -72.91 -72.89 -72.89 -72.89 -72.91 -72.92 -72.93 -72.96 -72.99 -72.97 -72.95 -72.95
"""
message = df_row['dcp_message'].lower()
message_timestamp = df_row['message_timestamp_utc']
    battery_voltage = np.nan
for line in message.split(':'):
if line.split() != []:
line = line.split()
# grab water level data
if line[0] == 'wl':
water_levels = [
field.strip('+- ') for field in line[3:]
]
# grab battery voltage
if line[0] == 'vb':
battery_voltage = line[3].strip('+- ')
df = _twdb_assemble_dataframe(
message_timestamp, battery_voltage, water_levels, reverse=False
)
return df
def twdb_stevens(df_row, drop_dcp_metadata=True):
"""Parser for twdb stevens dataloggers."""
return _twdb_stevens_or_dot(df_row, reverse=True, drop_dcp_metadata=drop_dcp_metadata)
def twdb_sutron(df_row, drop_dcp_metadata=True):
"""Parser for twdb sutron dataloggers.
Data is transmitted every 12 hours and each message contains 12 water level measurements on the hour
for the previous 12 hours and one battery voltage measurement for the current hour
format examples:
'":ott 60 #60 -190.56 -190.66 -190.69 -190.71 -190.74 -190.73 -190.71 -190.71 -190.71 -190.71 -190.72 -190.72 :BL 13.05 '
'":SENSE01 60 #60 -82.19 -82.19 -82.18 -82.19 -82.19 -82.22 -82.24 -82.26 -82.27 -82.28 -82.28 -82.26 :BL 12.41 '
'":OTT 703 60 #60 -231.47 -231.45 -231.44 -231.45 -231.47 -231.50 -231.51 -231.55 -231.56 -231.57 -231.55 -231.53 :6910704 60 #60 -261.85 -261.83 -261.81 -261.80 -261.81 -261.83 -261.85 -261.87 -261.89 -261.88 -261.86 -261.83 :BL 13.21'
'":Sense01 10 #10 -44.70 -44.68 -44.66 -44.65 -44.63 -44.61 -44.60 -44.57 -44.56 -44.54 -44.52 -44.50 :BL 13.29'
'"\r\n-101.11 \r\n-101.10 \r\n-101.09 \r\n-101.09 \r\n-101.08 \r\n-101.08 \r\n-101.08 \r\n-101.10 \r\n-101.11 \r\n-101.09 \r\n-101.09 \r\n-101.08'
'"\r\n// \r\n// \r\n// \r\n// \r\n// \r\n-199.88 \r\n-199.92 \r\n-199.96 \r\n-199.98 \r\n-200.05 \r\n-200.09 \r\n-200.15'
'":Sense01 60 #60 M M M M M M M M M M M M :BL 12.65'
"""
message = df_row['dcp_message'].lower()
message_timestamp = df_row['message_timestamp_utc']
lines = message.strip('":').split(':')
if len(lines) == 1:
water_levels = [field.strip('+- ') for field in lines[0].split()]
df = _twdb_assemble_dataframe(message_timestamp, None, water_levels, reverse=False)
else:
data = []
battery_voltage = lines[-1].split('bl')[-1].strip()
for line in lines[:-1]:
channel = line[:7]
split = line[7:].split()
water_levels = [field.strip('+-" ') for field in split[2:]]
df = _twdb_assemble_dataframe(message_timestamp, battery_voltage, water_levels, reverse=False)
df['channel'] = channel
data.append(df)
df = pd.concat(data)
if not drop_dcp_metadata:
for col in df_row.index:
df[col] = df_row[col]
return df
def twdb_texuni(dataframe, drop_dcp_metadata=True):
"""Parser for twdb texuni dataloggers.
Data is transmitted every 12 hours and each message contains 12 water level measurements on the hour
for the previous 12 hours
format examples:
'"\r\n+0.000,-245.3,\r\n+0.000,-245.3,\r\n+0.000,-245.3,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.5,\r\n+0.000,-245.5,\r\n+0.000,-245.6,\r\n+0.000,-245.6,\r\n+0.000,-245.6,\r\n+0.000,-245.6,\r\n+0.000,-245.6,\r\n+0.000,-245.6,\r\n+412.0,+2013.,+307.0,+1300.,+12.75,+0.000,-245.4,-245.3,-245.6,+29.55,'
' \r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+340.0,+2013.,+307.0,+1400.,+12.07,+0.000,-109.9,-109.8,-110.1,+30.57,'
"""
message = dataframe['dcp_message']
message_timestamp = dataframe['message_timestamp_utc']
water_levels = [row.split(',')[1].strip('+- ') for row in message.strip('" \r\n').splitlines()[:-1]]
df = _twdb_assemble_dataframe(message_timestamp, None, water_levels, reverse=True)
if not drop_dcp_metadata:
for col in dataframe.index:
df[col] = dataframe[col]
return df
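# Minimal sketch of how these parsers are driven; the message text reuses the format example
# from the twdb_fts docstring and the timestamp is illustrative:
#   row = pd.Series({
#       'dcp_message': 'C510D20018036133614G39-0NN170WXW00097 :WL 31 #60 -72.91 -72.89 -72.89 ',
#       'message_timestamp_utc': pd.Timestamp('2018-02-05 13:36:14'),
#   })
#   twdb_fts(row)  # -> DataFrame indexed by hourly timestamp_utc with battery_voltage / water_level columns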
def _twdb_assemble_dataframe(message_timestamp, battery_voltage, water_levels, reverse=False):
data = []
base_timestamp = message_timestamp.replace(minute=0, second=0, microsecond=0)
if reverse:
water_levels.reverse()
try:
battery_voltage = float(battery_voltage)
    except (TypeError, ValueError):
        battery_voltage = np.nan
for hrs, water_level in enumerate(water_levels):
timestamp = base_timestamp - timedelta(hours=hrs)
try:
water_level = float(water_level)
        except (TypeError, ValueError):
            water_level = np.nan
if hrs==0 and battery_voltage:
data.append([timestamp, battery_voltage, water_level])
else:
            data.append([timestamp, np.nan, water_level])
if len(data)>0:
df = pd.DataFrame(data, columns=['timestamp_utc', 'battery_voltage', 'water_level'])
df.index = pd.to_datetime(df['timestamp_utc'])
del df['timestamp_utc']
return df
else:
return | pd.DataFrame() | pandas.DataFrame |
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
_testing as tm,
bdate_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
_default_compressor = "blosc"
pytestmark = pytest.mark.single
def test_conv_read_write(setup_path):
with tm.ensure_clean() as path:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame({"A": range(5), "B": range(5)})
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
def test_long_strings(setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
| _maybe_remove(store, "df") | pandas.tests.io.pytables.common._maybe_remove |
import numpy as np
import pandas as pd
data=pd.read_csv('iris.csv')
data=np.array(data)
data=np.mat(data[:,0:4])
# number of data points
length=len(data)
# compute the kernel matrix in the input space via the kernel function
k=np.mat(np.zeros((length,length)))
for i in range(0,length):
for j in range(i,length):
k[i,j]=(np.dot(data[i],data[j].T))**2
k[j,i]=k[i,j]
name=range(length)
test=pd.DataFrame(columns=name,data=k)
print('Kernel matrix\n',test)
len_k=len(k)
# center the kernel matrix
I=np.eye(len_k)
one=np.ones((len_k,len_k))
A=I-1.0/len_k*one
centered_k=np.dot(np.dot(A,k),A)
test=pd.DataFrame(columns=name,data=centered_k)
print('Centered kernel matrix\n',test)
# normalize the kernel matrix
W_2=np.zeros((len_k,len_k))
for i in range(0,len_k):
W_2[i,i]=k[i,i]**(-0.5)
normalized_k=np.dot(np.dot(W_2,k),W_2)
test=pd.DataFrame(columns=name,data=normalized_k)
print('Normalized kernel matrix\n',test)
# normalize the centered kernel matrix
W_3=np.zeros((len_k,len_k))
for i in range(0,len_k):
W_3[i,i]=centered_k[i,i]**(-0.5)
normalized_centered_k=np.dot(np.dot(W_3,centered_k),W_3)
test=pd.DataFrame(columns=name,data=normalized_centered_k)
print('Centered and normalized kernel matrix\n',test)
# compute the feature map φ of each input vector
fai=np.mat(np.zeros((length,10)))
for i in range(0,length):
for j in range(0,4):
fai[i,j]=data[i,j]**2
for m in range(0,3):
for n in range(m+1,4):
j=j+1
fai[i,j]=2**0.5*data[i,m]*data[i,n]
name_f=range(10)
test=pd.DataFrame(columns=name_f,data=fai)
print('Feature map φ\n',test)
# compute the kernel matrix from φ
k_f=np.mat(np.zeros((length,length)))
for i in range(0,length):
for j in range(i,length):
k_f[i,j]=(np.dot(fai[i],fai[j].T))
k_f[j,i]=k_f[i,j]
test=pd.DataFrame(columns=name,data=k_f)
print('Kernel matrix computed from φ\n',test)
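# Sanity check: the homogeneous quadratic kernel satisfies K(x, y) = (x·y)^2 = φ(x)·φ(y),
# so this matrix should match the one computed directly from the data (up to rounding):
#   print(np.allclose(k, k_f))  # expected: True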
# center φ
rows=fai.shape[0]
cols=fai.shape[1]
centered_fai=np.mat(np.zeros((rows,cols)))
for i in range(0,cols):
centered_fai[:,i]=fai[:,i]-np.mean(fai[:,i])
test=pd.DataFrame(columns=name_f,data=centered_fai)
print('Centered φ\n',test)
# compute the centered kernel matrix from the centered φ
k_cf=np.mat(np.zeros((length,length)))
for i in range(0,length):
for j in range(i,length):
k_cf[i,j]=(np.dot(centered_fai[i],centered_fai[j].T))
k_cf[j,i]=k_cf[i,j]
test=pd.DataFrame(columns=name,data=k_cf)
print('Centered kernel matrix computed from centered φ\n',test)
# normalize φ
normalized_fai=np.mat(np.zeros((rows,cols)))
for i in range(0,len(fai)):
temp=np.linalg.norm(fai[i])
normalized_fai[i]=fai[i]/np.linalg.norm(fai[i])
test=pd.DataFrame(columns=name_f,data=normalized_fai)
print('Normalized φ\n',test)
# compute the normalized kernel matrix from the normalized φ
k_nf=np.mat(np.zeros((length,length)))
for i in range(0,length):
for j in range(i,length):
k_nf[i,j]=(np.dot(normalized_fai[i],normalized_fai[j].T))
k_nf[j,i]=k_nf[i,j]
test=pd.DataFrame(columns=name,data=k_nf)
print('Normalized kernel matrix computed from normalized φ\n',test)
# compute the centered and normalized φ
normalized_centered_fai=np.mat(np.zeros((rows,cols)))
for i in range(0,len(fai)):
temp=np.linalg.norm(fai[i])
normalized_centered_fai[i]=centered_fai[i]/np.linalg.norm(centered_fai[i])
test=pd.DataFrame(columns=name_f,data=normalized_centered_fai)
print('Centered and normalized φ\n',test)
# compute the centered and normalized kernel matrix from the centered, normalized φ
kc_nf=np.mat(np.zeros((length,length)))
for i in range(0,length):
for j in range(i,length):
kc_nf[i,j]=(np.dot(normalized_centered_fai[i],normalized_centered_fai[j].T))
kc_nf[j,i]=kc_nf[i,j]
test= | pd.DataFrame(columns=name,data=kc_nf) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 12:36:48 2019
@author: adm
"""
# In[1]:
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
import cv2
import time
from mrcnn.config import Config
from datetime import datetime
# Root directory of the project
ROOT_DIR = r'C:\Users\adm\Mask_RCNN-master'
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import COCO config
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/")) # To find local version
#from samples.coco import coco
# In[2]:
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(MODEL_DIR ,"BT_full_two.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
print("cuiwei***********************")
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "Full_image_test")
class ShapesConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "shapes"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 1 # background + 3 shapes
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
IMAGE_MIN_DIM = 2048
IMAGE_MAX_DIM = 2048
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (8 * 6, 16 * 6, 32 * 6, 64 * 6, 128 * 6) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE =100
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100
# use small validation steps since the epoch is small
VALIDATION_STEPS = 50
#import train_tongue
#class InferenceConfig(coco.CocoConfig):
class InferenceConfig(ShapesConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# In[3]:
config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
#model.load_weights(COCO_MODEL_PATH, by_name=True, exclude=["mrcnn_class_logits", "mrcnn_bbox_fc","mrcnn_bbox", "mrcnn_mask"])
model.load_weights(COCO_MODEL_PATH, by_name=True)
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['_background_', 'BT']
# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
image = skimage.io.imread(os.path.join(IMAGE_DIR, file_names[0]))
#image = img
a=datetime.now()
# Run detection
results = model.detect([image], verbose=1)
b=datetime.now()
# Visualize results
print("shijian",(b-a).seconds)
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'])
# In[3]:
mask = r['masks']
mask = mask.astype(int)
mask.shape
# In[]
AA = []
for i in range(mask.shape[2]):
temp = skimage.io.imread(os.path.join(IMAGE_DIR, file_names[16]))
for j in range(temp.shape[2]):
temp[:,:,j] = temp[:,:,j] * mask[:,:,i]
#temp1 =
#temp[i] = temp[i]
AA.append(temp)
plt.figure(figsize=(8,8))
plt.imshow(temp)
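# Equivalent, vectorized way to apply one mask to every image channel at once (a sketch;
# broadcasting over the channel axis reproduces the inner loop above):
#   masked = skimage.io.imread(os.path.join(IMAGE_DIR, file_names[16])) * mask[:, :, i, np.newaxis]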
# In[3]:
config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
#model.load_weights(COCO_MODEL_PATH, by_name=True, exclude=["mrcnn_class_logits", "mrcnn_bbox_fc","mrcnn_bbox", "mrcnn_mask"])
model.load_weights(COCO_MODEL_PATH, by_name=True)
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['_background_', 'BT']
# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
for i in range(10):
image = skimage.io.imread(os.path.join(IMAGE_DIR, file_names[i]))
#image = img
a=datetime.now()
# Run detection
results = model.detect([image], verbose=1)
b=datetime.now()
# Visualize results
print("shijian",(b-a).seconds)
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'])
# In[]
mrcnn = model.run_graph([image], [
("proposals", model.keras_model.get_layer("ROI").output),
("probs", model.keras_model.get_layer("mrcnn_class").output),
("deltas", model.keras_model.get_layer("mrcnn_bbox").output),
("masks", model.keras_model.get_layer("mrcnn_mask").output),
("detections", model.keras_model.get_layer("mrcnn_detection").output),
])
# In[]
det_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)
det_count = np.where(det_class_ids == 0)[0][0]
det_class_ids = det_class_ids[:det_count]
detections = mrcnn['detections'][0, :det_count]
#print("{} detections: {}".format(
#det_count, np.array(dataset.class_names)[det_class_ids]))
#captions = ["{} {:.3f}".format(dataset.class_names[int(c)], s) if c > 0 else ""
#for c, s in zip(detections[:, 4], detections[:, 5])]
visualize.draw_boxes(
image,
refined_boxes=utils.denorm_boxes(detections[:, :4], image.shape[:2]),
visibilities=[2] * len(detections),title="Detections",
ax=get_ax())
# In[]
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
mrcnn = model.run_graph([image], [
("proposals", model.keras_model.get_layer("ROI").output),
("probs", model.keras_model.get_layer("mrcnn_class").output),
("deltas", model.keras_model.get_layer("mrcnn_bbox").output),
("masks", model.keras_model.get_layer("mrcnn_mask").output),
("detections", model.keras_model.get_layer("mrcnn_detection").output),
])
# Get predictions of mask head
mrcnn = model.run_graph([image], [
("detections", model.keras_model.get_layer("mrcnn_detection").output),
("masks", model.keras_model.get_layer("mrcnn_mask").output),
])
# Get detection class IDs. Trim zero padding.
det_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)
det_count = np.where(det_class_ids == 0)[0][0]
det_class_ids = det_class_ids[:det_count]
# In[3]:
det_boxes = utils.denorm_boxes(mrcnn["detections"][0, :, :4], image.shape[:2])
det_mask_specific = np.array([mrcnn["masks"][0, i, :, :, c]
for i, c in enumerate(det_class_ids)])
det_masks = np.array([utils.unmold_mask(m, det_boxes[i], image.shape)
for i, m in enumerate(det_mask_specific)])
log("det_mask_specific", det_mask_specific)
log("det_masks", det_masks)
# In[]
det_masks = det_masks
# In[]
display_images(det_masks[:2] * 255, cmap="Blues", interpolation="none")
# In[]
hip2= det_masks[:1]*1
hip1 = det_masks[1:]*1
hip1 = hip1.reshape((860,1132,1))
hip2 = hip2.reshape((860,1132,1))
# In[]
GT1 = np.multiply(hip1,image)
# In[]
GT2 = np.multiply(hip2,image)
GT = GT1 + GT2
# In[]
plt.imshow(GT)
# In[]
out = GT[250:550,400:800]
# In[]
plt.imshow(out)
# In[]
import matplotlib.pyplot as plt
from scipy import misc
import scipy
#I = misc.imread('./cc_1.png')
scipy.misc.imsave('C:/Users/adm/Mask_RCNN-master/634判1/a605.jpg', out)
# In[]
0
# In[3]:
display_images(det_masks[:4] * 255, cmap="Blues", interpolation="none")
# In[]
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
display_images(det_mask_specific[:2] * 255, cmap="Blues", interpolation="none")
# In[3]:
#bb=det_mask_specific[:1]*255
#aa = det_mask_specific[1:] * 255
#a157 = np.concatenate((aa[0,:,2], bb[0,:,2]))
# In[3]:
aa=det_mask_specific[:1]*255
bb = det_mask_specific[1:] * 255
aa= aa.flatten()
bb = bb.flatten()
# In[3]:
a1049 = np.concatenate((aa, bb))
# In[3]:
dict = {"a1047":a1047,
"a1048":a1048,
"a1049":a1049}
# In[3]:
import pandas as pd
data = | pd.DataFrame(dict) | pandas.DataFrame |
import pandas as pd
import numpy as np
from typing import Tuple
class DACT(object):
def __init__(self, data: pd.DataFrame):
"""
Params:
data (DataFrame) - pandas DataFrame with columns 'object_id', 'time', 'cluster_id' containing objects,
timestamps, cluster belongings, features ..
Note: The first three columns can have custom names as long as they represent the object
identifier, the timestamp and the cluster identifier in the right order
"""
self._data = data.astype({data.columns.values[0]: str})
self._column_names = data.columns.values
self._object_column_name = self._column_names[0]
self._time_column_name = self._column_names[1]
self._cluster_column_name = self._column_names[2]
self._object_ids = self._data[self._object_column_name].unique()
self._num_objects = len(self._object_ids)
self._memberships = self.calc_membership_matrices()
self._outlier_rating = None
self._outlier_result = None
def get_outliers(self, tau: float) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Parameters:
tau (float) - threshold for outlier detection
Returns:
data (DataFrame) - pandas DataFrame with columns 'object_id', 'time', 'cluster_id', 'outlier'
outlier_result (DataFrame) - pandas DataFrame with columns 'object_id', 'start_time', 'end_time',
'cluster_end_time', 'rating', 'distance' and 'outlier'
"""
self.calc_outlier_degree()
return self.mark_outliers(tau)
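    # Minimal usage sketch (column names and values are illustrative):
    #   df = pd.DataFrame({'object_id': ['a', 'a', 'b', 'b'],
    #                      'time': [1, 2, 1, 2],
    #                      'cluster_id': [0, 0, 0, 1]})
    #   data_marked, outlier_result = DACT(df).get_outliers(tau=0.5)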
def calc_membership_matrices(self) -> dict:
"""
Returns:
memberships (dict) - {<timestamp>: <membership_matrix>} containing the membership matrices for all timestamps
"""
memberships = {}
timestamps = self._data[self._time_column_name].unique()
for i in range(len(timestamps)):
relevant_data = self._data[self._data[self._time_column_name] == timestamps[i]]
memberships[timestamps[i]] = np.zeros((self._num_objects, self._num_objects))
cluster_ids = relevant_data[self._cluster_column_name].unique()
for cid in cluster_ids:
if cid >= 0:
members = relevant_data[relevant_data[self._cluster_column_name] == cid][self._object_column_name].unique()
for j in range(len(members)-1):
index_j = np.argwhere(self._object_ids == members[j])[0][0]
for k in range(j+1, len(members)):
index_k = np.argwhere(self._object_ids == members[k])[0][0]
memberships[timestamps[i]][index_j][index_k] = 1
memberships[timestamps[i]][index_k][index_j] = 1
return memberships
def calc_cohesion_matrix(self, start_time: int, end_time: int) -> np.ndarray:
"""
Params:
start_time (int) - time that should be considered as beginning
end_time (int) - int representing the timestamp which should be rated up to
Returns:
cohesion_matrix (array) - array of shape (num_objects, num_objects) containing the cohesion values of all time
series to each other for the considered time period
"""
timestamps = self._data[self._time_column_name].unique()
timestamps = timestamps[np.where(timestamps >= start_time)]
timestamps = timestamps[np.where(timestamps <= end_time)]
cohesion_matrix = np.zeros((self._num_objects, self._num_objects))
for time in timestamps:
cohesion_matrix = cohesion_matrix + self._memberships[time]
return cohesion_matrix
def calc_subsequence_ratings(self, start_time: int, end_time: int) -> pd.DataFrame:
"""
Params:
start_time (int) - time that should be considered as beginning
end_time (int) - int representing the timestamp which should be rated up to
Returns:
subsequence_ratings (pandas.DataFrame) - pandas DataFrame with columns 'object_id', 'start_time', 'end_time', 'rating'
"""
subsequence_ratings = | pd.DataFrame(columns=[self._object_column_name, 'start_time', 'end_time', 'rating']) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import unittest
import warnings
import pandas as pd
import numpy as np
from qiime2 import Artifact
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn)
from qiime2.core.testing.util import get_dummy_plugin, ReallyEqualMixin
class TestInvalidMetadataConstruction(unittest.TestCase):
def test_non_dataframe(self):
with self.assertRaisesRegex(
TypeError, 'Metadata constructor.*DataFrame.*not.*Series'):
Metadata(pd.Series([1, 2, 3], name='col',
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_ids(self):
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({}, index=pd.Index([], name='id')))
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({'column': []},
index=pd.Index([], name='id')))
def test_invalid_id_header(self):
# default index name
with self.assertRaisesRegex(ValueError, r'Index\.name.*None'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'])))
with self.assertRaisesRegex(ValueError, r'Index\.name.*my-id-header'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'c'], name='my-id-header')))
def test_non_str_id(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata ID.*type.*float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', np.nan, 'c'], name='id')))
def test_non_str_column_name(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata column name.*type.*'
'float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
np.nan: [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_empty_id(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata ID.*at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', '', 'c'], name='id')))
def test_empty_column_name(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata column name.*'
'at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_pound_sign_id(self):
with self.assertRaisesRegex(
ValueError, "metadata ID.*begins with a pound sign.*'#b'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', '#b', 'c'], name='id')))
def test_id_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata ID 'sample-id'.*conflicts.*reserved.*"
"ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'sample-id', 'c'], name='id')))
def test_column_name_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata column name 'featureid'.*conflicts.*"
"reserved.*ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'featureid': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, "Metadata IDs.*unique.*'a'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'a'], name='id')))
def test_duplicate_column_names(self):
data = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
with self.assertRaisesRegex(ValueError,
"Metadata column names.*unique.*'col1'"):
Metadata(pd.DataFrame(data, columns=['col1', 'col2', 'col1'],
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_unsupported_column_dtype(self):
with self.assertRaisesRegex(
TypeError, "Metadata column 'col2'.*unsupported.*dtype.*bool"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': [True, False, True]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_unsupported_type(self):
with self.assertRaisesRegex(
TypeError, "CategoricalMetadataColumn.*strings or missing "
r"values.*42\.5.*float.*'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 42.5]},
index= | pd.Index(['a', 'b', 'c'], name='id') | pandas.Index |
import math
import datetime
from typing import Dict, Union
import pandas as pd
from enum import Enum
from decimal import Decimal
from pandas.core.frame import DataFrame
from pandas.core.indexes.datetimes import DatetimeIndex
class TimeseriesInterval(str, Enum):
HOURLY = ("H",)
DAILY = ("D",)
WEEKLY = ("W-SUN",)
MONTHLY = "MS"
class AggregateMethod(str, Enum):
MEAN = ("mean",)
SUM = ("sum",)
FIRST = ("first",)
LAST = ("last",)
MEDIAN = ("median",)
MIN = ("min",)
MAX = ("max",)
COUNT = "count"
class NaInterpolationMethod(str, Enum):
FORDWARD_FILL = ("ffill",)
BACKWARD_FILL = ("bfill",)
class ColumnType(str, Enum):
int = ("int32",)
str = ("str",)
float = ("float64",)
bigdecimal = ("float64",)
class ColumnConfig:
def __init__(
self,
name: str,
aggregate_method: AggregateMethod,
type:ColumnType=None,
na_fill_method: NaInterpolationMethod = None,
na_fill_value=None,
alias:str=None
) -> None:
self.name = name
self.aggregate_method = aggregate_method
self.na_fill_method = na_fill_method
self.na_fill_value = na_fill_value
self.type = type
self.alias = alias
pass
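# Example of wiring the configuration classes above together (a sketch; the column names
# are illustrative); these configs are consumed by beta_aggregate_timeseries() below:
#   columns = [
#       ColumnConfig(name='price', aggregate_method=AggregateMethod.LAST,
#                    na_fill_method=NaInterpolationMethod.FORDWARD_FILL),
#       ColumnConfig(name='volume', aggregate_method=AggregateMethod.SUM, na_fill_value=0),
#   ]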
def _to_df(data: Union[Dict, "list[Dict]", DataFrame]):
return data if (isinstance(data, DataFrame)) else (pd.json_normalize(data))
def _last_day_of_month(any_day):
next_month = any_day.replace(day=28) + datetime.timedelta(days=4)
return next_month - datetime.timedelta(days=next_month.day)
def beta_aggregate_timeseries(
data: Union[Dict, "list[Dict]", DataFrame],
time_column: str,
interval: TimeseriesInterval,
columns: "list[ColumnConfig]",
start_timestamp: int = None,
end_timestamp: int = None,
):
df = _to_df(data)
if df.index.name == time_column:
df = df.reset_index()
if len(df) == 0:
return df
tmin = df[time_column].min()
tmax = df[time_column].max()
if hasattr(tmin, "timestamp"):
tmin = tmin.timestamp()
if hasattr(tmax, "timestamp"):
tmax = tmax.timestamp()
if start_timestamp != None:
tmin = start_timestamp
if end_timestamp != None:
tmax = end_timestamp
unit = 's'
if len(str(tmin)) == 13:
tmin /= 1000
tmax /= 1000
unit = 'ms'
if interval == TimeseriesInterval.HOURLY:
tmin = pd.to_datetime(math.floor(tmin / 3600) * 3600, unit="s")
tmax = pd.to_datetime(math.ceil(tmax / 3600) * 3600, unit="s")
else:
tmin = pd.to_datetime(math.floor(tmin / 86400) * 86400, unit="s")
tmax = pd.to_datetime(math.ceil(tmax / 86400) * 86400, unit="s")
if interval == TimeseriesInterval.WEEKLY:
if tmin.weekday() != 0:
tmin += pd.offsets.Day(6 - tmin.weekday())
if tmax.weekday() != 0:
tmax += pd.offsets.Day(6 - tmax.weekday())
elif interval == TimeseriesInterval.MONTHLY:
tmin = tmin.replace(day=1)
tmax = _last_day_of_month(tmax)
# print(f'time frame {tmin} - {tmax}')
# print(df[time_column].dtype)
if not isinstance(df[time_column], DatetimeIndex):
df[time_column] = | pd.to_datetime(df[time_column], unit=unit) | pandas.to_datetime |
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
        self.assertEqual(Timedelta(np.nan).value, iNaT)
import logging
import os
import re
import shutil
from datetime import datetime
from itertools import combinations
from random import randint
import numpy as np
import pandas as pd
import psutil
import pytest
from dask import dataframe as dd
from distributed.utils_test import cluster
from tqdm import tqdm
import featuretools as ft
from featuretools import EntitySet, Timedelta, calculate_feature_matrix, dfs
from featuretools.computational_backends import utils
from featuretools.computational_backends.calculate_feature_matrix import (
FEATURE_CALCULATION_PERCENTAGE,
_chunk_dataframe_groups,
_handle_chunk_size,
scatter_warning
)
from featuretools.computational_backends.utils import (
bin_cutoff_times,
create_client_and_cluster,
n_jobs_to_workers
)
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
IdentityFeature
)
from featuretools.primitives import (
Count,
Max,
Min,
Percentile,
Sum,
TransformPrimitive
)
from featuretools.tests.testing_utils import (
backward_path,
get_mock_client_cluster,
to_pandas
)
from featuretools.utils.gen_utils import Library, import_or_none
ks = import_or_none('databricks.koalas')
def test_scatter_warning(caplog):
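    # scatter_warning should log through the featuretools logger when the
    # EntitySet is scattered to fewer workers than requested (1 of 2 here)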
logger = logging.getLogger('featuretools')
match = "EntitySet was only scattered to {} out of {} workers"
warning_message = match.format(1, 2)
logger.propagate = True
scatter_warning(1, 2)
logger.propagate = False
assert warning_message in caplog.text
# TODO: final assert fails w/ Dask
def test_calc_feature_matrix(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed dataframe result not ordered')
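    # one cutoff time per log instance; the expected boolean labels for
    # value > 10 are checked first, followed by input validation errors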
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times, es['log'].index: instances})
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
verbose=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
error_text = 'features must be a non-empty list of features'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix('features', es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([], es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([1, 2, 3], es, cutoff_time=cutoff_time)
error_text = "cutoff_time times must be datetime type: try casting via "\
"pd\\.to_datetime\\(\\)"
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=17)
error_text = 'cutoff_time must be a single value or DataFrame'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=times)
cutoff_times_dup = pd.DataFrame({'time': [datetime(2018, 3, 1),
datetime(2018, 3, 1)],
es['log'].index: [1, 1]})
error_text = 'Duplicated rows in cutoff time dataframe.'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
entityset=es,
cutoff_time=cutoff_times_dup)
cutoff_reordered = cutoff_time.iloc[[-1, 10, 1]] # 3 ids not ordered by cutoff time
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_reordered,
verbose=True)
assert all(feature_matrix.index == cutoff_reordered["id"].values)
# fails with Dask and Koalas entitysets, cutoff time not reordered; cannot verify out of order
# - can't tell if the values are wrong or just reordered when all are False, so positional checks aren't possible
def test_cfm_warns_dask_cutoff_time(es):
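    # passing a dask DataFrame as cutoff_time should emit a UserWarning and
    # be computed to pandas before the calculation runs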
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times,
es['log'].index: instances})
cutoff_time = dd.from_pandas(cutoff_time, npartitions=4)
property_feature = ft.Feature(es['log']['value']) > 10
match = "cutoff_time should be a Pandas DataFrame: " \
"computing cutoff_time, this may take a while"
with pytest.warns(UserWarning, match=match):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_cfm_compose(es, lt):
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
verbose=True)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_compose_approximate(es, lt):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('dask does not support approximate')
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
approximate='1s',
verbose=True)
assert(type(feature_matrix) == pd.core.frame.DataFrame)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_dask_compose(dask_es, lt):
property_feature = ft.Feature(dask_es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
dask_es,
cutoff_time=lt,
verbose=True)
feature_matrix = feature_matrix.compute()
assert (feature_matrix[property_feature.get_name()] == feature_matrix['label_func']).values.all()
# tests approximate, skip for dask/koalas
def test_cfm_approximate_correct_ordering():
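    # the (instance, time) index of the returned matrix should line up with
    # the original cutoff rows, both with and without an approximate window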
trips = {
'trip_id': [i for i in range(1000)],
'flight_time': [datetime(1998, 4, 2) for i in range(350)] + [datetime(1997, 4, 3) for i in range(650)],
'flight_id': [randint(1, 25) for i in range(1000)],
'trip_duration': [randint(1, 999) for i in range(1000)]
}
df = pd.DataFrame.from_dict(trips)
es = EntitySet('flights')
es.entity_from_dataframe("trips",
dataframe=df,
index="trip_id",
time_index='flight_time')
es.normalize_entity(base_entity_id="trips",
new_entity_id="flights",
index="flight_id",
make_time_index=True)
features = dfs(entityset=es, target_entity='trips', features_only=True)
flight_features = [feature for feature in features
if isinstance(feature, DirectFeature) and
isinstance(feature.base_features[0],
AggregationFeature)]
property_feature = IdentityFeature(es['trips']['trip_id'])
cutoff_time = pd.DataFrame.from_dict({'instance_id': df['trip_id'],
'time': df['flight_time']})
time_feature = IdentityFeature(es['trips']['flight_time'])
feature_matrix = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
feature_matrix.index.names = ['instance', 'time']
assert(np.all(feature_matrix.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix[['trip_id', 'flight_time']].values))
feature_matrix_2 = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
approximate=Timedelta(2, 'd'))
feature_matrix_2.index.names = ['instance', 'time']
assert(np.all(feature_matrix_2.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix_2[['trip_id', 'flight_time']].values))
for column in feature_matrix:
for x, y in zip(feature_matrix[column], feature_matrix_2[column]):
assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))
# uses approximate, skip for dask/koalas entitysets
def test_cfm_no_cutoff_time_index(pd_es):
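    # with cutoff_time_in_index=False the matrix should be indexed by the
    # target entity index ('id') rather than an (id, time) MultiIndex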
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat4 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat4, pd_es['sessions'])
cutoff_time = pd.DataFrame({
'time': [datetime(2013, 4, 9, 10, 31, 19), datetime(2013, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(12, 's'),
cutoff_time=cutoff_time)
assert feature_matrix.index.name == 'id'
assert feature_matrix.index.values.tolist() == [0, 2]
assert feature_matrix[dfeat.get_name()].tolist() == [10, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
cutoff_time = pd.DataFrame({
'time': [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix_2 = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix_2.index.name == 'id'
assert feature_matrix_2.index.tolist() == [0, 2]
assert feature_matrix_2[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix_2[agg_feat.get_name()].tolist() == [5, 1]
# TODO: fails with dask entitysets
# TODO: fails with koalas entitysets
def test_cfm_duplicated_index_in_cutoff_time(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed results not ordered, missing duplicates')
times = [datetime(2011, 4, 1), datetime(2011, 5, 1),
datetime(2011, 4, 1), datetime(2011, 5, 1)]
instances = [1, 1, 2, 2]
property_feature = ft.Feature(es['log']['value']) > 10
cutoff_time = pd.DataFrame({'id': instances, 'time': times},
index=[1, 1, 1, 1])
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
chunk_size=1)
assert (feature_matrix.shape[0] == cutoff_time.shape[0])
# TODO: fails with Dask, Koalas
def test_saveprogress(es, tmpdir):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('saveprogress fails with distributed entitysets')
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = ft.Feature(es['log']['value']) > 10
save_progress = str(tmpdir)
fm_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
save_progress=save_progress)
_, _, files = next(os.walk(save_progress))
files = [os.path.join(save_progress, file) for file in files]
# there are 17 datetime files created above
assert len(files) == 17
list_df = []
for file_ in files:
df = pd.read_csv(file_, index_col="id", header=0)
list_df.append(df)
merged_df = pd.concat(list_df)
merged_df.set_index(pd.DatetimeIndex(times), inplace=True, append=True)
fm_no_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
assert np.all((merged_df.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (merged_df.sort_index().values))
shutil.rmtree(save_progress)
def test_cutoff_time_correctly(es):
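    # log counts per customer should be evaluated as of each customer's cutoff time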
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
labels = [10, 5, 0]
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_binning():
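    # cutoffs should be floored to the start of their 4- and 25-hour bins;
    # relative units such as months are rejected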
cutoff_time = pd.DataFrame({
'time': [
datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1)
],
'instance_id': [1, 2, 3]
})
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(4, 'h'))
labels = [datetime(2011, 4, 9, 12),
datetime(2011, 4, 10, 8),
datetime(2011, 4, 10, 12)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(25, 'h'))
labels = [datetime(2011, 4, 8, 22),
datetime(2011, 4, 9, 23),
datetime(2011, 4, 9, 23)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
error_text = "Unit is relative"
with pytest.raises(ValueError, match=error_text):
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(1, 'mo'))
def test_training_window_fails_dask(dask_es):
property_feature = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['customers'],
primitive=Count)
error_text = "Using training_window is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([property_feature],
dask_es,
training_window='2 hours')
def test_cutoff_time_columns_order(es):
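    # the instance id and time columns should be located by name, regardless of
    # column order or extra dummy columns in the cutoff time frame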
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
id_col_names = ['instance_id', es['customers'].index]
time_col_names = ['time', es['customers'].time_index]
for id_col in id_col_names:
for time_col in time_col_names:
cutoff_time = pd.DataFrame({'dummy_col_1': [1, 2, 3],
id_col: [0, 1, 2],
'dummy_col_2': [True, False, False],
time_col: times})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
labels = [10, 5, 0]
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_df_redundant_column_names(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({es['customers'].index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "instance_id" and a column' \
' with the same name as the target entity index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
cutoff_time = pd.DataFrame({es['customers'].time_index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "time" and a column' \
' with the same name as the target entity time index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_training_window(pd_es):
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
    # include a feature that is a direct feature of a higher level agg
    # so we have multiple "filter eids" in get_pandas_data_slice,
    # and we go through the loop to pull data with a training_window param more than once
dagg = DirectFeature(top_level_agg, pd_es['customers'])
# for now, warns if last_time_index not present
times = [datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
warn_text = "Using training_window but last_time_index is not set on entity customers"
with pytest.warns(UserWarning, match=warn_text):
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours')
pd_es.add_last_time_indexes()
error_text = 'Training window cannot be in observations'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
pd_es,
cutoff_time=cutoff_time,
training_window=Timedelta(2, 'observations'))
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True)
prop_values = [4, 5, 1]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False)
prop_values = [5, 5, 2]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case3. include_cutoff_time = False with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-09 10:40:00"),
training_window='9 minutes',
include_cutoff_time=False)
prop_values = [0, 4, 0]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case4. include_cutoff_time = True with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-10 10:40:00"),
training_window='2 days',
include_cutoff_time=True)
prop_values = [0, 10, 1]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
def test_training_window_overlap(pd_es):
pd_es.add_last_time_indexes()
count_log = ft.Feature(
base=pd_es['log']['id'],
parent_entity=pd_es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:40:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=True,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [1, 9])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=False,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [0, 9])
def test_include_cutoff_time_without_training_window(es):
es.add_last_time_indexes()
count_log = ft.Feature(
base=es['log']['id'],
parent_entity=es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:31:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [1, 6])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [0, 5])
# Case3. include_cutoff_time = True with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [6])
# Case4. include_cutoff_time = False with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [5])
def test_approximate_dfeat_of_agg_on_target_include_cutoff_time(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
cutoff_time = pd.DataFrame({'time': [datetime(2011, 4, 9, 10, 31, 19)], 'instance_id': [0]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat2, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=False)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# excluded due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [5]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=True)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# included due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [6]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
def test_training_window_recent_time_index(pd_es):
# customer with no sessions
row = {
'id': [3],
'age': [73],
u'région_id': ['United States'],
'cohort': [1],
'cancel_reason': ["Lost interest"],
'loves_ice_cream': [True],
'favorite_quote': ["Don't look back. Something might be gaining on you."],
'signup_date': [datetime(2011, 4, 10)],
'upgrade_date': [datetime(2011, 4, 12)],
'cancel_date': [datetime(2011, 5, 13)],
'date_of_birth': [datetime(1938, 2, 1)],
'engagement_level': [2],
}
to_add_df = pd.DataFrame(row)
to_add_df.index = range(3, 4)
# have to convert category to int in order to concat
old_df = pd_es['customers'].df
old_df.index = old_df.index.astype("int")
old_df["id"] = old_df["id"].astype(int)
df = pd.concat([old_df, to_add_df], sort=True)
# convert back after
df.index = df.index.astype("category")
df["id"] = df["id"].astype("category")
pd_es['customers'].update_data(df=df, recalculate_last_time_indexes=False)
pd_es.add_last_time_indexes()
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dagg = DirectFeature(top_level_agg, pd_es['customers'])
instance_ids = [0, 1, 2, 3]
times = [datetime(2011, 4, 9, 12, 31), datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1), datetime(2011, 4, 10, 1, 59, 59)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': instance_ids})
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True,
)
prop_values = [4, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False,
)
prop_values = [5, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# TODO: add test to fail w/ koalas
def test_approximate_fails_dask(dask_es):
agg_feat = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['sessions'],
primitive=Count)
error_text = "Using approximate is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([agg_feat],
dask_es,
approximate=Timedelta(1, 'week'))
def test_approximate_multiple_instances_per_cutoff_time(pd_es):
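    # with a one-week approximation window both cutoff rows should still
    # produce their own row in the output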
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix.shape[0] == 2
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_with_multiple_paths(pd_diamond_es):
pd_es = pd_diamond_es
path = backward_path(pd_es, ['regions', 'customers', 'transactions'])
agg_feat = ft.AggregationFeature(pd_es['transactions']['id'],
parent_entity=pd_es['regions'],
relationship_path=path,
primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [6, 2]
def test_approximate_dfeat_of_agg_on_target(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
instance_ids=[0, 2],
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_dfeat_of_need_all_values(pd_es):
p = ft.Feature(pd_es['log']['value'], primitive=Percentile)
agg_feat = ft.Feature(p, parent_entity=pd_es['sessions'], primitive=Sum)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
log_df = pd_es['log'].df
instances = [0, 2]
cutoffs = [pd.Timestamp('2011-04-09 10:31:19'), pd.Timestamp('2011-04-09 11:00:00')]
approxes = [pd.Timestamp('2011-04-09 10:31:10'), pd.Timestamp('2011-04-09 11:00:00')]
true_vals = []
true_vals_approx = []
for instance, cutoff, approx in zip(instances, cutoffs, approxes):
log_data_cutoff = log_df[log_df['datetime'] < cutoff]
log_data_cutoff['percentile'] = log_data_cutoff['value'].rank(pct=True)
true_agg = log_data_cutoff.loc[log_data_cutoff['session_id'] == instance, 'percentile'].fillna(0).sum()
true_vals.append(round(true_agg, 3))
log_data_approx = log_df[log_df['datetime'] < approx]
log_data_approx['percentile'] = log_data_approx['value'].rank(pct=True)
true_agg_approx = log_data_approx.loc[log_data_approx['session_id'].isin([0, 1, 2]), 'percentile'].fillna(0).sum()
true_vals_approx.append(round(true_agg_approx, 3))
lapprox = [round(x, 3) for x in feature_matrix[dfeat.get_name()].tolist()]
test_list = [round(x, 3) for x in feature_matrix[agg_feat.get_name()].tolist()]
assert lapprox == true_vals_approx
assert test_list == true_vals
def test_uses_full_entity_feat_of_approximate(pd_es):
agg_feat = ft.Feature(pd_es['log']['value'], parent_entity=pd_es['sessions'], primitive=Sum)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
agg_feat3 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Max)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
dfeat2 = DirectFeature(agg_feat3, pd_es['sessions'])
p = ft.Feature(dfeat, primitive=Percentile)
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
# only dfeat2 should be approximated
# because Percentile needs all values
feature_matrix_only_dfeat2 = calculate_feature_matrix(
[dfeat2],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
assert feature_matrix_only_dfeat2[dfeat2.get_name()].tolist() == [50, 50]
feature_matrix_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
assert feature_matrix_only_dfeat2[dfeat2.get_name()].tolist() == feature_matrix_approx[dfeat2.get_name()].tolist()
feature_matrix_small_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
approximate=Timedelta(10, 'ms'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
feature_matrix_no_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
for f in [p, dfeat, agg_feat]:
for fm1, fm2 in combinations([feature_matrix_approx,
feature_matrix_small_approx,
feature_matrix_no_approx], 2):
assert fm1[f.get_name()].tolist() == fm2[f.get_name()].tolist()
def test_approximate_dfeat_of_dfeat_of_agg_on_target(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(ft.Feature(agg_feat2, pd_es["sessions"]), pd_es['log'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [7, 10]
def test_empty_path_approximate_full(pd_es):
pd_es['sessions'].df['customer_id'] = pd.Series([np.nan, np.nan, np.nan, 1, 1, 2], dtype="category")
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
vals1 = feature_matrix[dfeat.get_name()].tolist()
assert (vals1[0] == 0)
assert (vals1[1] == 0)
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
# todo: do we need to test this situation?
# def test_empty_path_approximate_partial(pd_es):
# pd_es = copy.deepcopy(pd_es)
# pd_es['sessions'].df['customer_id'] = pd.Categorical([0, 0, np.nan, 1, 1, 2])
# agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
# agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
# dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
# times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
# cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
# feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
# pd_es,
# approximate=Timedelta(10, 's'),
# cutoff_time=cutoff_time)
# vals1 = feature_matrix[dfeat.get_name()].tolist()
# assert vals1[0] == 7
# assert np.isnan(vals1[1])
# assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approx_base_feature_is_also_first_class_feature(pd_es):
log_to_products = DirectFeature(pd_es['products']['rating'], pd_es['log'])
# This should still be computed properly
agg_feat = ft.Feature(log_to_products, parent_entity=pd_es['sessions'], primitive=Min)
customer_agg_feat = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
# This is to be approximated
sess_to_cust = DirectFeature(customer_agg_feat, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([sess_to_cust, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
vals1 = feature_matrix[sess_to_cust.get_name()].tolist()
assert vals1 == [8.5, 7]
vals2 = feature_matrix[agg_feat.get_name()].tolist()
assert vals2 == [4, 1.5]
def test_approximate_time_split_returns_the_same_result(pd_es):
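    # calculating the matrix for all cutoff rows at once should match
    # concatenating the results of per-row calculations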
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:07:30'),
pd.Timestamp('2011-04-09 10:07:40')],
'instance_id': [0, 0]})
feature_matrix_at_once = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
divided_matrices = []
separate_cutoff = [cutoff_df.iloc[0:1], cutoff_df.iloc[1:]]
# Make sure indexes are different
    # Note that this step is unnecessary and is done only to showcase the issue here
separate_cutoff[0].index = [0]
separate_cutoff[1].index = [1]
for ct in separate_cutoff:
fm = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=ct)
divided_matrices.append(fm)
feature_matrix_from_split = pd.concat(divided_matrices)
assert feature_matrix_from_split.shape == feature_matrix_at_once.shape
for i1, i2 in zip(feature_matrix_at_once.index, feature_matrix_from_split.index):
assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
for c in feature_matrix_from_split:
for i1, i2 in zip(feature_matrix_at_once[c], feature_matrix_from_split[c]):
assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
def test_approximate_returns_correct_empty_default_values(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['sessions'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 11:00:00'),
pd.Timestamp('2011-04-09 11:00:00')],
'instance_id': [0, 0]})
fm = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
assert fm[dfeat.get_name()].tolist() == [0, 10]
# def test_approximate_deep_recurse(pd_es):
# pd_es = pd_es
# agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
# dfeat1 = DirectFeature(agg_feat, pd_es['sessions'])
# agg_feat2 = Sum(dfeat1, pd_es['customers'])
# dfeat2 = DirectFeature(agg_feat2, pd_es['sessions'])
# agg_feat3 = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['products'], primitive=Count)
# dfeat3 = DirectFeature(agg_feat3, pd_es['log'])
# agg_feat4 = Sum(dfeat3, pd_es['sessions'])
# feature_matrix = calculate_feature_matrix([dfeat2, agg_feat4],
# pd_es,
# instance_ids=[0, 2],
# approximate=Timedelta(10, 's'),
# cutoff_time=[datetime(2011, 4, 9, 10, 31, 19),
# datetime(2011, 4, 9, 11, 0, 0)])
# # dfeat2 and agg_feat4 should both be approximated
def test_approximate_child_aggs_handled_correctly(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
agg_feat_2 = ft.Feature(pd_es['log']['value'], parent_entity=pd_es['customers'], primitive=Sum)
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 10:30:00'),
pd.Timestamp('2011-04-09 10:30:06')],
'instance_id': [0, 0]})
fm = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
fm_2 = calculate_feature_matrix([dfeat, agg_feat_2],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
assert fm[dfeat.get_name()].tolist() == [2, 3]
assert fm_2[agg_feat_2.get_name()].tolist() == [0, 5]
def test_cutoff_time_naming(es):
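    # cutoff columns may be named 'instance_id'/'time' or after the target
    # entity's index/time index; any other names raise an AttributeError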
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 10:30:00'),
pd.Timestamp('2011-04-09 10:30:06')],
'instance_id': [0, 0]})
cutoff_df_index_name = cutoff_df.rename(columns={"instance_id": "id"})
cutoff_df_wrong_index_name = cutoff_df.rename(columns={"instance_id": "wrong_id"})
cutoff_df_wrong_time_name = cutoff_df.rename(columns={"time": "cutoff_time"})
fm1 = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
fm1 = to_pandas(fm1, index='id', sort_index=True)
fm2 = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_index_name)
fm2 = to_pandas(fm2, index='id', sort_index=True)
assert all((fm1 == fm2.values).values)
error_text = 'Cutoff time DataFrame must contain a column with either the same name' \
' as the target entity index or a column named "instance_id"'
with pytest.raises(AttributeError, match=error_text):
calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_wrong_index_name)
time_error_text = 'Cutoff time DataFrame must contain a column with either the same name' \
' as the target entity time_index or a column named "time"'
with pytest.raises(AttributeError, match=time_error_text):
calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_wrong_time_name)
# TODO: order doesn't match, but output matches
def test_cutoff_time_extra_columns(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed result not ordered')
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'label': [True, True, False]},
columns=['time', 'instance_id', 'label'])
fm = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
# check column was added to end of matrix
assert 'label' == fm.columns[-1]
assert (fm['label'].values == cutoff_df['label'].values).all()
def test_cutoff_time_extra_columns_approximate(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'label': [True, True, False]},
columns=['time', 'instance_id', 'label'])
fm = calculate_feature_matrix([dfeat],
pd_es,
cutoff_time=cutoff_df,
approximate="2 days")
# check column was added to end of matrix
assert 'label' in fm.columns
assert (fm['label'].values == cutoff_df['label'].values).all()
def test_cutoff_time_extra_columns_same_name(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed result not ordered')
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'régions.COUNT(customers)': [False, False, True]},
columns=['time', 'instance_id', 'régions.COUNT(customers)'])
fm = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
assert (fm['régions.COUNT(customers)'].values == cutoff_df['régions.COUNT(customers)'].values).all()
def test_cutoff_time_extra_columns_same_name_approximate(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'régions.COUNT(customers)': [False, False, True]},
columns=['time', 'instance_id', 'régions.COUNT(customers)'])
fm = calculate_feature_matrix([dfeat],
pd_es,
cutoff_time=cutoff_df,
approximate="2 days")
assert (fm['régions.COUNT(customers)'].values == cutoff_df['régions.COUNT(customers)'].values).all()
def test_instances_after_cutoff_time_removed(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
cutoff_time = datetime(2011, 4, 8)
fm = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True)
fm = to_pandas(fm, index='id', sort_index=True)
actual_ids = [id for (id, _) in fm.index] if isinstance(fm.index, pd.MultiIndex) else fm.index
# Customer with id 1 should be removed
assert set(actual_ids) == set([2, 0])
# TODO: Dask and Koalas do not keep instance_id after cutoff
def test_instances_with_id_kept_after_cutoff(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed result not ordered, missing extra instances')
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
cutoff_time = datetime(2011, 4, 8)
fm = calculate_feature_matrix([property_feature],
es,
instance_ids=[0, 1, 2],
cutoff_time=cutoff_time,
cutoff_time_in_index=True)
# Customer #1 is after cutoff, but since it is included in instance_ids it
# should be kept.
actual_ids = [id for (id, _) in fm.index] if isinstance(fm.index, pd.MultiIndex) else fm.index
assert set(actual_ids) == set([0, 1, 2])
# TODO: Fails with Dask
# TODO: Fails with Koalas
def test_cfm_returns_original_time_indexes(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed result not ordered, indexes are lost due to not multiindexing')
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0]})
fm = calculate_feature_matrix([dfeat],
es, cutoff_time=cutoff_df,
cutoff_time_in_index=True)
instance_level_vals = fm.index.get_level_values(0).values
time_level_vals = fm.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
def test_cfm_returns_original_time_indexes_approximate(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
agg_feat_2 = ft.Feature(pd_es['sessions']['id'], parent_entity=pd_es['customers'], primitive=Count)
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0]})
# approximate, in different windows, no unapproximated aggs
fm = calculate_feature_matrix([dfeat], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="1 m")
instance_level_vals = fm.index.get_level_values(0).values
time_level_vals = fm.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
# approximate, in different windows, unapproximated aggs
fm = calculate_feature_matrix([dfeat, agg_feat_2], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="1 m")
instance_level_vals = fm.index.get_level_values(0).values
time_level_vals = fm.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
# approximate, in same window, no unapproximated aggs
fm2 = calculate_feature_matrix([dfeat], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="2 d")
instance_level_vals = fm2.index.get_level_values(0).values
time_level_vals = fm2.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
# approximate, in same window, unapproximated aggs
fm3 = calculate_feature_matrix([dfeat, agg_feat_2], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="2 d")
instance_level_vals = fm3.index.get_level_values(0).values
time_level_vals = fm3.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
def test_dask_kwargs(pd_es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(pd_es['log']['value']) > 10
with cluster() as (scheduler, [a, b]):
dkwargs = {'cluster': scheduler['address']}
feature_matrix = calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
dask_kwargs=dkwargs,
approximate='1 hour')
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_dask_persisted_es(pd_es, capsys):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(pd_es['log']['value']) > 10
with cluster() as (scheduler, [a, b]):
dkwargs = {'cluster': scheduler['address']}
feature_matrix = calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
dask_kwargs=dkwargs,
approximate='1 hour')
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
feature_matrix = calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
dask_kwargs=dkwargs,
approximate='1 hour')
captured = capsys.readouterr()
assert "Using EntitySet persisted on the cluster as dataset " in captured[0]
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
class TestCreateClientAndCluster(object):
def test_user_cluster_as_string(self, monkeypatch):
monkeypatch.setattr(utils, "get_client_cluster",
get_mock_client_cluster)
# cluster in dask_kwargs case
client, cluster = create_client_and_cluster(n_jobs=2,
dask_kwargs={'cluster': 'tcp://127.0.0.1:54321'},
entityset_size=1)
assert cluster == 'tcp://127.0.0.1:54321'
def test_cluster_creation(self, monkeypatch):
total_memory = psutil.virtual_memory().total
monkeypatch.setattr(utils, "get_client_cluster",
get_mock_client_cluster)
try:
cpus = len(psutil.Process().cpu_affinity())
except AttributeError:
cpus = psutil.cpu_count()
# jobs < tasks case
client, cluster = create_client_and_cluster(n_jobs=2,
dask_kwargs={},
entityset_size=1)
num_workers = min(cpus, 2)
memory_limit = int(total_memory / float(num_workers))
assert cluster == (min(cpus, 2), 1, None, memory_limit)
# jobs > tasks case
match = r'.*workers requested, but only .* workers created'
with pytest.warns(UserWarning, match=match) as record:
client, cluster = create_client_and_cluster(n_jobs=1000,
dask_kwargs={'diagnostics_port': 8789},
entityset_size=1)
assert len(record) == 1
num_workers = cpus
memory_limit = int(total_memory / float(num_workers))
assert cluster == (num_workers, 1, 8789, memory_limit)
# dask_kwargs sets memory limit
client, cluster = create_client_and_cluster(n_jobs=2,
dask_kwargs={'diagnostics_port': 8789,
'memory_limit': 1000},
entityset_size=1)
num_workers = min(cpus, 2)
assert cluster == (num_workers, 1, 8789, 1000)
def test_not_enough_memory(self, monkeypatch):
total_memory = psutil.virtual_memory().total
monkeypatch.setattr(utils, "get_client_cluster",
get_mock_client_cluster)
# errors if not enough memory for each worker to store the entityset
with pytest.raises(ValueError, match=''):
create_client_and_cluster(n_jobs=1,
dask_kwargs={},
entityset_size=total_memory * 2)
# does not error even if worker memory is less than 2x entityset size
create_client_and_cluster(n_jobs=1,
dask_kwargs={},
entityset_size=total_memory * .75)
def test_parallel_failure_raises_correct_error(pd_es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(pd_es['log']['value']) > 10
error_text = 'Need at least one worker'
with pytest.raises(AssertionError, match=error_text):
calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
n_jobs=0,
approximate='1 hour')
def test_warning_not_enough_chunks(pd_es, capsys):
property_feature = IdentityFeature(pd_es['log']['value']) > 10
with cluster(nworkers=3) as (scheduler, [a, b, c]):
dkwargs = {'cluster': scheduler['address']}
calculate_feature_matrix([property_feature],
entityset=pd_es,
chunk_size=.5,
verbose=True,
dask_kwargs=dkwargs)
captured = capsys.readouterr()
pattern = r'Fewer chunks \([0-9]+\), than workers \([0-9]+\) consider reducing the chunk size'
assert re.search(pattern, captured.out) is not None
def test_n_jobs():
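    # n_jobs_to_workers: -1 uses every cpu, other negative values count down
    # from the cpu total, and 0 is rejected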
try:
cpus = len(psutil.Process().cpu_affinity())
except AttributeError:
cpus = psutil.cpu_count()
assert n_jobs_to_workers(1) == 1
assert n_jobs_to_workers(-1) == cpus
assert n_jobs_to_workers(cpus) == cpus
assert n_jobs_to_workers((cpus + 1) * -1) == 1
if cpus > 1:
assert n_jobs_to_workers(-2) == cpus - 1
error_text = 'Need at least one worker'
with pytest.raises(AssertionError, match=error_text):
n_jobs_to_workers(0)
# TODO: add dask version of int_es
def test_integer_time_index(int_es):
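    # with an integer time index the rows come back sorted by (time, instance_id)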
times = list(range(8, 18)) + list(range(19, 26))
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
cutoff_df = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(int_es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df,
cutoff_time_in_index=True)
time_level_vals = feature_matrix.index.get_level_values(1).values
sorted_df = cutoff_df.sort_values(['time', 'instance_id'], kind='mergesort')
assert (time_level_vals == sorted_df['time'].values).all()
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_integer_time_index_single_cutoff_value(int_es):
labels = [False] * 3 + [True] * 2 + [False] * 4
property_feature = IdentityFeature(int_es['log']['value']) > 10
cutoff_times = [16, pd.Series([16])[0], 16.0, pd.Series([16.0])[0]]
for cutoff_time in cutoff_times:
feature_matrix = calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True)
time_level_vals = feature_matrix.index.get_level_values(1).values
assert (time_level_vals == [16] * 9).all()
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
# TODO: add dask version of int_es
def test_integer_time_index_datetime_cutoffs(int_es):
times = [datetime.now()] * 17
cutoff_df = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(int_es['log']['value']) > 10
error_text = "cutoff_time times must be numeric: try casting via pd\\.to_numeric\\(\\)"
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df,
cutoff_time_in_index=True)
# TODO: add Dask version of int_es
def test_integer_time_index_passes_extra_columns(int_es):
times = list(range(8, 18)) + list(range(19, 23)) + [25, 24, 23]
labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
cutoff_df = pd.DataFrame({'time': times,
'instance_id': instances,
'labels': labels})
cutoff_df = cutoff_df[['time', 'instance_id', 'labels']]
property_feature = IdentityFeature(int_es['log']['value']) > 10
fm = calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df,
cutoff_time_in_index=True)
assert (fm[property_feature.get_name()] == fm['labels']).all()
# TODO: add Dask version of int_es
def test_integer_time_index_mixed_cutoff(int_es):
times_dt = list(range(8, 17)) + [datetime(2011, 1, 1), 19, 20, 21, 22, 25, 24, 23]
labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
cutoff_df = pd.DataFrame({'time': times_dt,
'instance_id': instances,
'labels': labels})
cutoff_df = cutoff_df[['time', 'instance_id', 'labels']]
property_feature = IdentityFeature(int_es['log']['value']) > 10
error_text = 'cutoff_time times must be.*try casting via.*'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
times_str = list(range(8, 17)) + ["foobar", 19, 20, 21, 22, 25, 24, 23]
cutoff_df['time'] = times_str
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
times_date_str = list(range(8, 17)) + ['2018-04-02', 19, 20, 21, 22, 25, 24, 23]
cutoff_df['time'] = times_date_str
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
times_int_str = [0, 1, 2, 3, 4, 5, '6', 7, 8, 9, 9, 10, 11, 12, 15, 14, 13]
times_int_str = list(range(8, 17)) + ['17', 19, 20, 21, 22, 25, 24, 23]
cutoff_df['time'] = times_int_str
    # the mixed int/string time column is not converted, so this should also raise a TypeError
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
def test_datetime_index_mixed_cutoff(es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[17] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
cutoff_df = pd.DataFrame({'time': times,
'instance_id': instances,
'labels': labels})
cutoff_df = cutoff_df[['time', 'instance_id', 'labels']]
property_feature = IdentityFeature(es['log']['value']) > 10
error_text = 'cutoff_time times must be.*try casting via.*'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
times[9] = "foobar"
cutoff_df['time'] = times
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
cutoff_df['time'].iloc[9] = '2018-04-02 18:50:45.453216'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
times[9] = '17'
cutoff_df['time'] = times
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
def test_string_time_values_in_cutoff_time(es):
times = ['2011-04-09 10:31:27', '2011-04-09 10:30:18']
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 0]})
agg_feature = ft.Feature(es['log']['value'], parent_entity=es['customers'], primitive=Sum)
error_text = 'cutoff_time times must be.*try casting via.*'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([agg_feature], es, cutoff_time=cutoff_time)
# TODO: Dask version fails (feature matrix is empty)
# TODO: Koalas version fails (koalas groupby agg doesn't support custom functions)
def test_no_data_for_cutoff_time(mock_customer):
if not all(isinstance(entity.df, pd.DataFrame) for entity in mock_customer.entities):
pytest.xfail("Dask fails because returned feature matrix is empty; Koalas doesn't support custom agg functions")
es = mock_customer
cutoff_times = pd.DataFrame({"customer_id": [4],
"time": pd.Timestamp('2011-04-08 20:08:13')})
trans_per_session = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["sessions"], primitive=Count)
trans_per_customer = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["customers"], primitive=Count)
features = [trans_per_customer, ft.Feature(trans_per_session, parent_entity=es["customers"], primitive=Max)]
fm = calculate_feature_matrix(features, entityset=es, cutoff_time=cutoff_times)
# due to default values for each primitive
    # count will be 0, but max will be NaN
np.testing.assert_array_equal(fm.values, [[0, np.nan]])
# adding missing instances not supported in Dask or Koalas
def test_instances_not_in_data(pd_es):
last_instance = max(pd_es['log'].df.index.values)
instances = list(range(last_instance + 1, last_instance + 11))
identity_feature = IdentityFeature(pd_es['log']['value'])
property_feature = identity_feature > 10
agg_feat = AggregationFeature(pd_es['log']['value'],
parent_entity=pd_es["sessions"],
primitive=Max)
direct_feature = DirectFeature(agg_feat, pd_es["log"])
features = [identity_feature, property_feature, direct_feature]
fm = calculate_feature_matrix(features, entityset=pd_es, instance_ids=instances)
assert all(fm.index.values == instances)
for column in fm.columns:
assert fm[column].isnull().all()
fm = calculate_feature_matrix(features,
entityset=pd_es,
instance_ids=instances,
approximate="730 days")
assert all(fm.index.values == instances)
for column in fm.columns:
assert fm[column].isnull().all()
def test_some_instances_not_in_data(pd_es):
a_time = datetime(2011, 4, 10, 10, 41, 9) # only valid data
b_time = datetime(2011, 4, 10, 11, 10, 5) # some missing data
c_time = datetime(2011, 4, 10, 12, 0, 0) # all missing data
times = [a_time, b_time, a_time, a_time, b_time, b_time] + [c_time] * 4
cutoff_time = pd.DataFrame({"instance_id": list(range(12, 22)),
"time": times})
identity_feature = IdentityFeature(pd_es['log']['value'])
property_feature = identity_feature > 10
agg_feat = AggregationFeature(pd_es['log']['value'],
parent_entity=pd_es["sessions"],
primitive=Max)
direct_feature = DirectFeature(agg_feat, pd_es["log"])
features = [identity_feature, property_feature, direct_feature]
fm = calculate_feature_matrix(features,
entityset=pd_es,
cutoff_time=cutoff_time)
ifeat_answer = [0, 7, 14, np.nan] + [np.nan] * 6
prop_answer = [0, 0, 1, np.nan, 0] + [np.nan] * 5
dfeat_answer = [14, 14, 14, np.nan] + [np.nan] * 6
assert all(fm.index.values == cutoff_time["instance_id"].values)
for x, y in zip(fm.columns, [ifeat_answer, prop_answer, dfeat_answer]):
np.testing.assert_array_equal(fm[x], y)
fm = calculate_feature_matrix(features,
entityset=pd_es,
cutoff_time=cutoff_time,
approximate="5 seconds")
dfeat_answer[0] = 7 # approximate calculated before 14 appears
dfeat_answer[2] = 7 # approximate calculated before 14 appears
prop_answer[3] = 0 # no_unapproximated_aggs code ignores cutoff time
assert all(fm.index.values == cutoff_time["instance_id"].values)
for x, y in zip(fm.columns, [ifeat_answer, prop_answer, dfeat_answer]):
np.testing.assert_array_equal(fm[x], y)
def test_missing_instances_with_categorical_index(pd_es):
instance_ids = [0, 1, 3, 2]
features = ft.dfs(entityset=pd_es, target_entity='customers', features_only=True)
fm = calculate_feature_matrix(entityset=pd_es,
features=features,
instance_ids=instance_ids)
assert all(fm.index.values == instance_ids)
assert isinstance(fm.index, pd.CategoricalIndex)
def test_handle_chunk_size():
total_size = 100
# user provides no chunk size
assert _handle_chunk_size(None, total_size) is None
# user provides fractional size
assert _handle_chunk_size(.1, total_size) == total_size * .1
assert _handle_chunk_size(.001, total_size) == 1 # rounds up
assert _handle_chunk_size(.345, total_size) == 35 # rounds up
# user provides absolute size
assert _handle_chunk_size(1, total_size) == 1
assert _handle_chunk_size(100, total_size) == 100
assert isinstance(_handle_chunk_size(100.0, total_size), int)
# test invalid cases
with pytest.raises(AssertionError, match="Chunk size must be greater than 0"):
_handle_chunk_size(0, total_size)
with pytest.raises(AssertionError, match="Chunk size must be greater than 0"):
_handle_chunk_size(-1, total_size)
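# For readers of these tests: the behaviour pinned down by the assertions above can
# be summarised by the following sketch (hypothetical, not the Featuretools source):
# None passes through, a fraction of total_size rounds up, and absolute sizes are
# validated and returned as ints.
def _handle_chunk_size_sketch(chunk_size, total_size):
    import math
    if chunk_size is None:
        return None
    assert chunk_size > 0, "Chunk size must be greater than 0"
    if chunk_size < 1:
        return int(math.ceil(chunk_size * total_size))
    return int(chunk_size)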
def test_chunk_dataframe_groups():
df = pd.DataFrame({
"group": [1, 1, 1, 1, 2, 2, 3]
})
grouped = df.groupby("group")
chunked_grouped = _chunk_dataframe_groups(grouped, 2)
# test group larger than chunk size gets split up
first = next(chunked_grouped)
assert first[0] == 1 and first[1].shape[0] == 2
second = next(chunked_grouped)
assert second[0] == 1 and second[1].shape[0] == 2
# test that equal to and less than chunk size stays together
third = next(chunked_grouped)
assert third[0] == 2 and third[1].shape[0] == 2
fourth = next(chunked_grouped)
assert fourth[0] == 3 and fourth[1].shape[0] == 1
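# The chunking contract exercised above amounts to this small generator (an
# illustrative sketch, not the library implementation): each (key, group) pair is
# re-yielded in slices of at most chunk_size rows, so oversized groups get split
# while smaller groups stay intact.
def _chunk_dataframe_groups_sketch(grouped, chunk_size):
    for key, group_df in grouped:
        for start in range(0, len(group_df), chunk_size):
            yield key, group_df.iloc[start:start + chunk_size]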
def test_calls_progress_callback(mock_customer):
class MockProgressCallback:
def __init__(self):
self.progress_history = []
self.total_update = 0
self.total_progress_percent = 0
def __call__(self, update, progress_percent, time_elapsed):
self.total_update += update
self.total_progress_percent = progress_percent
self.progress_history.append(progress_percent)
mock_progress_callback = MockProgressCallback()
es = mock_customer
# make sure to calculate features that have different paths to same base feature
trans_per_session = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["sessions"], primitive=Count)
trans_per_customer = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["customers"], primitive=Count)
features = [trans_per_session, ft.Feature(trans_per_customer, entity=es["sessions"])]
calculate_feature_matrix(features, entityset=es, progress_callback=mock_progress_callback)
# second to last entry is the last update from feature calculation
assert np.isclose(mock_progress_callback.progress_history[-2], FEATURE_CALCULATION_PERCENTAGE * 100)
assert np.isclose(mock_progress_callback.total_update, 100.0)
assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
# test with cutoff time dataframe
mock_progress_callback = MockProgressCallback()
cutoff_time = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [pd.to_datetime("2014-01-01 01:00:00"),
pd.to_datetime("2014-01-01 02:00:00"),
pd.to_datetime("2014-01-01 03:00:00")]})
calculate_feature_matrix(features, entityset=es, cutoff_time=cutoff_time, progress_callback=mock_progress_callback)
assert np.isclose(mock_progress_callback.progress_history[-2], FEATURE_CALCULATION_PERCENTAGE * 100)
assert np.isclose(mock_progress_callback.total_update, 100.0)
assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
def test_calls_progress_callback_cluster(pd_mock_customer):
class MockProgressCallback:
def __init__(self):
self.progress_history = []
self.total_update = 0
self.total_progress_percent = 0
def __call__(self, update, progress_percent, time_elapsed):
self.total_update += update
self.total_progress_percent = progress_percent
self.progress_history.append(progress_percent)
mock_progress_callback = MockProgressCallback()
trans_per_session = ft.Feature(pd_mock_customer["transactions"]["transaction_id"], parent_entity=pd_mock_customer["sessions"], primitive=Count)
trans_per_customer = ft.Feature(pd_mock_customer["transactions"]["transaction_id"], parent_entity=pd_mock_customer["customers"], primitive=Count)
features = [trans_per_session, ft.Feature(trans_per_customer, entity=pd_mock_customer["sessions"])]
with cluster() as (scheduler, [a, b]):
dkwargs = {'cluster': scheduler['address']}
calculate_feature_matrix(features,
entityset=pd_mock_customer,
progress_callback=mock_progress_callback,
dask_kwargs=dkwargs)
assert np.isclose(mock_progress_callback.total_update, 100.0)
assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
def test_closes_tqdm(es):
class ErrorPrim(TransformPrimitive):
'''A primitive whose function raises an error'''
name = "error_prim"
input_types = [ft.variable_types.Numeric]
return_type = "Numeric"
compatibility = [Library.PANDAS, Library.DASK, Library.KOALAS]
def get_function(self):
def error(s):
raise RuntimeError("This primitive has errored")
return error
value = ft.Feature(es['log']['value'])
property_feature = value > 10
error_feature = ft.Feature(value, primitive=ErrorPrim)
calculate_feature_matrix([property_feature],
es,
verbose=True)
assert len(tqdm._instances) == 0
try:
calculate_feature_matrix([value, error_feature],
es,
verbose=True)
assert False
except RuntimeError as e:
assert e.args[0] == "This primitive has errored"
finally:
assert len(tqdm._instances) == 0
def test_approximate_with_single_cutoff_warns(pd_es):
features = dfs(entityset=pd_es,
target_entity='customers',
features_only=True,
ignore_entities=['cohorts'],
agg_primitives=['sum'])
match = "Using approximate with a single cutoff_time value or no cutoff_time " \
"provides no computational efficiency benefit"
# test warning with single cutoff time
with pytest.warns(UserWarning, match=match):
calculate_feature_matrix(features,
pd_es,
cutoff_time=pd.to_datetime("2020-01-01"),
approximate="1 day")
# test warning with no cutoff time
with pytest.warns(UserWarning, match=match):
calculate_feature_matrix(features,
pd_es,
approximate="1 day")
# check proper handling of approximate
feature_matrix = calculate_feature_matrix(features,
pd_es,
cutoff_time= | pd.to_datetime("2011-04-09 10:31:30") | pandas.to_datetime |
import sys, os
import numpy as np
import pandas as pd
import ujson
from scipy.interpolate import interp1d
import scipy.ndimage
from ast import literal_eval
from get_workflow_info import get_workflow_info
project_name = "planetary-response-network-and-rescue-global-caribbean-storms-2017"
# st thomas DG
#ssid = 14759
# St John DG
ssid = 14806
# St John Planet
#ssid = 14813
# Puerto Rico before only
#ssid = 14929
# Turks and Caicos Cockburn Town DG/Planet
ssid = 14827
# DG - Barbuda
ssid = 14896
# DG - Antigua
ssid = 14930
# Planet - Dominica
ssid = 14988
active_subject_sets = [ssid]
#infile = "%s-classifications.csv" % project_name
#infile = 'damage-floods-blockages-shelters-landsat-classifications.csv'
infile = 'damage-floods-blockages-shelters-classifications.csv'
#infile = 'damages-floods-blockages-shelters-planet-labs-classifications.csv'
#infile = 'planetary-response-network-and-rescue-global-caribbean-storms-2017-classifications_wfid4958_nodups_inclnonlive.csv'
try:
infile = sys.argv[1]
except:
pass
workflow_version = -1
workflow_id = 4958
freetext = ''
outdir = "outfiles"
subject_file_set_by_user = False
# check for other command-line arguments
if len(sys.argv) > 1:
# if there are additional arguments, loop through them
for i_arg, argstr in enumerate(sys.argv[1:]):
arg = argstr.split('=')
if (arg[0] == "workflow_id") | (arg[0] == "wfid"):
workflow_id = int(arg[1])
elif (arg[0] == "workflow_version") | (arg[0] == "wfv"):
workflow_version = arg[1]
elif (arg[0] == "subject_set_id") | (arg[0] == "ssid"):
# might be passed as an int, might be passed as a list
try:
ssid = int(arg[1])
ssid_str = arg[1]
active_subject_sets = [ssid]
except:
active_subject_sets = literal_eval(arg[1])
ssid = active_subject_sets[0]
ssid_str = '%d' % ssid
for i in range(len(active_subject_sets)):
if i > 0:
ssid_str = '%s_%d' % (ssid_str, active_subject_sets[i])
elif (arg[0] == "name") | (arg[0] == "stub") | (arg[0] == "freetext"):
freetext = arg[1]
elif (arg[0] == "outdir"):
outdir = arg[1]
elif (arg[0] == "subj") | (arg[0] == "subjects") | (arg[0] == "subjectfile") | (arg[0] == "subject_file"):
subjectfile = arg[1]
subject_file_set_by_user = True
workflow_file = "%s-workflows.csv" % project_name
workflow_contents_file = "%s-workflow_contents.csv" % project_name
# if this subject file doesn't already exist, run get_subject_sizes.py
# note it has to download images to determine imsize (in pixels) so generate it some
# other way if you already have that info
if not subject_file_set_by_user:
subjectfile = "%s-subjects_enhancedinfo_ssids_%s.csv" % (project_name, ssid_str)
# these files will/may be written to
outfile_nodir = "%s-marks-points_wfid_%d.csv" % (project_name, workflow_id)
blankfile_nodir = "%s-marks-blank_wfid_%d.csv" % (project_name, workflow_id)
shortcutfile_nodir = "%s-marks-unclassifiable_wfid_%d.csv" % (project_name, workflow_id)
questionfile_nodir = "%s-questions_wfid_%d.csv" % (project_name, workflow_id)
outfile = "%s/%s" % (outdir, outfile_nodir)
blankfile = "%s/%s" % (outdir, blankfile_nodir)
shortcutfile = "%s/%s" % (outdir, shortcutfile_nodir)
questionfile = "%s/%s" % (outdir, questionfile_nodir)
# the order of tools is from the workflow information - as is the fact the
# marks are in task T2
tools = ['Road Blockage', 'Flood', 'Temporary Settlement', 'Structural Damage']
mark_count = [0, 0, 0, 0]
shortcuts = ['Unclassifiable Image', 'Ocean Only (no land)']
shortcut_mark_count = [0, 0]
# for the structural damage subtask, if it exists
details = ['Minor', 'Moderate', 'Catastrophic']
def get_wf_basics(workflow_id):
# I should be able to do this marking_tasks, shortcuts, questions etc
# automatically from workflow_info BUT NOT RIGHT NOW
# Guadeloupe
if workflow_id == 4928:
workflow_version = '18.53'
marking_tasks = ['T0']
question_tasks = ['']
shortcut_tasks = ['T1']
struc_subtask = False
# Turks and Caicos - Landsat 8
elif workflow_id == 4970:
workflow_version = '5.8'
marking_tasks = ['T0']
question_tasks = ['T2']
shortcut_tasks = ['T1', 'T3']
struc_subtask = False
# St Thomas - Digital Globe
# also anything that uses DG
elif workflow_id == 4958:
workflow_version = '17.60'
marking_tasks = ['T0']
question_tasks = ['T2']
shortcut_tasks = ['T1', 'T3']
struc_subtask = True
# Clone of the DG workflow but for Planet data
elif workflow_id == 4975:
workflow_version = '1.1' # could also be 2.2 if Dominica or later
marking_tasks = ['T0']
question_tasks = ['T2']
shortcut_tasks = ['T1', 'T3']
#struc_subtask = True # even though I doubt these are trustworthy
struc_subtask = False
# <NAME> before only
elif workflow_id == 5030:
workflow_version = '3.8'
marking_tasks = []
question_tasks = ['T2']
shortcut_tasks = ['T1', 'T3']
struc_subtask = False
# Clone of the Planet-only workflow but only the damage marking question
elif workflow_id == 5071:
workflow_version = '2.3' # could also be 2.2 if Dominica or later
marking_tasks = ['T0']
question_tasks = []
shortcut_tasks = ['T1', 'T3']
struc_subtask = False
return workflow_version, marking_tasks, question_tasks, shortcut_tasks, struc_subtask
def get_coords_mark(markinfo):
row = markinfo[1]
# print(markinfo)
# print("-----")
# print(row)
# print("\n\n")
mark_x = row['x']
mark_y = row['y']
the_x = np.array([row['x_min'], row['imsize_x_pix']])
the_y = np.array([row['y_min'], row['imsize_y_pix']])
the_lon = np.array([row['lon_min'], row['lon_max']])
the_lat = np.array([row['lat_min'], row['lat_max']])
# don't throw an error if the coords are out of bounds, but also don't extrapolate
f_x_lon = interp1d(the_x, the_lon, bounds_error=False, fill_value=(None, None))
f_y_lat = interp1d(the_y, the_lat, bounds_error=False, fill_value=(None, None))
return f_x_lon(mark_x), f_y_lat(mark_y)
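# Illustration of the mapping above (hypothetical numbers): a mark at pixel x=256 on
# a tile whose x axis runs 0..512 px and -64.80..-64.70 degrees longitude lands at the
# midpoint, -64.75; the same linear map is applied to y/latitude. Marks outside the
# tile are not extrapolated (bounds_error=False).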
def get_projection(projection_in):
# # for now let's just return the same projection for everything
# # this is for Sentinel 2
# return Proj(init='epsg:32620')
# if you're supplying anything with a colon like 'epsg:32619', you need init=.
# if you are supplying something more like '+proj=utm +zone=19 +datum=WGS84 +units=m +no_defs ', which comes from e.g. gdal, using init= will crash things
# even though those two strings represent the same projection
# what fun this is
try:
inProj = Proj(projection_in)
except:
try:
inProj = Proj(init=projection_in)
except:
# just assume a default
inProj = Proj(init='epsg:32620')
return inProj
# takes a single metadata row
def get_corner_latlong(meta_json, projection_in):
# in some cases we've included the corner lat and long in the metadata, in other cases not quite, but we can get that info
# recall that longitude is the x direction, latitude is the y direction
# BDS-created subjects have min and max lat and long so we can read it directly
try:
lon_min = meta_json['lon_min']
lon_max = meta_json['lon_max']
lat_min = meta_json['lat_min']
lat_max = meta_json['lat_max']
except:
# some of the subjects have the corners given in unprojected units
# which are in meters, but with actual value set by a global grid
x_m_min = meta_json['#tile_UL_x']
y_m_max = meta_json['#tile_UL_y']
x_m_max = meta_json['#tile_LR_x']
y_m_min = meta_json['#tile_LR_y']
#print(meta_json)
#print((x_m_min, y_m_min, x_m_max, y_m_max))
#f_x_lon, f_y_lat = get_interp_grid(subjects, ssid)
try:
inProj = get_projection(meta_json['projection_orig'])
except:
inProj = get_projection(ssid)
outProj = Proj(init='epsg:4326')
lon_min, lat_min = transform(inProj,outProj,x_m_min,y_m_min)
lon_max, lat_max = transform(inProj,outProj,x_m_max,y_m_max)
#print((lon_min, lat_min, lon_max, lat_max))
#print("\n")
return lon_min, lon_max, lat_min, lat_max
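# Illustrative metadata shapes handled above (values are made up):
#   {"lon_min": -64.80, "lon_max": -64.70, "lat_min": 18.30, "lat_max": 18.40}
#   {"#tile_UL_x": 300000.0, "#tile_UL_y": 2035000.0,
#    "#tile_LR_x": 310000.0, "#tile_LR_y": 2025000.0, "projection_orig": "epsg:32620"}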
wfv, marking_tasks, question_tasks, shortcut_tasks, struc_subtask = get_wf_basics(workflow_id)
# don't overwrite the workflow version if it's specified at the prompt
if workflow_version < 1:
workflow_version = wfv
# okay turns out we didn't really need this but I'm hoping it will make it easier to generalize later
workflow_df = pd.read_csv(workflow_file)
workflow_cdf = pd.read_csv(workflow_contents_file)
workflow_info = get_workflow_info(workflow_df, workflow_cdf, workflow_id, workflow_version)
classifications_all = | pd.read_csv(infile) | pandas.read_csv |
from sensible_raw.loaders import loader
from world_viewer.cns_world import CNSWorld
from world_viewer.glasses import Glasses
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
from matplotlib.colors import LogNorm
from sklearn.utils import shuffle
from matplotlib.figure import figaspect
# set analysis parameters
analysis = 'expo_frac'
opinion_type = "op_fitness"
binning = True
n_bins = 15
save_plots = False
show_plot = True
# load not shuffeld copenhagen data for comparision
cns_rw = CNSWorld()
cns_rw.load_world(opinions = ['fitness'], read_cached = False, stop=False, write_pickle = False, continous_op=False)
cns_glasses_rw = Glasses(cns_rw)
cns_data_rw = pd.read_pickle("tmp/final/spring_data.pkl")
# Function for loading Surrogate data
def load_data(runs, mode, analysis = "expo_frac", path= "tmp/ShuffledV4", shuffle_type="complete"):
cns = CNSWorld()
cns.load_world(opinions = ['fitness'], read_cached = True, stop=False, write_pickle = False, continous_op=False)
cns_glasses = Glasses(cns)
cns_glasses.output_folder = ""
cns.d_ij = None
start = "2014-02-01"
end = "2014-04-30"
cns.time = cns.time.loc[(cns.time.time >= start) & (cns.time.time <= end)]
cns.op_nodes = cns.op_nodes.loc[(cns.op_nodes.time >= start) & (cns.op_nodes.time <= end)]
cns.a_ij = cns.a_ij.loc[(cns.a_ij.time >= start) & (cns.a_ij.time <= end)]
data_all = []
expo_all = []
for run in runs:
# load
if mode == "edges":
exposure = pd.read_pickle(f"{path}/exposure_fitness_7_1_shuffled_time_edges_{shuffle_type}_{run}.pkl")
cns.a_ij = pd.read_pickle(f"{path}/a_ij_shuffled_time_{shuffle_type}_{run}.pkl")
elif mode == "traits":
exposure = pd.read_pickle(f"{path}/exposure_fitness_7_1_shuffled_time_traits_{shuffle_type}_{run}.pkl")
cns.op_nodes = pd.read_pickle(f"{path}/op_nodes_shuffled_time_{shuffle_type}_{run}.pkl")
if analysis == "expo_nmb":
exposure.rename(columns={"exposure":"exposure_old", "n_influencer_summed":"exposure"},inplace=True)
#restrict
exposure.reset_index(inplace=True)
exposure = exposure.loc[(exposure.time >= pd.to_datetime(start)) & (exposure.time <= | pd.to_datetime(end) | pandas.to_datetime |
### MOVE TO UTIL
import urllib
import os
import re
import sklearn.metrics as metrics
import numpy as np
import stanfordnlp
import pandas as pd
from bllipparser import RerankingParser
from nltk import Tree
from nltk.draw.util import CanvasFrame
from nltk.draw import TreeWidget
import svgling
import pickle
from negbio.pipeline import text2bioc
import bioc
import itertools
from textblob import TextBlob
from tqdm import tqdm_notebook
import os
import bioc
import tqdm
from pathlib2 import Path
from negbio.chexpert.stages.aggregate import NegBioAggregator
from negbio.chexpert.stages.classify import ModifiedDetector, CATEGORIES
from negbio.chexpert.stages.extract import NegBioExtractor
from negbio.chexpert.stages.load import NegBioLoader
from negbio.pipeline import text2bioc, negdetect
from negbio.pipeline.parse import NegBioParser
from negbio.pipeline.ptb2ud import NegBioPtb2DepConverter, Lemmatizer
from negbio.pipeline.ssplit import NegBioSSplitter
from negbio.main_chexpert import pipeline
PARSING_MODEL_DIR = "~/.local/share/bllipparser/GENIA+PubMed"
CHEXPERT_PATH = "NegBio/negbio/chexpert/"
MENTION_PATH =f"{CHEXPERT_PATH}phrases/mention"
UNMENTION_PATH = f"{CHEXPERT_PATH}phrases/"
NEG_PATH = f'{CHEXPERT_PATH}patterns/negation.txt'
PRE_NEG_PATH = f'{CHEXPERT_PATH}patterns/pre_negation_uncertainty.txt'
POST_NEG_PATH = f'{CHEXPERT_PATH}patterns/post_negation_uncertainty.txt'
PHRASES_PATH = f"{CHEXPERT_PATH}phrases/"
TEST_PATH = "stanford_report_test.csv"
test_df = pd.read_csv(TEST_PATH)
CATEGORIES = ["Cardiomegaly",
"Lung Lesion", "Airspace Opacity", "Edema", "Consolidation",
"Pneumonia", "Atelectasis", "Pneumothorax", "Pleural Effusion",
"Pleural Other", "Fracture"]
test_df = test_df[['Report Impression'] + CATEGORIES]
test_df = test_df.replace(1, True).fillna(False).replace(0, False).replace(-1, False)
def get_dict(path):
label_to_mention = {}
mention_files = os.listdir(path)
for f in mention_files:
with open(os.path.join(path, f)) as mention_file:
condition = os.path.basename(f)[:-4]
condition = condition.replace("_", " ").title()
if condition not in label_to_mention:
label_to_mention[condition] = []
for line in mention_file:
label_to_mention[condition].append(line.split("\n")[0])
return label_to_mention
mentions = get_dict(PHRASES_PATH + "mention")
unmentions = get_dict(PHRASES_PATH + "unmention")
mentions_pk = "mentions.pkl"
unmentions_pk = "unmentions.pkl"
pickle.dump(mentions, open(mentions_pk, "wb"))
pickle.dump(unmentions, open(unmentions_pk, "wb"))
mentions = pickle.load(open(mentions_pk, "rb"))
unmentions = pickle.load(open(unmentions_pk, "rb"))
## MOVE TO UTIL
def get_mention_keywords(observation):
if observation in mentions:
return mentions[observation]
else:
return []
chexpert_results_mention = {
'No Finding': 0.769,
'Lung Lesion': 0.896,
'Fracture': 0.975,
'Pleural Other': 0.850,
'Pleural Effusion': 0.985,
'Pneumonia': 0.660,
'Pneumothorax': 1.000,
'Lung Opacity': 0.966,
'Edema': 0.996,
'Support Devices': 0.933,
'Atelectasis': 0.998,
'Enlarged Cardiomediastinum': 0.935,
'Cardiomegaly': 0.973,
'Consolidation': 0.999
}
chexpert_results_unmention = {
'No Finding': float("nan"),
'Lung Lesion': 0.900,
'Fracture': 0.807,
'Pleural Other': 1.00,
'Pleural Effusion': 0.971,
'Pneumonia': 0.750,
'Pneumothorax': 0.977,
'Lung Opacity': 0.914,
'Edema': 0.962,
'Support Devices': 0.720,
'Atelectasis': 0.833,
'Enlarged Cardiomediastinum': 0.959,
'Cardiomegaly': 0.909,
'Consolidation': 0.981
}
## MOVE TO UTIL
def get_bioc_collection(df):
collection = bioc.BioCCollection()
splitter = NegBioSSplitter()
for i, report in enumerate(df["Report Impression"]):
document = text2bioc.text2document(str(i), report)
document = splitter.split_doc(document)
collection.add_document(document)
return collection
def clean(sentence):
"""Clean the text."""
punctuation_spacer = str.maketrans({key: f"{key} " for key in ".,"})
lower_sentence = sentence.lower()
# Change `and/or` to `or`.
corrected_sentence = re.sub('and/or',
'or',
lower_sentence)
# Change any `XXX/YYY` to `XXX or YYY`.
corrected_sentence = re.sub('(?<=[a-zA-Z])/(?=[a-zA-Z])',
' or ',
corrected_sentence)
# Clean double periods
clean_sentence = corrected_sentence.replace("..", ".")
# Insert space after commas and periods.
clean_sentence = clean_sentence.translate(punctuation_spacer)
# Convert any multi white spaces to single white spaces.
clean_sentence = ' '.join(clean_sentence.split())
return clean_sentence
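# Worked example of clean(), traced by hand:
#   clean("Mild edema and/or atelectasis..No pneumothorax/effusion")
#   -> "mild edema or atelectasis. no pneumothorax or effusion"
# i.e. lower-cased, "and/or" and letter/letter slashes rewritten to "or", the double
# period collapsed, and single spaces enforced after punctuation.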
def calculate_f1(df, pred_frame):
# calculate F1
results = | pd.DataFrame() | pandas.DataFrame |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = | tm.makeDataFrame() | pandas.util.testing.makeDataFrame |
# -*- coding: utf-8 -*-
import re,pandas as pd,numpy as np
from pandas import DataFrame
import os
pathDir=os.listdir(r'C:\Users\aklasim\Desktop\Py6.11 Pdf\t1')
pt=(r'C:\Users\aklasim\Desktop\Py6.11 Pdf\t1')
cols=['工单编号','上级工单编号','项目编号','工单描述','上级工单描述','施工单位','合同号','计划服务费','开工日期','完工日期','作业类型','通知单创建','通知单批准','计划','待审','下达','验收确认','完工确认','完工时间','打印者','打印日期','工序号','工作中心','控制码','工序内容','计划量','签证','物料编码','物料描述','单位计划量','出库量','签证']
l=[]
x=0
l1=[]
dfb = pd.DataFrame(columns=['工单编号', '上级工单编号', '项目编号', '工单描述', '上级工单描述', '施工单位', '合同号', '计划服务费','开工日期', '完工日期', '作业类型', '通知单创建', '通知单批准', '计划', '待审', '下达', '验收确认','完工确认', '完工时间', '打印者', '打印日期', '工序号', '工作中心', '控制码', '工序内容', '计划量',
'签证', '物料编码', '物料描述', '单位计划量', '出库量', '签证', '单位', '数量确认'])
for filename in pathDir:
x=x+1
df = pd.DataFrame(index=range(30), columns=cols)
def gg(rg,n):
e=[]
f = open(pt + '\\' + filename, encoding='gbk')
for line in f:
d=re.search(rg,line)
if d:
d=str(d.group())
e.append(d)
print(e)
df[n]=pd.Series(e)
f.close()
desc=gg('工单描述\s\S+','工单描述')#desc = re.findall('工单描述\s\S+', line)
n=gg('工单编号\s\d+','工单编号')
up_n=gg('上级工单编号\s\d+','上级工单编号') #sup_desc = re.findall('上级工单描述\s\d+', line)
pro_n=gg('项目编号\s\d+','项目编号') #pro_co=re.findall('项目编号\s\d+',line)
unit=gg('施工单位\s\S+','施工单位')#unit= re.findall('施工单位\s\S+', line)
contr_co=gg('合同号\s\d+','合同号') #contr_co = re.findall('合同号\s\d+', line)
cost=gg('计划服务费\s+\d+\,*\d*\.\d+','计划服务费')#cost = re.findall('计划服务费\s+\d+\,*\d*\.\d+', line)
#if len(cost)>0:
# money=cost[0].split()[1]
start_d=gg('开工日期\s\S+','开工日期')#start_d = re.findall('开工日期\s\S+', line)
over_d=gg('完工日期\s\S+','完工日期')#over_d = re.findall('完工日期\s\S+', line)
worktp = gg('作业类型\s\S+', '作业类型')#worktp = re.findall('作业类型\s\S+', line)
#ntc_crt = re.findall('通知单创建\s\S+', line)
#ntc_pmt = re.findall('通知单批准\s\S+', line)
#plan = re.findall('计划\s\S+', line)
#ass= re.findall('待审\s\S+', line)
#order= re.findall('下达\s\S+', line)
#acpt_ck = re.findall('验收确认\s\S+', line)
#fns_ck = re.findall('完工确认\s\S+', line)
#fns_tm = re.findall('完工时间\s\S+', line)
#printer = re.findall('打印者:\S+', line)
#prt_d = re.findall('打印日期:\d+-\d+-\d+', line)
ntc_crt = gg('通知单创建\s\S+', '通知单创建')
ntc_pmt = gg('通知单批准\s\S+', '通知单批准')
plan = gg('计划\s\S+', '计划')
ass= gg('待审\s\S+', '待审')
order= gg('下达\s\S+', '下达')
acpt_ck = gg('验收确认\s\S+', '验收确认')
fns_ck = gg('完工确认\s\S+', '完工确认')
fns_tm = gg('完工时间\s\S+', '完工时间')
printer = gg('打印者:\S+', '打印者')
prt_d = gg('打印日期:\d+-\d+-\d+', '打印日期')
wp_num = []
wk_ctr = []
ctr_code = []
wp_contts = []
cert = []
f = open(pt + '\\' + filename, encoding='gbk')
for line in f:
proc_set = re.findall('(^\d+)\s(\D+\d*)(\D+\d*)\s((\S*\d*\s*\.*)+)(\d+\.*\d*\D+)+\n', line)#426
        if proc_set:  # operation no. / work center / control code / operation description / sign-off (工序号/工作中心/控制码/工序内容/签证)
sets=list(proc_set[0])
wp_num.append(sets[0])
wk_ctr.append (sets[1])
ctr_code.append (sets[2])
wp_contts.append (sets[3])
cert.append (sets[5])
df['工序号']=pd.Series(wp_num)
df['工作中心']=pd.Series(wk_ctr)
df['控制码']=pd.Series(ctr_code)
df['工序内容']=pd.Series(wp_contts)
df['签证']=pd.Series(cert)
wp_num = []
mat_code = []
mat_descr = []
msr_unit = []
all_num = []
cert=[]
f.close()
f = open(pt + '\\' + filename, encoding='gbk')
for line in f:
mat_set = re.findall('(^\d+)\s(\d+)\s((\S*\s*)+)\s(\D)\s((\d\.*\d*\s*)+)\n', line) # 140
        if mat_set:  # operation no. / material code / material description / unit / confirmed qty / planned qty / issued qty / sign-off (工序号/物料编码/物料描述/单位/数量确认/计划量/出库量/签证)
sets = list(mat_set[0])
wp_num.append(sets[0])
mat_code.append(sets[1])
mat_descr.append(sets[2])
msr_unit.append(sets[4])
all_num.append(sets[5])
cert.append(sets[6])
df['工序号']=pd.Series(wp_num)
df['物料编码']=pd.Series(mat_code)
df['物料描述']=pd.Series(mat_descr)
df['单位']=pd.Series(msr_unit)
df['数量确认']=pd.Series(all_num)
df['签证']=pd.Series(cert)
filename=int(x)
print(dfb.columns)
print(df.columns)
dfb= | pd.concat([dfb,df]) | pandas.concat |
import pandas as pd
import plotly.graph_objs as go
import requests
import numpy as np
import plotly.colors
#from collections import OrderedDict
from pandas.io.json import json_normalize
#country_default = OrderedDict([['Bulgaria', 'a'], ['Canada', 'h'], ['Croatia', 'g'], ['Cyprus', 'gh'], ['Egypt', 'hg'], ['France', 'go'], ['Germany', 'n'], ['Greece', 'gr'], ['Italy', 'it'], ['Jordan', 'j'], ['Kenya', 'ke'], ['Kuwait', 'ku'], ['Lebanon', 'lb'], ['Morocco', 'fg'], ['Romania', 'ro'], ['Russia', 'ru'], ['Spain', 'sp'], ['Turkey', 'tr'], ['UAE', 'ae'], ['UK', 'yl'], ['USA', 'usa'], ['Ukraine', 'uk']])
#countries=country_default
def return_figures():
"""Creates four plotly visualizations
Args:
None
Returns:
list (dict): list containing the four plotly visualizations
"""
# when the countries variable is empty, use the country_default dictionary
#if not bool(countries):
#countries = country_default
urls = []
url = 'https://corona.lmao.ninja/v3/covid-19/countries'
urls.append(url)
r = requests.get(url)
df = json_normalize(r.json())
df = df.drop(['countryInfo._id', 'countryInfo.iso2', 'countryInfo.flag', 'updated','oneCasePerPeople', 'oneDeathPerPeople', 'oneTestPerPeople'], axis=1)
options = ['Bulgaria', 'Croatia', 'Cyprus', 'Egypt', 'France', 'Germany', 'Greece', 'Italy', 'Jordan', 'Kenya', 'Kuwait', 'Lebanon', 'Morocco', 'Romania', 'Russia', 'Spain', 'Turkey', 'UAE', 'Ukraine']
df = df[df['country'].isin(options)]
df = df.reset_index()
df = df.drop('index', axis=1)
country_list = df['country'].tolist()
# first chart plots Today's: Cases, Deaths, Recovered per Country
# as a relative bar chart
graph_one = []
df_one = pd.DataFrame(df)
df_one.sort_values('todayCases', ascending=False, inplace=True)
for country in country_list:
x_val = df_one.country.tolist()
graph_one.append(
go.Bar(
x = x_val,
y = df_one.todayCases.tolist(),
name = "Today's Cases"
)
)
graph_one.append(go.Bar(
x = x_val,
y= df_one.todayDeaths.tolist(),
name = "Today's Deaths"
)
)
graph_one.append(go.Bar(
x = x_val,
y= df_one.todayRecovered.tolist(),
name = "Today's Recovered",
)
)
layout_one = dict(title = "Today's: Cases, Deaths, Recovered per Country",
xaxis = dict(title = 'Country',),
yaxis = dict(title = 'Population'),
barmode='relative',
autosize=False,
width=600,
height=500
)
# second chart plots Cases, Deaths, Recovered and Critical per Country as a relative bar chart
graph_two = []
df_two = | pd.DataFrame(df) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn import tree
import sklearn as sk
import collections
import pydotplus
from sklearn.externals import joblib
from sklearn.utils import resample
import sys
import os
output_dir = '/home/strikermx/output_dir/model_'+sys.argv[1] +'2'
data_dir = '/home/strikermx/data_dir/model_'+sys.argv[1]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
#
#load data to dataframe
df = pd.read_csv(data_dir + '/training_' + sys.argv[1] + '.dat', sep='|')
#downsampling dataset to maintain 0.5 label prior
df_majority = df[df.label==0]
df_minority = df[df.label==1]
df_minority.count()
df_majority_downsampled = resample(df_majority,
replace=True, # sample with replacement
n_samples=len(df_minority.index), # to match majority class
random_state=123) # reproducible results
balanced_df = pd.concat([df_majority_downsampled, df_minority])
#write file
balanced_df.to_csv(data_dir + '/train_up20bl_' + sys.argv[1] + '.dat', sep='|')
# convert label data to y axis
Y = balanced_df.values[:,88]
#drop unable to use column and label column
balanced_df.drop(['analytic_id', 'label','voice_pay_per_use_p3','voice_quota_minute_p3'], axis=1, inplace=True)
# convert features data to x axis
X = balanced_df.values[:, :]
#convert type of X and Y
X = X.astype(np.float32)
Y = Y.astype(np.int)
#train model
clf_gini = DecisionTreeClassifier(criterion = "gini", max_leaf_nodes = 80, max_depth = 14)
clf_gini.fit(X, Y)
#save model to file
joblib.dump(clf_gini, output_dir + '/dt_' + sys.argv[1] +'.pkl')
#make fitted prediction
y_pred = clf_gini.predict(X)
#get all model decision node
node = clf_gini.tree_.apply(X)
#make prob fitted prediction
prob = clf_gini.predict_proba(X)
#convert to dataframe
probdf = pd.DataFrame(prob)
predictpd = pd.DataFrame(y_pred)
nodepd = pd.DataFrame(node)
#renaming dataframe column
predictpd.columns = ['predict']
nodepd.columns = ['leaf_node']
probdf.columns = ['prob_0','prob_1']
#join probdf and nodepd dataframe
probdf = pd.concat([probdf,nodepd], axis=1)
#make feature dict from balanced_df
data_feature_names = list(balanced_df.columns.values)
feature_dict = dict(enumerate(data_feature_names))
#get sample analytic_id
sample = df['analytic_id']
sampledf = | pd.DataFrame(sample) | pandas.DataFrame |
import logging
from os import path
import pandas as pd
import numpy as np
from activitysim.core import (
inject,
config,
assign,
pipeline,
tracing,
)
from asimtbm.utils import skims
from asimtbm.utils import trips
from asimtbm.utils import tracing as trace
logger = logging.getLogger(__name__)
YAML_FILENAME = 'destination_choice.yaml'
ORIGIN_TRIPS_KEY = 'orig_zone_trips'
@inject.step()
def destination_choice(zones, data_dir, trace_od):
"""ActivitySim step that creates a raw destination choice table
and calculates utilities.
settings.yaml must specify 'destination_choice' under 'models' for
this step to run in the pipeline. destination_choice.yaml must also
specify the following:
- spec_file_name: <expressions csv>
- aggregate_od_matrices:
- skims: <skims omx file>
- dest_zone: <list of destination zone attribute columns>
- orig_zone_trips: <dict of num trips for each segment>
@inject.step before the method definition registers this step with the pipeline.
Parameters
----------
zones : pandas DataFrame of zone attributes
data_dir : str, data directory path
trace_od : list or dict
origin-destination pair to trace.
Returns
-------
None
but writes final dataframe to csv and registers it
to the pipeline.
"""
logger.info('running destination choice step ...')
model_settings = config.read_model_settings(YAML_FILENAME)
locals_dict = create_locals_dict(model_settings)
od_index = create_od_index(zones.to_frame())
skims_dict = skims.read_skims(zones.index, data_dir, model_settings)
locals_dict.update(skims_dict)
zone_matrices = create_zone_matrices(zones.to_frame(), od_index, model_settings)
locals_dict.update(zone_matrices)
segments = model_settings.get(ORIGIN_TRIPS_KEY)
spec = read_spec_file(model_settings, segments)
od_table = create_od_table(od_index, spec, locals_dict, trace_od)
trips.calculate_num_trips(od_table, zones, spec, locals_dict,
segments, trace_od=trace_od)
# This step is not strictly necessary since the pipeline
# closes remaining open files on exit. This just closes them
# now instead of leaving them open consuming memory for subsequent steps.
skims.close_skims(locals_dict)
logger.info('finished destination choice step.')
def create_locals_dict(model_settings):
"""Initial local parameters for the destination choice step.
These will be expanded later and used in subsequent evaluations.
Gets both constants and math expressions from model settings
Parameters
----------
model_settings : dict
Returns
-------
dict
"""
locals_dict = {}
constants = config.get_model_constants(model_settings)
math = model_settings.get('numpy')
math_functions = {name: getattr(np, name) for name in math}
locals_dict.update(constants)
locals_dict.update(math_functions)
return locals_dict
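# Illustrative result (hypothetical settings): if destination_choice.yaml held
# constants such as {"DIST_THRESHOLD": 5.0} and numpy: ["exp", "log"], locals_dict
# would map "DIST_THRESHOLD" -> 5.0 and "exp"/"log" -> np.exp/np.log, ready to be
# handed to the expression evaluator together with the skims added below.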
def create_od_index(zones_df):
orig = np.repeat(np.asanyarray(zones_df.index), zones_df.shape[0])
dest = np.tile(np.asanyarray(zones_df.index), zones_df.shape[0])
od_df = pd.DataFrame({'orig': orig, 'dest': dest})
return | pd.MultiIndex.from_frame(od_df) | pandas.MultiIndex.from_frame |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 5 09:18:08 2018
@author: a.heude
"""
# Purpose of this script: cross-validate the geological model by recognising lithologies from physico-chemical parameters
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from collections import Counter
import sys
print(sys.executable)
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.cm as cm
import sklearn.mixture as mix
from sklearn.decomposition import PCA
import matplotlib.font_manager
matplotlib.rcParams.update({'font.size': 20})
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 20}
matplotlib.rc('font', **font)
pre_tun=pd.read_csv('file_1.csv',sep=';',encoding = "ISO-8859-1")
tun=pd.read_csv('file_2.csv',sep=';',encoding = "ISO-8859-1")
def select_dominant_litho(data,list_lithos,nom_ref):
#function that selects the dominant lithology in the training dataset
#return a dataset with the samples and their dominant lithology
#####
#data is the dataframe containing the data
#list_lithos is the list of columns names for the lithologies
#nom_ref is the name of the column containing sample references
#####
final_df=pd.DataFrame()
final_df=pd.concat([final_df,data[nom_ref]],axis=1)
list_=[]
for rows in range(len(data)):
i=0
for lithos in list_lithos:
if data[lithos][rows]>i:
i=data[lithos][rows]
main_litho=lithos
list_.append(main_litho)
final_df=pd.concat([final_df,pd.DataFrame(list_)],axis=1)
return final_df
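# Worked example (hypothetical row): with list_lithos = ['geo_p2', 'geo_p3'] and a
# sample whose proportions are geo_p2 = 0.3 and geo_p3 = 0.7, the row added to the
# returned frame is (sample_ref, 'geo_p3'), i.e. the lithology with the largest share.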
def clust_model_training(data,pred,nom_ref,list_lithos,n_clusters,cv_type='full',pca=False,n_comp=2):
#function that trains a clustering algorithm
#return a dataset with the samples and their dominant lithology
#####
#data is the dataframe containing the training data
#pred is the list of names for the predictors
#list_lithos is the list of columns names for the lithologies
#nom_ref is the name of the column containing sample references
#n_clusters is the number of clusters to compute
#cv_type is the type of covariance (for GM algo)
#pca=True or False if a PCA is to be computed on the predictors (allows the visualisation of clusters and data, etc..)
#n_comp is the number of components to consider if pca is activated
#####
final_df=pd.DataFrame()
'built in of predictors'
clean_up=pd.DataFrame()
for i in pred:
clean_up=pd.concat([clean_up,data[i]],axis=1)
clean_up_scale=clean_up
for j in list_lithos:
clean_up=pd.concat([clean_up,data[j]],axis=1)
clean_up=pd.concat([clean_up,data[nom_ref]],axis=1)
clean_up.dropna(inplace=True)
clean_up_scale.dropna(inplace=True)
clean_up.index = pd.RangeIndex(len(clean_up.index))
clean_up_scale.index = pd.RangeIndex(len(clean_up_scale.index))
predictors=pd.DataFrame()
for i in pred:
predictors=pd.concat([predictors,clean_up[i]],axis=1)
list_pred=list(predictors.columns)
predictors.index = pd.RangeIndex(len(predictors.index))
scaler=StandardScaler()
scaler.fit(clean_up_scale)
predictors=scaler.transform(predictors)
predictors=pd.DataFrame(predictors)
predictors.columns=list_pred
if pca==True:
predictors=dim_reduction_pca(predictors,n_comp)
#trying other clustering algorithms can be interesting
#kmeans = KMeans(n_clusters=n_clusters)
#kmeans=SpectralClustering(n_clusters=n_clusters, affinity='nearest_neighbors',assign_labels='kmeans')
#Here GM has shown good results
kmeans=mix.GaussianMixture(n_components=n_clusters, covariance_type = cv_type)
try:
y_kmeans = kmeans.fit_predict(predictors)
except:
y_kmeans = kmeans.fit(predictors).predict(predictors)
dataframe_litho=select_dominant_litho(clean_up,list_lithos,nom_ref)
final_df=pd.concat([final_df,pd.DataFrame(y_kmeans),dataframe_litho],axis=1)
final_df.columns=['clusters','sam_ref','lithos']
return final_df,predictors,kmeans, y_kmeans,scaler
def assess_cluster_models(range_n_clusters,data,pred,nom_ref,list_lithos,cv_type,pca=False,n_comp=2):
#function that assess a clustering model through different number of clusters with silhouettes
#code largely extracted from: http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_silhouette_analysis.html
#####
#range_n_clusters is the range of clusters to test
#data is the dataframe containing the training data
#pred is the list of names for the predictors
#list_lithos is the list of columns names for the lithologies
#nom_ref is the name of the column containing sample references
#n_clusters is the number of clusters to compute
#cv_type is the type of covariance (for GM algo)
#pca=True or False if a PCA is to be computed on the predictors (allows the visualisation of clusters and data, etc..)
#n_comp is the number of components to consider if pca is activated
#####
#Figures are saved in a path
for n_clusters in range_n_clusters:
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
final_df,predictors,clusterer,cluster_labels,scaler = clust_model_training(data,pred,nom_ref,list_lithos,n_clusters,cv_type,pca,n_comp)
# Create a subplot with 1 row and 2 columns
if pca==True:
fig, (ax1, ax2) = plt.subplots(1, 2)
if pca==False:
fig, (ax1) = plt.subplots(1, 1)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(predictors) + (n_clusters + 1) * 10])
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(predictors, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(predictors, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
if pca==True:
# 2nd Plot showing the actual clusters formed
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(predictors.iloc[:, 0], predictors.iloc[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# Labeling the clusters
try:
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
except:
print("les centres ne peuvent pas être affichés")
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
figure=plt
name_figure="PATH_%s"%(str(n_clusters) + '_clusters')
filename_lc="%s.png"%name_figure
figure.savefig(filename_lc)
plt.clf()
return
def swarm_plot(dataframe,nom_classifier,nom_elem):
#function that performs a swarm plot on the repartition of lithologies within each cluster
#####
#dataframe is the dataframe containing the data
#nom_classifier is the x-axis (lithologies)
#nom_elem is the y-axis (clusters)
#####
#Figures are saved in a path
lithos={'geo_p2': '','geo_p3':'','geo_p4':'','geo_p5':''}
codes=list(dataframe[nom_classifier].astype('category').cat.categories)
global df_bp,dict_prop
df_bp=pd.concat([dataframe[nom_classifier],dataframe[nom_elem]],axis=1)
#Swarm plot
ident=nom_elem
figure2 = plt.figure(figsize = (10,10))
splot=sns.swarmplot(df_bp.iloc[:,0],df_bp.iloc[:,1])
splot.set_xlim(-1,len(codes))
    plt.ylabel('distribution according to the geology described in the field')
plt.title(ident,size=20)
plt.xlabel(nom_classifier,size=20)
plt.ylabel(nom_elem,size=20)
plt.legend(lithos.items())
plt.tight_layout(pad=1.5, w_pad=1.5, h_pad=1.5)
figure2.savefig("PATH_beeswarm_%s.png"%(nom_classifier + '_' + ident))
plt.clf()
return
def dim_reduction_pca(data,n_digits):
#function that performs a PCA dimension reduction
#####
#data is a dataframe
#n_digits is the number of components to keep
#####
pca=PCA(n_components=n_digits)
reduced_data = pca.fit_transform(data)
reduced_data=pd.DataFrame(reduced_data)
diag=pca.explained_variance_ratio_
print(diag)
return reduced_data
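# Example call (illustrative): dim_reduction_pca(predictors, 2) prints the explained
# variance ratio of each component and returns the first two principal components as
# an (unnamed-column) DataFrame.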
def home_weighted_scoring(dataframe,target,clusters):
#function that performs a scoring according to how well clusters can delimitate lithologies
#####
#dataframe is a dataframe of the data
#target is the classification we want to be well delimited
#clusters are the clusers assigned to each sample
#####
#dictionnary containing the results
dct=dict()
n_samples=len(dataframe)
for rows in range(len(dataframe)):
if dataframe[clusters][rows] not in dct.keys():
dct[dataframe[clusters][rows]]=[dataframe[target][rows]]
else:
dct[dataframe[clusters][rows]].append(dataframe[target][rows])
#find the dominant litho in each cluster
def dominant_litho(dictionnary,key):
liste = dictionnary[key]
count=Counter(liste)
maxi=0
for keys, values in count.items():
if values > maxi:
maxi=values
dominant=keys
return dominant,maxi
#calculate scoring for a cluster as the proportion of dominant over the other classes
def score(dictionnary, key):
liste = dictionnary[key]
dominant,maximum=dominant_litho(dictionnary,key)
score = maximum/len(liste)
n_sample_cluster=len(liste)
return score,n_sample_cluster
final_score=0
for keys in dct.keys():
scoring,n_sample_cluster=score(dct,keys)
final_score=final_score+ scoring*(n_sample_cluster/n_samples)
#returns final score
return final_score
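# Worked example (hypothetical labels): clusters {0: ['granite', 'granite', 'schist'],
# 1: ['schist', 'schist']} give per-cluster purities 2/3 and 2/2; weighting by cluster
# size over the 5 samples gives final_score = (2/3)*(3/5) + (2/2)*(2/5) = 0.8.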
def assign_values_clusters(dataframe,target,clusters):
#function that assigns, for each cluster its proportions of classes from the classification in target
#####
#dataframe is a dataframe of the data
#target is the classification we want to be well delimited
#clusters are the clusers assigned to each sample
#####
    # dictionary containing the results
dct=dict()
for rows in range(len(dataframe)):
if dataframe[clusters][rows] not in dct.keys():
dct[dataframe[clusters][rows]]=[dataframe[target][rows]]
else:
dct[dataframe[clusters][rows]].append(dataframe[target][rows])
def proportions_litho(dictionnary,key):
liste = dictionnary[key]
count=Counter(liste)
proportions=[(i, count[i] / len(liste)) for i in count]
return proportions
for keys in dct.keys():
proportions=proportions_litho(dct,keys)
dct[keys]=proportions
#return a dictionnary of the results
return dct
def predict_only_clusters(data,pred,nom_ref,model_clusters,scaler):
#function that predicts clusters from a clustering model - on a new dataset, with writing the results in a csv file
#####
#data is the dataframe containing the training data
#pred is the list of names for the predictors
#nom_ref is the name of the column containing sample references
#model_clusters is the clustering model fitted on a training set
#scaler is the scaling step used for the training set
#####
    # build the predictor table
clean_up= | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
import csv
from os import listdir
from os.path import isfile, join
import warnings
from datetime import date, timedelta, datetime
class TransitionMap():
    def __init__(self, energy_type):
        # energy_type must be set before the maps are loaded, since the CSV file
        # names are built from it
        self._energy_type = energy_type
        self._transition_maps = self._download_transition_maps()
def get_transition_map_hour(self, hour):
"""Get the transition map for a specific hour
Args:
            hour: int, hour of day (0-23)
Return:
transition_map_hour: pandas.DataFrame
"""
return self._transition_maps[hour]
def _download_transition_maps(self):
"""Load the CSV transition maps
JOE -- your input should match this
Return:
            transition_maps: dict of pandas.DataFrames keyed by hour (int)
"""
working_directory = os.getcwd()
transition_maps = {}
        for hour in range(0, 24):  # one map per hour of day (0-23)
            filepath = (working_directory + "/" + str(self._energy_type)
                        + "_hour_" + str(hour))
transition_maps[hour] = pd.read_csv(filepath)
return transition_maps
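# Example usage (illustrative; assumes CSVs named like "<cwd>/solar_hour_0" exist and
# that "solar" is a valid energy type for this project):
# tmap = TransitionMap("solar")
# hour_3 = tmap.get_transition_map_hour(3)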
def consolidate_csvs(folder_path, csv_path):
"""
This will take a folder with MMS csvs and create a new csv with just the demand and energy data.
"""
# with open(csv_path) as output:
# writer = csv.writer(output)
# # write header of output file
# writer.writerow(["Timestamp", "RRP", "Total_Demand"])
df = | pd.DataFrame(columns=["Timestamp", "Region", "Price", "Demand"]) | pandas.DataFrame |
# coding: utf-8
# # Windows 10 Coin
#
# train: (row: 1,347,190, columns: 1,085)
# test: (row: 374,136, columns: 1,084)
#
# y value: if HasClicked == True, app 1.8%
#
# How to run
# 1. Put the train and test files in ..\input
# 2. Put the script file in ..\script
# 3. In Jupyter Notebook, run all and get submission file in the same script folder
# In[1]:
# Timer and file info
import math
import time
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc # We're gonna be clearing memory a lot
import matplotlib.pyplot as plt
import seaborn as sns
import random
import lightgbm as lgb
import hashlib
#from ml_metrics import mapk
from datetime import datetime
import re
import csv
#import pickle
#from sklearn.tree import DecisionTreeClassifier
#from sklearn.ensemble import ExtraTreesClassifier
#from sklearn.ensemble import RandomForestClassifier
from sklearn import ensemble
from sklearn import model_selection
from sklearn.metrics import matthews_corrcoef, f1_score, classification_report, confusion_matrix, precision_score, recall_score
# Timer
class Timer:
def __init__(self, text=None):
self.text = text
def __enter__(self):
self.cpu = time.clock()
self.time = time.time()
if self.text:
print("{}...".format(self.text))
print(datetime.now())
return self
def __exit__(self, *args):
self.cpu = time.clock() - self.cpu
self.time = time.time() - self.time
if self.text:
print("%s: cpu %0.2f, time %0.2f\n" % (self.text, self.cpu, self.time))
# Split to train and holdout sets with counts
def sample_train_holdout(_df, sample_count, holdout_count):
random.seed(7)
sample_RowNumber = random.sample(list(_df['RowNumber']), (sample_count + holdout_count))
train_RowNumber = random.sample(sample_RowNumber, sample_count)
holdout_RowNumber = list(set(sample_RowNumber) - set(train_RowNumber))
holdout = _df[_df['RowNumber'].isin(holdout_RowNumber)].copy()
_df = _df[_df['RowNumber'].isin(train_RowNumber)]
return _df, holdout
# Sampling for train and holdout with imbalanced binary label
def trainHoldoutSampling(_df, _id, _label, _seed=7, t_tr=0.5, t_ho=0.5, f_tr=0.05, f_ho=0.5):
random.seed(_seed)
positive_id = list(_df[_df[_label]==True][_id].values)
negative_id = list(_df[_df[_label]==False][_id].values)
train_positive_id = random.sample(positive_id, int(len(positive_id) * t_tr))
holdout_positive_id = random.sample(list(set(positive_id)-set(train_positive_id)), int(len(positive_id) * t_ho))
train_negative_id = random.sample(negative_id, int(len(negative_id) * f_tr))
holdout_negative_id = random.sample(list(set(negative_id)-set(train_negative_id)), int(len(negative_id) * f_ho))
train_id = list(set(train_positive_id)|set(train_negative_id))
holdout_id = list(set(holdout_positive_id)|set(holdout_negative_id))
print('train count: {}, train positive count: {}'.format(len(train_id),len(train_positive_id)))
print('holdout count: {}, holdout positive count: {}'.format(len(holdout_id),len(holdout_positive_id)))
return _df[_df[_id].isin(train_id)], _df[_df[_id].isin(holdout_id)]
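# Example call matching this competition's columns (RowNumber id, HasClicked label):
# train_df, holdout_df = trainHoldoutSampling(df, 'RowNumber', 'HasClicked',
#                                             t_tr=0.5, t_ho=0.5, f_tr=0.05, f_ho=0.5)
# i.e. keep half of the positives for training and sample 5% of the negatives.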
def datetime_features2(_df, _col):
_format='%m/%d/%Y %I:%M:%S %p'
_df[_col] = _df[_col].apply(lambda x: datetime.strptime(x, _format))
colYear = _col+'Year'
colMonth = _col+'Month'
colDay = _col+'Day'
colHour = _col+'Hour'
#colYearMonthDay = _col+'YearMonthDay'
#colYearMonthDayHour = _col+'YearMonthDayHour'
_df[colYear] = _df[_col].apply(lambda x: x.year)
_df[colMonth] = _df[_col].apply(lambda x: x.month)
_df[colDay] = _df[_col].apply(lambda x: x.day)
_df[colHour] = _df[_col].apply(lambda x: x.hour)
#ymd = [colYear, colMonth, colDay]
#ymdh = [colYear, colMonth, colDay, colHour]
#_df[colYearMonthDay] = _df[ymd].apply(lambda x: '_'.join(str(x)), axis=1)
#_df[colYearMonthDayHour] = _df[ymdh].apply(lambda x: '_'.join(str(x)), axis=1)
return _df
# Change date column datetime type and add date time features
def datetime_features(_df, _col, isDelete = False):
    # 1. For years greater than 2017, create a year column with regex and change the year to 2017 in the datetime column
# find and return 4 digit number (1st finding) in dataframe string columns
year_col = _col + 'Year'
_df[year_col] = _df[_col].apply(lambda x: int(re.findall(r"\D(\d{4})\D", " "+ str(x) +" ")[0]))
years = sorted(list(_df[year_col].unique()))
yearsGreaterThan2017 = sorted(i for i in years if i > 2017)
    # Two ways to handle out-of-range years: (1) temporarily change them to 2017, or (2) drop those rows;
    # we go with (1) because test rows cannot be removed anyway
if isDelete:
_df = _df[~_df[year_col].isin(yearsGreaterThan2017)]
else:
for i in yearsGreaterThan2017:
print("replace ", i, " to 2017 for conversion")
_df.loc[_df[year_col] == i, _col] = _df[_df[year_col] == i][_col].values[0].replace(str(i), "2017")
# How to remove strange year rows
# train = train[~train['year'].isin(yearsGreaterThan2017)]
# 2. Convert string to datetime
_df[_col] = pd.to_datetime(_df[_col])
print(_col, "column conversion to datetime type is done")
# 3. Add more date time features
month_col = _col + 'Month'
week_col = _col + 'Week'
weekday_col = _col + 'Weekday'
day_col = _col + 'Day'
hour_col = _col + 'Hour'
#year_month_day_col = _col + 'YearMonthDay'
#year_month_day_hour_col = _col + 'YearMonthDayHour'
_df[month_col] = pd.DatetimeIndex(_df[_col]).month
_df[week_col] = pd.DatetimeIndex(_df[_col]).week
_df[weekday_col] = pd.DatetimeIndex(_df[_col]).weekday
_df[day_col] = pd.DatetimeIndex(_df[_col]).day
_df[hour_col] = pd.DatetimeIndex(_df[_col]).hour
#_df[year_month_day_col] = _df[[year_col, month_col, day_col]].apply(lambda x: ''.join(str(x)), axis=1)
#_df[year_month_day_hour_col] = _df[[year_col, month_col, day_col, hour_col]].apply(lambda x: ''.join(str(x)), axis=1)
print("year, month, week, weekday, day, hour features are added")
return _df
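# Illustrative call of datetime_features (added sketch; the toy dataframe and its
# 'click_time' column are assumptions made only for this example, and it assumes a
# pandas version where DatetimeIndex.week is still available, as used above):
def _datetime_features_example():
    toy = pd.DataFrame({'click_time': ['2017-11-06 14:32:21', '2019-01-01 03:15:00']})
    toy = datetime_features(toy, 'click_time')  # the 2019 row is mapped to 2017 first
    return toy[['click_timeYear', 'click_timeMonth', 'click_timeWeekday', 'click_timeHour']]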
# Delete rows with list condition for dataframe
def delRows(_df, _col, _list):
_df = _df[~_df[_col].isin(_list)]
return _df
import re
# Create new column using regex pattern for strings for dataframe
def addFeatureRegex(_df, _col, _newCol):
_df[_newCol] = _df[_col].apply(lambda x: int(re.findall(r"\D(\d{4})\D", " "+ str(x) +" ")[0]))
return _df
# Convert string to datetime type
def stringToDatetime(_df, _col):
_df[_col] = _df[_col].astype('datetime64[ns]')
return _df
# Add features from datetime
def addDatetimeFeatures(_df, _col):
_df[_col + 'Year'] = pd.DatetimeIndex(_df[_col]).year
_df[_col + 'Month'] = pd.DatetimeIndex(_df[_col]).month
    _df[_col + 'Week'] = pd.DatetimeIndex(_df[_col]).week
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division
import os
import copy
import unittest
import csv
import json
import numpy as np
import pandas as pd
from multiprocessing import set_start_method
from sklearn.exceptions import NotFittedError
from pymatgen import Structure, Lattice, Molecule
from pymatgen.util.testing import PymatgenTest
from matminer.featurizers.composition import ElementProperty
from matminer.featurizers.site import SiteElementalProperty
from matminer.featurizers.structure import DensityFeatures, \
RadialDistributionFunction, PartialRadialDistributionFunction, \
ElectronicRadialDistributionFunction, \
MinimumRelativeDistances, SiteStatsFingerprint, CoulombMatrix, \
SineCoulombMatrix, OrbitalFieldMatrix, GlobalSymmetryFeatures, \
EwaldEnergy, BondFractions, BagofBonds, StructuralHeterogeneity, \
MaximumPackingEfficiency, ChemicalOrdering, StructureComposition, \
Dimensionality, XRDPowderPattern, CGCNNFeaturizer, JarvisCFID, \
GlobalInstabilityIndex, \
StructuralComplexity
# For the CGCNNFeaturizer
try:
import torch
import cgcnn
except ImportError:
torch, cgcnn = None, None
test_dir = os.path.join(os.path.dirname(__file__))
class StructureFeaturesTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.diamond_no_oxi = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C", "C"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.nacl = Structure(
Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012],
[0, 0, 4.025]]), ["Na1+", "Cl1-"], [[0, 0, 0],
[2.324, 1.643, 4.025]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[2.105, 2.1045, 2.1045], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.ni3al = Structure(
Lattice([[3.52, 0, 0], [0, 3.52, 0], [0, 0, 3.52]]),
["Al", ] + ["Ni"] * 3,
[[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.sc = Structure(Lattice([[3.52, 0, 0], [0, 3.52, 0], [0, 0, 3.52]]),
["Al"], [[0, 0, 0]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False)
self.bond_angles = range(5, 180, 5)
def test_density_features(self):
df = DensityFeatures()
f = df.featurize(self.diamond)
self.assertAlmostEqual(f[0], 3.49, 2)
self.assertAlmostEqual(f[1], 5.71, 2)
self.assertAlmostEqual(f[2], 0.25, 2)
f = df.featurize(self.nacl)
self.assertAlmostEqual(f[0], 2.105, 2)
self.assertAlmostEqual(f[1], 23.046, 2)
self.assertAlmostEqual(f[2], 0.620, 2)
nacl_disordered = copy.deepcopy(self.nacl)
nacl_disordered.replace_species({"Cl1-": "Cl0.99H0.01"})
self.assertFalse(df.precheck(nacl_disordered))
structures = [self.diamond, self.nacl, nacl_disordered]
df2 = pd.DataFrame({"structure": structures})
self.assertAlmostEqual(df.precheck_dataframe(df2, "structure"), 2 / 3)
def test_global_symmetry(self):
gsf = GlobalSymmetryFeatures()
self.assertEqual(gsf.featurize(self.diamond), [227, "cubic", 1, True])
def test_dimensionality(self):
cscl = PymatgenTest.get_structure("CsCl")
df = Dimensionality(bonds={("Cs", "Cl"): 3.5})
self.assertEqual(df.featurize(cscl)[0], 1)
df = Dimensionality(bonds={("Cs", "Cl"): 3.7})
self.assertEqual(df.featurize(cscl)[0], 3)
def test_rdf_and_peaks(self):
## Test diamond
rdforig = RadialDistributionFunction().featurize(
self.diamond)
rdf = rdforig[0]
        # Make sure the last bin is cutoff - bin_size
self.assertAlmostEqual(max(rdf['distances']), 19.9)
# Verify bin sizes
self.assertEqual(len(rdf['distribution']), 200)
# Make sure it gets all of the peaks
self.assertEqual(np.count_nonzero(rdf['distribution']), 116)
# Check the values for a few individual peaks
self.assertAlmostEqual(
rdf['distribution'][int(round(1.5 / 0.1))], 15.12755155)
self.assertAlmostEqual(
rdf['distribution'][int(round(2.9 / 0.1))], 12.53193948)
self.assertAlmostEqual(
rdf['distribution'][int(round(19.9 / 0.1))], 0.822126129)
# Repeat test with NaCl (omitting comments). Altering cutoff distance
rdforig = RadialDistributionFunction(cutoff=10).featurize(self.nacl)
rdf = rdforig[0]
self.assertAlmostEqual(max(rdf['distances']), 9.9)
self.assertEqual(len(rdf['distribution']), 100)
self.assertEqual(np.count_nonzero(rdf['distribution']), 11)
self.assertAlmostEqual(
rdf['distribution'][int(round(2.8 / 0.1))], 27.09214168)
self.assertAlmostEqual(
rdf['distribution'][int(round(4.0 / 0.1))], 26.83338723)
self.assertAlmostEqual(
rdf['distribution'][int(round(9.8 / 0.1))], 3.024406467)
# Repeat test with CsCl. Altering cutoff distance and bin_size
rdforig = RadialDistributionFunction(
cutoff=8, bin_size=0.5).featurize(self.cscl)
rdf = rdforig[0]
self.assertAlmostEqual(max(rdf['distances']), 7.5)
self.assertEqual(len(rdf['distribution']), 16)
self.assertEqual(np.count_nonzero(rdf['distribution']), 5)
self.assertAlmostEqual(
rdf['distribution'][int(round(3.5 / 0.5))], 6.741265585)
self.assertAlmostEqual(
rdf['distribution'][int(round(4.0 / 0.5))], 3.937582548)
self.assertAlmostEqual(
rdf['distribution'][int(round(7.0 / 0.5))], 1.805505363)
def test_prdf(self):
# Test a few peaks in diamond
# These expected numbers were derived by performing
# the calculation in another code
distances, prdf = PartialRadialDistributionFunction().compute_prdf(self.diamond)
self.assertEqual(len(prdf.values()), 1)
self.assertAlmostEqual(prdf[('C', 'C')][int(round(1.4 / 0.1))], 0)
self.assertAlmostEqual(prdf[('C', 'C')][int(round(1.5 / 0.1))], 1.32445167622)
self.assertAlmostEqual(max(distances), 19.9)
self.assertAlmostEqual(prdf[('C', 'C')][int(round(19.9 / 0.1))], 0.07197902)
# Test a few peaks in CsCl, make sure it gets all types correctly
distances, prdf = PartialRadialDistributionFunction(cutoff=10).compute_prdf(self.cscl)
self.assertEqual(len(prdf.values()), 4)
self.assertAlmostEqual(max(distances), 9.9)
self.assertAlmostEqual(prdf[('Cs', 'Cl')][int(round(3.6 / 0.1))], 0.477823197)
self.assertAlmostEqual(prdf[('Cl', 'Cs')][int(round(3.6 / 0.1))], 0.477823197)
self.assertAlmostEqual(prdf[('Cs', 'Cs')][int(round(3.6 / 0.1))], 0)
# Do Ni3Al, make sure it captures the antisymmetry of Ni/Al sites
distances, prdf = PartialRadialDistributionFunction(cutoff=10, bin_size=0.5)\
.compute_prdf(self.ni3al)
self.assertEqual(len(prdf.values()), 4)
self.assertAlmostEqual(prdf[('Ni', 'Al')][int(round(2 / 0.5))], 0.125236677)
self.assertAlmostEqual(prdf[('Al', 'Ni')][int(round(2 / 0.5))], 0.37571003)
self.assertAlmostEqual(prdf[('Al', 'Al')][int(round(2 / 0.5))], 0)
# Check the fit operation
featurizer = PartialRadialDistributionFunction()
featurizer.fit([self.diamond, self.cscl, self.ni3al])
self.assertEqual({'Cs', 'Cl', 'C', 'Ni', 'Al'}, set(featurizer.elements_))
featurizer.exclude_elems = ['Cs', 'Al']
featurizer.fit([self.diamond, self.cscl, self.ni3al])
self.assertEqual({'Cl', 'C', 'Ni'}, set(featurizer.elements_))
featurizer.include_elems = ['H']
featurizer.fit([self.diamond, self.cscl, self.ni3al])
self.assertEqual({'H', 'Cl', 'C', 'Ni'}, set(featurizer.elements_))
# Check the feature labels
featurizer.exclude_elems = ()
featurizer.include_elems = ()
featurizer.elements_ = ['Al', 'Ni']
labels = featurizer.feature_labels()
n_bins = len(featurizer._make_bins()) - 1
self.assertEqual(3 * n_bins, len(labels))
self.assertIn('Al-Ni PRDF r=0.00-0.10', labels)
# Check the featurize method
featurizer.elements_ = ['C']
features = featurizer.featurize(self.diamond)
prdf = featurizer.compute_prdf(self.diamond)[1]
self.assertArrayAlmostEqual(features, prdf[('C', 'C')])
# Check the featurize_dataframe
df = pd.DataFrame.from_dict({"structure": [self.diamond, self.cscl]})
featurizer.fit(df["structure"])
df = featurizer.featurize_dataframe(df, col_id="structure")
self.assertEqual(df["Cs-Cl PRDF r=0.00-0.10"][0], 0.0)
self.assertAlmostEqual(df["Cl-Cl PRDF r=19.70-19.80"][1], 0.049, 3)
self.assertEqual(df["Cl-Cl PRDF r=19.90-20.00"][0], 0.0)
# Make sure labels and features are in the same order
featurizer.elements_ = ['Al', 'Ni']
features = featurizer.featurize(self.ni3al)
labels = featurizer.feature_labels()
prdf = featurizer.compute_prdf(self.ni3al)[1]
self.assertEqual((n_bins * 3,), features.shape)
self.assertTrue(labels[0].startswith('Al-Al'))
self.assertTrue(labels[n_bins].startswith('Al-Ni'))
self.assertTrue(labels[2 * n_bins].startswith('Ni-Ni'))
self.assertArrayAlmostEqual(features, np.hstack(
[prdf[('Al', 'Al')], prdf[('Al', 'Ni')], prdf[('Ni', 'Ni')]]))
def test_redf(self):
d = ElectronicRadialDistributionFunction().featurize(
self.diamond)[0]
self.assertAlmostEqual(int(1000 * d["distances"][0]), 25)
self.assertAlmostEqual(int(1000 * d["distribution"][0]), 0)
self.assertAlmostEqual(int(1000 * d["distances"][len(
d["distances"]) - 1]), 6175)
self.assertAlmostEqual(int(1000 * d["distribution"][len(
d["distances"]) - 1]), 0)
d = ElectronicRadialDistributionFunction().featurize(
self.nacl)[0]
self.assertAlmostEqual(int(1000 * d["distances"][0]), 25)
self.assertAlmostEqual(int(1000 * d["distribution"][0]), 0)
self.assertAlmostEqual(int(1000 * d["distances"][56]), 2825)
self.assertAlmostEqual(int(1000 * d["distribution"][56]), -2108)
self.assertAlmostEqual(int(1000 * d["distances"][len(
d["distances"]) - 1]), 9875)
d = ElectronicRadialDistributionFunction().featurize(
self.cscl)[0]
self.assertAlmostEqual(int(1000 * d["distances"][0]), 25)
self.assertAlmostEqual(int(1000 * d["distribution"][0]), 0)
self.assertAlmostEqual(int(1000 * d["distances"][72]), 3625)
self.assertAlmostEqual(int(1000 * d["distribution"][72]), -2194)
self.assertAlmostEqual(int(1000 * d["distances"][len(
d["distances"]) - 1]), 7275)
def test_coulomb_matrix(self):
# flat
cm = CoulombMatrix(flatten=True)
df = pd.DataFrame({"s": [self.diamond, self.nacl]})
with self.assertRaises(NotFittedError):
df = cm.featurize_dataframe(df, "s")
df = cm.fit_featurize_dataframe(df, "s")
labels = cm.feature_labels()
self.assertListEqual(labels,
["coulomb matrix eig 0", "coulomb matrix eig 1"])
self.assertArrayAlmostEqual(df[labels].iloc[0],
[49.169453, 24.546758],
decimal=5)
self.assertArrayAlmostEqual(df[labels].iloc[1],
[153.774731, 452.894322],
decimal=5)
# matrix
species = ["C", "C", "H", "H"]
coords = [[0, 0, 0], [0, 0, 1.203], [0, 0, -1.06], [0, 0, 2.263]]
acetylene = Molecule(species, coords)
morig = CoulombMatrix(flatten=False).featurize(acetylene)
mtarget = [[36.858, 15.835391290, 2.995098235, 1.402827813], \
[15.835391290, 36.858, 1.4028278132103624, 2.9950982], \
[2.9368896127, 1.402827813, 0.5, 0.159279959], \
[1.4028278132, 2.995098235, 0.159279959, 0.5]]
self.assertAlmostEqual(
int(np.linalg.norm(morig - np.array(mtarget))), 0)
m = CoulombMatrix(diag_elems=False,
flatten=False).featurize(acetylene)[0]
self.assertAlmostEqual(m[0][0], 0.0)
self.assertAlmostEqual(m[1][1], 0.0)
self.assertAlmostEqual(m[2][2], 0.0)
self.assertAlmostEqual(m[3][3], 0.0)
def test_sine_coulomb_matrix(self):
# flat
scm = SineCoulombMatrix(flatten=True)
df = pd.DataFrame({"s": [self.sc, self.ni3al]})
with self.assertRaises(NotFittedError):
df = scm.featurize_dataframe(df, "s")
df = scm.fit_featurize_dataframe(df, "s")
labels = scm.feature_labels()
self.assertEqual(labels[0], "sine coulomb matrix eig 0")
self.assertArrayAlmostEqual(
df[labels].iloc[0],
[235.740418, 0.0, 0.0, 0.0],
decimal=5)
self.assertArrayAlmostEqual(
df[labels].iloc[1],
[232.578562, 1656.288171, 1403.106576, 1403.106576],
decimal=5)
# matrix
scm = SineCoulombMatrix(flatten=False)
sin_mat = scm.featurize(self.diamond)
mtarget = [[36.8581, 6.147068], [6.147068, 36.8581]]
self.assertAlmostEqual(
np.linalg.norm(sin_mat - np.array(mtarget)), 0.0, places=4)
scm = SineCoulombMatrix(diag_elems=False, flatten=False)
sin_mat = scm.featurize(self.diamond)[0]
self.assertEqual(sin_mat[0][0], 0)
self.assertEqual(sin_mat[1][1], 0)
def test_orbital_field_matrix(self):
ofm_maker = OrbitalFieldMatrix(flatten=False)
ofm = ofm_maker.featurize(self.diamond)[0]
mtarget = np.zeros((32, 32))
mtarget[1][1] = 1.4789015 # 1.3675444
mtarget[1][3] = 1.4789015 # 1.3675444
mtarget[3][1] = 1.4789015 # 1.3675444
mtarget[3][3] = 1.4789015 # 1.3675444 if for a coord# of exactly 4
for i in range(32):
for j in range(32):
if not i in [1, 3] and not j in [1, 3]:
self.assertEqual(ofm[i, j], 0.0)
mtarget = np.matrix(mtarget)
self.assertAlmostEqual(
np.linalg.norm(ofm - mtarget), 0.0, places=4)
ofm_maker = OrbitalFieldMatrix(True, flatten=False)
ofm = ofm_maker.featurize(self.diamond)[0]
mtarget = np.zeros((39, 39))
mtarget[1][1] = 1.4789015
mtarget[1][3] = 1.4789015
mtarget[3][1] = 1.4789015
mtarget[3][3] = 1.4789015
mtarget[1][33] = 1.4789015
mtarget[3][33] = 1.4789015
mtarget[33][1] = 1.4789015
mtarget[33][3] = 1.4789015
mtarget[33][33] = 1.4789015
mtarget = np.matrix(mtarget)
self.assertAlmostEqual(
np.linalg.norm(ofm - mtarget), 0.0, places=4)
ofm_flat = OrbitalFieldMatrix(period_tag=False, flatten=True)
self.assertEqual(len(ofm_flat.feature_labels()), 1024)
ofm_flat = OrbitalFieldMatrix(period_tag=True, flatten=True)
self.assertEqual(len(ofm_flat.feature_labels()), 1521)
ofm_vector = ofm_flat.featurize(self.diamond)
for ix in [40, 42, 72, 118, 120, 150, 1288, 1320]:
self.assertAlmostEqual(ofm_vector[ix], 1.4789015345821415)
def test_min_relative_distances(self):
self.assertAlmostEqual(MinimumRelativeDistances().featurize(
self.diamond_no_oxi)[0][0], 1.1052576)
self.assertAlmostEqual(MinimumRelativeDistances().featurize(
self.nacl)[0][0], 0.8891443)
self.assertAlmostEqual(MinimumRelativeDistances().featurize(
self.cscl)[0][0], 0.9877540)
def test_sitestatsfingerprint(self):
# Test matrix.
op_struct_fp = SiteStatsFingerprint.from_preset("OPSiteFingerprint",
stats=None)
opvals = op_struct_fp.featurize(self.diamond)
oplabels = op_struct_fp.feature_labels()
self.assertAlmostEqual(opvals[10][0], 0.9995, places=7)
self.assertAlmostEqual(opvals[10][1], 0.9995, places=7)
opvals = op_struct_fp.featurize(self.nacl)
self.assertAlmostEqual(opvals[18][0], 0.9995, places=7)
self.assertAlmostEqual(opvals[18][1], 0.9995, places=7)
opvals = op_struct_fp.featurize(self.cscl)
self.assertAlmostEqual(opvals[22][0], 0.9995, places=7)
self.assertAlmostEqual(opvals[22][1], 0.9995, places=7)
# Test stats.
op_struct_fp = SiteStatsFingerprint.from_preset("OPSiteFingerprint")
opvals = op_struct_fp.featurize(self.diamond)
print(opvals, '**')
self.assertAlmostEqual(opvals[0], 0.0005, places=7)
self.assertAlmostEqual(opvals[1], 0, places=7)
self.assertAlmostEqual(opvals[2], 0.0005, places=7)
self.assertAlmostEqual(opvals[3], 0.0, places=7)
self.assertAlmostEqual(opvals[4], 0.0005, places=7)
self.assertAlmostEqual(opvals[18], 0.0805, places=7)
self.assertAlmostEqual(opvals[20], 0.9995, places=7)
self.assertAlmostEqual(opvals[21], 0, places=7)
self.assertAlmostEqual(opvals[22], 0.0075, places=7)
self.assertAlmostEqual(opvals[24], 0.2355, places=7)
self.assertAlmostEqual(opvals[-1], 0.0, places=7)
# Test coordination number
cn_fp = SiteStatsFingerprint.from_preset("JmolNN", stats=("mean",))
cn_vals = cn_fp.featurize(self.diamond)
self.assertEqual(cn_vals[0], 4.0)
# Test the covariance
prop_fp = SiteStatsFingerprint(SiteElementalProperty(properties=["Number", "AtomicWeight"]),
stats=["mean"], covariance=True)
# Test the feature labels
labels = prop_fp.feature_labels()
self.assertEqual(3, len(labels))
# Test a structure with all the same type (cov should be zero)
features = prop_fp.featurize(self.diamond)
self.assertArrayAlmostEqual(features, [6, 12.0107, 0])
# Test a structure with only one atom (cov should be zero too)
features = prop_fp.featurize(self.sc)
self.assertArrayAlmostEqual([13, 26.9815386, 0], features)
# Test a structure with nonzero covariance
features = prop_fp.featurize(self.nacl)
self.assertArrayAlmostEqual([14, 29.22138464, 37.38969216], features)
def test_ewald(self):
# Add oxidation states to all of the structures
for s in [self.nacl, self.cscl, self.diamond]:
s.add_oxidation_state_by_guess()
# Test basic
ewald = EwaldEnergy(accuracy=2)
self.assertArrayAlmostEqual(ewald.featurize(self.diamond), [0])
self.assertAlmostEqual(ewald.featurize(self.nacl)[0], -8.84173626, 2)
self.assertLess(ewald.featurize(self.nacl),
ewald.featurize(self.cscl)) # Atoms are closer in NaCl
# Perform Ewald summation by "hand",
# Using the result from GULP
self.assertArrayAlmostEqual([-8.84173626], ewald.featurize(self.nacl), 2)
def test_bondfractions(self):
# Test individual structures with featurize
bf_md = BondFractions.from_preset("MinimumDistanceNN")
bf_md.no_oxi = True
bf_md.fit([self.diamond_no_oxi])
self.assertArrayEqual(bf_md.featurize(self.diamond), [1.0])
self.assertArrayEqual(bf_md.featurize(self.diamond_no_oxi), [1.0])
bf_voronoi = BondFractions.from_preset("VoronoiNN")
bf_voronoi.bbv = float("nan")
bf_voronoi.fit([self.nacl])
bond_fracs = bf_voronoi.featurize(self.nacl)
bond_names = bf_voronoi.feature_labels()
ref = {'Na+ - Na+ bond frac.': 0.25, 'Cl- - Na+ bond frac.': 0.5,
'Cl- - Cl- bond frac.': 0.25}
self.assertDictEqual(dict(zip(bond_names, bond_fracs)), ref)
# Test to make sure dataframe behavior is as intended
s_list = [self.diamond_no_oxi, self.ni3al]
df = pd.DataFrame.from_dict({'s': s_list})
bf_voronoi.fit(df['s'])
df = bf_voronoi.featurize_dataframe(df, 's')
# Ensure all data is properly labelled and organized
        self.assertArrayEqual(df['C - C bond frac.'].values, [1.0, np.nan])
        self.assertArrayEqual(df['Al - Ni bond frac.'].values, [np.nan, 0.5])
        self.assertArrayEqual(df['Al - Al bond frac.'].values, [np.nan, 0.0])
        self.assertArrayEqual(df['Ni - Ni bond frac.'].values, [np.nan, 0.5])
# Test to make sure bad_bond_values (bbv) are still changed correctly
# and check inplace behavior of featurize dataframe.
bf_voronoi.bbv = 0.0
df = pd.DataFrame.from_dict({'s': s_list})
df = bf_voronoi.featurize_dataframe(df, 's')
        self.assertArrayEqual(df['C - C bond frac.'].values, [1.0, 0.0])
        self.assertArrayEqual(df['Al - Ni bond frac.'].values, [0.0, 0.5])
        self.assertArrayEqual(df['Al - Al bond frac.'].values, [0.0, 0.0])
        self.assertArrayEqual(df['Ni - Ni bond frac.'].values, [0.0, 0.5])
def test_bob(self):
# Test a single fit and featurization
scm = SineCoulombMatrix(flatten=False)
bob = BagofBonds(coulomb_matrix=scm, token=' - ')
bob.fit([self.ni3al])
truth1 = [235.74041833262768, 1486.4464890775491, 1486.4464890775491,
1486.4464890775491, 38.69353092306119, 38.69353092306119,
38.69353092306119, 38.69353092306119, 38.69353092306119,
38.69353092306119, 83.33991275736257, 83.33991275736257,
83.33991275736257, 83.33991275736257, 83.33991275736257,
83.33991275736257]
truth1_labels = ['Al site #0', 'Ni site #0', 'Ni site #1', 'Ni site #2',
'Al - Ni bond #0', 'Al - Ni bond #1',
'Al - Ni bond #2', 'Al - Ni bond #3',
'Al - Ni bond #4', 'Al - Ni bond #5',
'Ni - Ni bond #0', 'Ni - Ni bond #1',
'Ni - Ni bond #2', 'Ni - Ni bond #3',
'Ni - Ni bond #4', 'Ni - Ni bond #5']
self.assertAlmostEqual(bob.featurize(self.ni3al), truth1)
self.assertEqual(bob.feature_labels(), truth1_labels)
# Test padding from fitting and dataframe featurization
bob.coulomb_matrix = CoulombMatrix(flatten=False)
bob.fit([self.ni3al, self.cscl, self.diamond_no_oxi])
df = pd.DataFrame({'structures': [self.cscl]})
df = bob.featurize_dataframe(df, 'structures')
self.assertEqual(len(df.columns.values), 25)
self.assertAlmostEqual(df['Cs site #0'][0], 7513.468312122532)
self.assertAlmostEqual(df['Al site #0'][0], 0.0)
self.assertAlmostEqual(df['Cs - Cl bond #1'][0], 135.74726437398044)
self.assertAlmostEqual(df['Al - Ni bond #0'][0], 0.0)
# Test error handling for bad fits or null fits
bob = BagofBonds(CoulombMatrix(flatten=False))
self.assertRaises(NotFittedError, bob.featurize, self.nacl)
bob.fit([self.ni3al, self.diamond])
        self.assertRaises(ValueError, bob.featurize, self.nacl)
def test_ward_prb_2017_lpd(self):
"""Test the local property difference attributes from Ward 2017"""
f = SiteStatsFingerprint.from_preset(
"LocalPropertyDifference_ward-prb-2017"
)
# Test diamond
features = f.featurize(self.diamond)
self.assertArrayAlmostEqual(features, [0] * (22 * 5))
features = f.featurize(self.diamond_no_oxi)
self.assertArrayAlmostEqual(features, [0] * (22 * 5))
# Test CsCl
big_face_area = np.sqrt(3) * 3 / 2 * (2 / 4 / 4)
small_face_area = 0.125
big_face_diff = 55 - 17
features = f.featurize(self.cscl)
labels = f.feature_labels()
my_label = 'mean local difference in Number'
self.assertAlmostEqual((8 * big_face_area * big_face_diff) /
(8 * big_face_area + 6 * small_face_area),
features[labels.index(my_label)], places=3)
my_label = 'range local difference in Electronegativity'
self.assertAlmostEqual(0, features[labels.index(my_label)], places=3)
def test_ward_prb_2017_efftcn(self):
"""Test the effective coordination number attributes of Ward 2017"""
f = SiteStatsFingerprint.from_preset(
"CoordinationNumber_ward-prb-2017"
)
# Test Ni3Al
features = f.featurize(self.ni3al)
labels = f.feature_labels()
my_label = 'mean CN_VoronoiNN'
self.assertAlmostEqual(12, features[labels.index(my_label)])
self.assertArrayAlmostEqual([12, 12, 0, 12, 0], features)
def test_ward_prb_2017_strhet(self):
f = StructuralHeterogeneity()
# Test Ni3Al, which is uniform
features = f.featurize(self.ni3al)
self.assertArrayAlmostEqual([0, 1, 1, 0, 0, 0, 0, 0, 0], features)
# Do CsCl, which has variation in the neighbors
big_face_area = np.sqrt(3) * 3 / 2 * (2 / 4 / 4)
small_face_area = 0.125
average_dist = (8 * np.sqrt(
3) / 2 * big_face_area + 6 * small_face_area) \
/ (8 * big_face_area + 6 * small_face_area)
rel_var = (8 * abs(np.sqrt(3) / 2 - average_dist) * big_face_area +
6 * abs(1 - average_dist) * small_face_area) \
/ (8 * big_face_area + 6 * small_face_area) / average_dist
cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[0.5, 0.5, 0.5], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
features = f.featurize(cscl)
self.assertArrayAlmostEqual(
[0, 1, 1, rel_var, rel_var, 0, rel_var, 0, 0],
features)
def test_packing_efficiency(self):
f = MaximumPackingEfficiency()
# Test L1_2
self.assertArrayAlmostEqual([np.pi / 3 / np.sqrt(2)],
f.featurize(self.ni3al))
# Test B1
self.assertArrayAlmostEqual([np.pi / 6], f.featurize(self.nacl),
decimal=3)
def test_ordering_param(self):
f = ChemicalOrdering()
# Check that elemental structures return zero
features = f.featurize(self.diamond)
self.assertArrayAlmostEqual([0, 0, 0], features)
# Check result for CsCl
# These were calculated by hand by <NAME>
features = f.featurize(self.cscl)
self.assertAlmostEqual(0.551982, features[0], places=5)
self.assertAlmostEqual(0.241225, features[1], places=5)
# Check for L1_2
features = f.featurize(self.ni3al)
self.assertAlmostEqual(1./3., features[0], places=5)
self.assertAlmostEqual(0.0303, features[1], places=5)
def test_composition_features(self):
comp = ElementProperty.from_preset("magpie")
f = StructureComposition(featurizer=comp)
# Test the fitting (should not crash)
f.fit([self.nacl, self.diamond])
# Test the features
features = f.featurize(self.nacl)
self.assertArrayAlmostEqual(comp.featurize(self.nacl.composition),
features)
# Test the citations/implementors
self.assertEqual(comp.citations(), f.citations())
self.assertEqual(comp.implementors(), f.implementors())
def test_xrd_powderPattern(self):
# default settings test
xpp = XRDPowderPattern()
pattern = xpp.featurize(self.diamond)
self.assertAlmostEqual(pattern[44], 0.19378, places=2)
self.assertEqual(len(pattern), 128)
# reduced range
xpp = XRDPowderPattern(two_theta_range=(0, 90))
pattern = xpp.featurize(self.diamond)
self.assertAlmostEqual(pattern[44], 0.4083, places=2)
self.assertEqual(len(pattern), 91)
self.assertEqual(len(xpp.feature_labels()), 91)
@unittest.skipIf(not (torch and cgcnn),
"pytorch or cgcnn not installed.")
def test_cgcnn_featurizer(self):
# Test regular classification.
cla_props, cla_atom_features, cla_structs = self._get_cgcnn_data()
atom_fea_len = 64
cgcnn_featurizer = \
CGCNNFeaturizer(atom_init_fea=cla_atom_features,
train_size=5, val_size=2, test_size=3,
atom_fea_len=atom_fea_len)
cgcnn_featurizer.fit(X=cla_structs, y=cla_props)
self.assertEqual(len(cgcnn_featurizer.feature_labels()), atom_fea_len)
state_dict = cgcnn_featurizer.model.state_dict()
self.assertEqual(state_dict['embedding.weight'].size(),
torch.Size([64, 92]))
self.assertEqual(state_dict['embedding.bias'].size(),
torch.Size([64]))
self.assertEqual(state_dict['convs.0.fc_full.weight'].size(),
torch.Size([128, 169]))
self.assertEqual(state_dict['convs.1.bn1.weight'].size(),
torch.Size([128]))
self.assertEqual(state_dict['convs.2.bn2.bias'].size(),
torch.Size([64]))
self.assertEqual(state_dict['conv_to_fc.weight'].size(),
torch.Size([128, 64]))
self.assertEqual(state_dict['fc_out.weight'].size(),
torch.Size([2, 128]))
for struct in cla_structs:
result = cgcnn_featurizer.featurize(struct)
self.assertEqual(len(result), atom_fea_len)
# Test regular regression and default atom_init_fea and featurize_many.
reg_props, reg_atom_features, reg_structs = \
self._get_cgcnn_data("regression")
cgcnn_featurizer = \
CGCNNFeaturizer(task="regression", atom_fea_len=atom_fea_len,
train_size=6, val_size=2, test_size=2)
cgcnn_featurizer.fit(X=reg_structs, y=reg_props)
cgcnn_featurizer.set_n_jobs(1)
result = cgcnn_featurizer.featurize_many(entries=reg_structs)
self.assertEqual(np.array(result).shape,
(len(reg_structs), atom_fea_len))
# Test classification from pre-trained model.
cgcnn_featurizer = \
CGCNNFeaturizer(h_fea_len=32, n_conv=4,
pretrained_name='semi-metal-classification',
atom_init_fea=cla_atom_features, train_size=5,
val_size=2, test_size=3, atom_fea_len=atom_fea_len)
cgcnn_featurizer.fit(X=cla_structs, y=cla_props)
self.assertEqual(len(cgcnn_featurizer.feature_labels()), atom_fea_len)
validate_features = [2.1295, 2.1288, 1.8504, 1.9175, 2.1094,
1.7770, 2.0471, 1.7426, 1.7288, 1.7770]
for struct, validate_feature in zip(cla_structs, validate_features):
result = cgcnn_featurizer.featurize(struct)
self.assertEqual(len(result), atom_fea_len)
self.assertAlmostEqual(result[0], validate_feature, 4)
# Test regression from pre-trained model.
cgcnn_featurizer = \
CGCNNFeaturizer(task="regression", h_fea_len=32, n_conv=4,
pretrained_name='formation-energy-per-atom',
atom_init_fea=reg_atom_features,
train_size=5, val_size=2, test_size=3,
atom_fea_len=atom_fea_len)
cgcnn_featurizer.fit(X=reg_structs, y=reg_props)
self.assertEqual(len(cgcnn_featurizer.feature_labels()), atom_fea_len)
validate_features = [1.6871, 1.5679, 1.5316, 1.6419, 1.6031,
1.4333, 1.5709, 1.5070, 1.5038, 1.4333]
for struct, validate_feature in zip(reg_structs, validate_features):
result = cgcnn_featurizer.featurize(struct)
self.assertEqual(len(result), atom_fea_len)
self.assertAlmostEqual(result[-1], validate_feature, 4)
# Test warm start regression.
warm_start_file = os.path.join(test_dir,
'cgcnn_test_regression_model.pth.tar')
warm_start_model = torch.load(warm_start_file)
self.assertEqual(warm_start_model['epoch'], 31)
self.assertEqual(warm_start_model['best_epoch'], 9)
self.assertAlmostEqual(warm_start_model['best_mae_error'].numpy(),
2.3700, 4)
cgcnn_featurizer = \
CGCNNFeaturizer(task="regression", warm_start_file=warm_start_file,
epochs=100, atom_fea_len=atom_fea_len,
atom_init_fea=reg_atom_features,
train_size=6, val_size=2, test_size=2)
cgcnn_featurizer.fit(X=reg_structs, y=reg_props)
        # If you use CGCNN featurize_many(), change the multiprocessing
        # start_method to 'spawn', because Gloo (which uses Infiniband) and
        # NCCL2 are not fork-safe and PyTorch does not support them with fork;
        # alternatively, set n_jobs = 1 to avoid multiprocessing, as follows.
set_start_method('spawn', force=True)
result = cgcnn_featurizer.featurize_many(entries=reg_structs)
self.assertEqual(np.array(result).shape,
(len(reg_structs), atom_fea_len))
# Test featurize_dataframe.
        df = pd.DataFrame.from_dict({"structure": cla_structs})
import pandas as pd
import networkx as nx
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
# functions
def degree(G,f):
"""
Adds a column to the dataframe f with the degree of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
degree_dic = nx.degree_centrality(G)
degree_df = pd.DataFrame(data = {'name': list(degree_dic.keys()), 'degree': list(degree_dic.values()) })
f = pd.merge(f, degree_df, on='name')
return f
def centrality(G,f):
"""
Adds a column to the dataframe f with the centrality of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
centrality_dic = nx.degree_centrality(G)
centrality_df = pd.DataFrame(data = {'name': list(centrality_dic.keys()), 'centrality': list(centrality_dic.values()) })
f = pd.merge(f, centrality_df, on='name')
return f
def betweenness(G,f):
"""
Adds a column to the dataframe f with the betweenness of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
betweenness_dic = nx.betweenness_centrality(G)
betweenness_df = pd.DataFrame(data = {'name': list(betweenness_dic.keys()), 'betweenness': list(betweenness_dic.values()) })
f = pd.merge(f, betweenness_df, on='name')
return f
def pagerank(G,f):
"""
Adds a column to the dataframe f with the pagerank of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
pagerank_dic = nx.pagerank(G)
pagerank_df = pd.DataFrame(data = {'name': list(pagerank_dic.keys()), 'pagerank': list(pagerank_dic.values()) })
f = pd.merge(f, pagerank_df, on='name')
return f
def clustering(G,f):
"""
    Adds a column to the dataframe f with the clustering coefficient of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
clustering_dic = nx.clustering(G)
clustering_df = pd.DataFrame(data = {'name': list(clustering_dic.keys()), 'clustering': list(clustering_dic.values()) })
f = pd.merge(f, clustering_df, on='name')
return f
def communities_greedy_modularity(G,f):
"""
Adds a column to the dataframe f with the community of each node.
    The communities are detected using greedy modularity.
    G: a networkx graph.
    f: a pandas dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
communities_dic = nx.algorithms.community.greedy_modularity_communities(G)
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_greedy_modularity': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = pd.merge(f, communities_df, on='name')
return f
def communities_label_propagation(G,f):
"""
Adds a column to the dataframe f with the community of each node.
    The communities are detected using label propagation.
    G: a networkx graph.
    f: a pandas dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
communities_gen = nx.algorithms.community.label_propagation_communities(G)
communities_dic = [community for community in communities_gen]
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_label_propagation': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = pd.merge(f, communities_df, on='name')
return f
def mean_neighbors(G,f,column,n=1):
"""
    Adds a column to the dataframe f with the mean value of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
mean_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
mean_neighbors[i] = f[neighbors.tolist()[0]][column].mean()
f["mean_neighbors"] = mean_neighbors
return f
def std_neighbors(G,f,column,n=1):
"""
    Adds a column to the dataframe f with the standard deviation of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
std_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
std_neighbors[i] = f[neighbors.tolist()[0]][column].std()
f["std_neighbors"] = std_neighbors
return f
def max_neighbors(G,f,column,n=1):
"""
    Adds a column to the dataframe f with the maximum value of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
max_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
max_neighbors[i] = f[neighbors.tolist()[0]][column].max()
f["max_neighbors"] = max_neighbors
return f
def min_neighbors(G,f,column,n=1):
"""
    Adds a column to the dataframe f with the minimum value of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
min_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
min_neighbors[i] = f[neighbors.tolist()[0]][column].min()
f["min_neighbors"] = min_neighbors
return f
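# Illustrative call of the neighbor-aggregation helpers above (added sketch; the toy
# graph, dataframe and 'score' column are assumptions made only for this example, and
# it assumes a networkx version that still provides nx.to_numpy_matrix, as used above):
def _neighbor_aggregation_example():
    G_toy = nx.karate_club_graph()
    f_toy = pd.DataFrame({'name': list(G_toy.nodes()),
                          'score': np.random.RandomState(0).rand(G_toy.number_of_nodes())})
    f_toy = mean_neighbors(G_toy, f_toy, 'score')
    f_toy = std_neighbors(G_toy, f_toy, 'score')
    return f_toy.head()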
def within_module_degree(G,f, column_communities = None, community_method = "label_propagation"):
"""
the within_module_degree calculates: Zi = (ki-ks)/Ss
Ki = number of links between the node i and all the nodes of its cluster
Ks = mean degree of the nodes in cluster s
    Ss = the standard deviation of the degrees of the nodes in cluster s
The within-module degree z-score measures how well-connected node i is to other nodes in the module.
PAPER: <NAME>., & <NAME>. (2005). Functional cartography of complex metabolic networks. nature, 433(7028), 895.
G: a networkx graph.
f: a pandas dataframe.
    column_communities: a column of the dataframe with the communities for each node. If None, the communities will be estimated using community_method.
    community_method: method to calculate the communities in the graph G if they are not provided in column_communities.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
if column_communities == None:
if community_method == "label_propagation":
f = communities_label_propagation(G,f)
column_communities = "communities_label_propagation"
elif community_method == "greedy_modularity":
f = communities_greedy_modularity(G,f)
column_communities = "communities_greedy_modularity"
else:
raise ValueError('A clustering method should be provided.')
z_df = pd.DataFrame(data = {'name': [], 'within_module_degree': [] })
for comutnity in set(f[column_communities]):
G2 = G.subgraph(f[f[column_communities] == comutnity]["name"].values)
Ks = 2*len(G2.edges) / len(G2.nodes)
Ss = np.std([i[1] for i in G2.degree()])
z_df = pd.concat([z_df,pd.DataFrame(data = {'name': list(G2.nodes), 'within_module_degree': [(i[1]-Ks)/Ss for i in G2.degree()] }) ])
f = pd.merge(f, z_df, on='name')
return f
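# Illustrative call of within_module_degree on a small built-in graph (added sketch;
# the karate-club graph and its dataframe are assumptions made only for this example):
def _within_module_degree_example():
    G_toy = nx.karate_club_graph()
    f_toy = pd.DataFrame({'name': list(G_toy.nodes())})
    # communities are detected with label propagation, then the z-scores are merged in
    return within_module_degree(G_toy, f_toy, community_method="label_propagation")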
def participation_coefficient(G,f, column_communities = None, community_method = "label_propagation"):
"""
the participation_coefficient calculates: Pi = 1- sum_s( (Kis/Kit)^2 )
Kis = number of links between the node i and the nodes of the cluster s
Kit = degree of the node i
The participation coefficient of a node is therefore close to 1 if its links are uniformly distributed among all the modules and 0 if all its links are within its own module.
PAPER: <NAME>., & <NAME>. (2005). Functional cartography of complex metabolic networks. nature, 433(7028), 895.
G: a networkx graph.
f: a pandas dataframe.
    column_communities: a column of the dataframe with the communities for each node. If None, the communities will be estimated using community_method.
    community_method: method to calculate the communities in the graph G if they are not provided in column_communities.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
if column_communities == None:
if community_method == "label_propagation":
f = communities_label_propagation(G,f)
column_communities = "communities_label_propagation"
elif community_method == "greedy_modularity":
f = communities_greedy_modularity(G,f)
column_communities = "communities_greedy_modularity"
else:
raise ValueError('A clustering method should be provided.')
p_df = pd.DataFrame(data = {'name': f['name'], 'participation_coefficient': [1 for _ in f['name']] })
for node in f['name']:
Kit = len(G.edges(node))
for comutnity in set(f[column_communities]):
Kis = len([edge for edge in G.edges(node) if edge[1] in f[ f[column_communities] == comutnity ]["name"]])
p_df.loc[ p_df["name"] == node, 'participation_coefficient' ] -= ( Kis / Kit ) ** 2
f = pd.merge(f, p_df, on='name')
return f
def node_embeddings(G,f,dim=20, walk_length=16, num_walks=100, workers=2):
"""
Adds the embeddings of the nodes to the dataframe f.
G: a networkx graph.
f: a pandas dataframe.
dim: the dimension of the embedding.
<NAME>., & <NAME>. (2016, August). node2vec: Scalable feature learning for networks. In Proceedings of the 22nd ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 855-864). ACM.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
from node2vec import Node2Vec
node2vec = Node2Vec(G, dimensions=dim, walk_length=walk_length, num_walks=num_walks, workers=workers)
model = node2vec.fit(window=10, min_count=1)
embeddings_df = pd.DataFrame(columns = ['name']+['node_embeddings_'+str(i) for i in range(dim)])
embeddings_df['name'] = f['name']
for name in embeddings_df['name']:
embeddings_df[embeddings_df['name'] == name] = [name] + list(model[str(name)])
f = pd.merge(f, embeddings_df, on='name')
return f
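# Illustrative end-to-end sketch chaining several of the feature functions above on a
# small built-in graph (added example; the karate-club graph is an assumption made
# only for this example):
def _graph_features_example():
    G_toy = nx.karate_club_graph()
    f_toy = pd.DataFrame({'name': list(G_toy.nodes())})
    f_toy = degree(G_toy, f_toy)
    f_toy = clustering(G_toy, f_toy)
    f_toy = pagerank(G_toy, f_toy)
    return f_toy.head()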
#Transformers
class Dumb(BaseEstimator, TransformerMixin):
def __init__(self,m = 8):
self.m = m
print('a',self.m)
def fit(self, X, y=None):
return self
def transform(self, X):
print('b',self.m)
return X
class Replace(BaseEstimator, TransformerMixin):
def __init__(self, value1,value2):
self.value1 = value1
self.value2 = value2
def fit(self, X, y=None):
return self
def transform(self, X):
return X.replace(self.value1, self.value2, regex=True)
class DropName(BaseEstimator, TransformerMixin):
"""
Drops the "name" column.
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
X_prima = X.drop(['name'],axis=1)
return X_prima
class Graph_fuction(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the result of the function for each node.
G: a networkx graph. The names of the nodes should be incuded in the train dataframe.
function: a python function that takes the graph G as input and outpus a column of the same length that the number of nodes in the graph.
column_name: a string with the name of the column
"""
def __init__(self, G, function, column_name = "Graph_fuction"):
self.G = G
self.function = function
self.column_name = column_name
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
column = self.function(G_train)
degree_df = pd.DataFrame(data = {'name': list(G_train.nodes()), self.column_name: column })
X_prima = pd.merge(X, degree_df, on='name')
print(X_prima.columns)
return X_prima
class Graph_features_fuction(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the result of the function for each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
    function: a python function that takes the graph G and the dataframe as input and outputs a column of the same length as the number of nodes in the graph.
column_name: a string with the name of the column
"""
def __init__(self, G, function, column_name = "Graph_features_fuction"):
self.G = G
self.function = function
self.column_name = column_name
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
column = self.function(G_train, X)
degree_df = pd.DataFrame(data = {'name': list(G_train.nodes()), self.column_name: column })
X_prima = pd.merge(X, degree_df, on='name')
print(X_prima.columns)
return X_prima
class Degree(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the degree of each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
degree_dic = nx.degree_centrality(G_train)
degree_df = pd.DataFrame(data = {'name': list(degree_dic.keys()), 'degree': list(degree_dic.values()) })
X_prima = pd.merge(X, degree_df, on='name')
return X_prima
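# Illustrative sklearn Pipeline combining the transformers above (added sketch; the
# toy graph, the 'club'-based target and the LogisticRegression step are assumptions
# made only for this example):
def _transformer_pipeline_example():
    from sklearn.pipeline import Pipeline
    from sklearn.linear_model import LogisticRegression
    G_toy = nx.karate_club_graph()
    X_toy = pd.DataFrame({'name': list(G_toy.nodes())})
    y_toy = [G_toy.nodes[n]['club'] == 'Mr. Hi' for n in G_toy.nodes()]
    pipe = Pipeline([('degree', Degree(G_toy)),
                     ('drop_name', DropName()),
                     ('clf', LogisticRegression())])
    return pipe.fit(X_toy, y_toy)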
class Clustering(BaseEstimator, TransformerMixin):
"""
    Adds a column to the dataframe f with the clustering coefficient of each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
clustering_dic = nx.clustering(G_train)
clustering_df = pd.DataFrame(data = {'name': list(clustering_dic.keys()), 'clustering': list(clustering_dic.values()) })
X_prima = pd.merge(X, clustering_df, on='name')
return X_prima
class Centrality(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the centrality of each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
centrality_dic = nx.degree_centrality(G_train)
centrality_df = pd.DataFrame(data = {'name': list(centrality_dic.keys()), 'centrality': list(centrality_dic.values()) })
X_prima = pd.merge(X, centrality_df, on='name')
return X_prima
class Betweenness(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the betweenness of each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
betweenness_dic = nx.betweenness_centrality(G_train)
betweenness_df = pd.DataFrame(data = {'name': list(betweenness_dic.keys()), 'betweenness': list(betweenness_dic.values()) })
X_prima = pd.merge(X, betweenness_df, on='name')
return X_prima
class Pagerank(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the pagerank of each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
pagerank_dic = nx.pagerank(G_train)
pagerank_df = pd.DataFrame(data = {'name': list(pagerank_dic.keys()), 'pagerank': list(pagerank_dic.values()) })
X_prima = pd.merge(X, pagerank_df, on='name')
return X_prima
class Communities_greedy_modularity(BaseEstimator, TransformerMixin):
"""
    Adds a column to the dataframe f with the community of each node.
    The communities are detected using greedy modularity.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
communities_dic = nx.algorithms.community.greedy_modularity_communities(G_train)
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_greedy_modularity': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
X_prima = pd.merge(X,communities_df, on='name')
return X_prima
class Communities_label_propagation(BaseEstimator, TransformerMixin):
"""
    Adds a column to the dataframe f with the community of each node.
    The communities are detected using label propagation.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
communities_gen = nx.algorithms.community.label_propagation_communities(G_train)
communities_dic = [community for community in communities_gen]
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_label_propagation': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
        X_prima = pd.merge(X, communities_df, on='name')
        return X_prima
import pandas as pd
import numpy as np
#
# TODO:
# Load up the dataset, setting correct header labels.
#
df = pd.read_csv('Datasets/census.data',
names = ['education', 'age', 'capital-gain', 'race', 'capital-loss', 'hours-per-week', 'sex', 'classification'],
na_values = '?')
# TODO:
# Use basic pandas commands to look through the dataset... get a
# feel for it before proceeding! Do the data-types of each column
# reflect the values you see when you look through the data using
# a text editor / spread sheet program? If you see 'object' where
# you expect to see 'int32' / 'float64', that is a good indicator
# that there is probably a string or missing value in a column.
# use `your_data_frame['your_column'].unique()` to see the unique
# values of each column and identify the rogue values. If these
# should be represented as nans, you can convert them using
# na_values when loading the dataframe.
#
df.info()
#
# TODO:
# Look through your data and identify any potential categorical
# features. Ensure you properly encode any ordinal and nominal
# types using the methods discussed in the chapter.
#
# Be careful! Some features can be represented as either categorical
# or continuous (numerical). If you ever get confused, think to yourself
# what makes more sense generally---to represent such features with a
# continuous numeric type... or a series of categories?
# ordinal
df.education = df.education.astype(pd.CategoricalDtype(ordered=True))  # pass categories=[...] to set the order explicitly
df.classification = df.classification.astype(pd.CategoricalDtype(categories=['<=50K', '>50K'], ordered=True))
# nominal
df.race = df.race.astype('category') #.cat.codes
df.sex = df.sex.astype('category') #.cat.codes
df.dtypes
# or, for nominal values
df = pd.get_dummies(df, columns=['race', 'sex'])
##############################################################################
#Copyright 2019 Google LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
################################################################################
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
import warnings
warnings.filterwarnings("ignore")
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from numpy import inf
from sklearn.linear_model import LassoCV, RidgeCV
from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pylab as plt
get_ipython().magic(u'matplotlib inline')
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 10, 6
from sklearn.metrics import classification_report, confusion_matrix
from functools import reduce
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import RepeatedKFold, RepeatedStratifiedKFold
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from autoviml.QuickML_Stacking import QuickML_Stacking
from autoviml.Transform_KM_Features import Transform_KM_Features
from autoviml.QuickML_Ensembling import QuickML_Ensembling
from autoviml.Auto_NLP import Auto_NLP, select_top_features_from_SVD
import xgboost as xgb
import sys
import copy
import numpy as np
import pandas as pd
from collections import Counter, OrderedDict
##################################################################################
def find_rare_class(classes, verbose=0):
######### Print the % count of each class in a Target variable #####
"""
Works on Multi Class too. Prints class percentages count of target variable.
It returns the name of the Rare class (the one with the minimum class member count).
This can also be helpful in using it as pos_label in Binary and Multi Class problems.
"""
counts = OrderedDict(Counter(classes))
total = sum(counts.values())
if verbose >= 1:
print(' Class -> Counts -> Percent')
for cls in counts.keys():
print("%6s: % 7d -> % 5.1f%%" % (cls, counts[cls], counts[cls]/total*100))
if type(pd.Series(counts).idxmin())==str:
return pd.Series(counts).idxmin()
else:
return int(pd.Series(counts).idxmin())
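# Illustrative call of find_rare_class (added sketch; the toy label list is an
# assumption made only for this example):
def _find_rare_class_example():
    toy_labels = ['yes'] * 90 + ['no'] * 10
    return find_rare_class(toy_labels, verbose=1)  # prints class percentages, returns 'no'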
###############################################################################
def return_factorized_dict(ls):
"""
###### Factorize any list of values in a data frame using this neat function
if your data has any NaN's it automatically marks it as -1 and returns that for NaN's
Returns a dictionary mapping previous values with new values.
"""
factos = pd.unique(pd.factorize(ls)[0])
categs = pd.unique(pd.factorize(ls)[1])
if -1 in factos:
categs = np.insert(categs,np.where(factos==-1)[0][0],np.nan)
return dict(zip(categs,factos))
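# Illustrative call of return_factorized_dict (added sketch; the toy list is an
# assumption made only for this example):
def _return_factorized_dict_example():
    toy_vals = ['low', 'high', np.nan, 'low']
    return return_factorized_dict(toy_vals)  # e.g. {'low': 0, 'high': 1, nan: -1}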
#############################################################################################
from sklearn.metrics import confusion_matrix
def balanced_accuracy_score(y_true, y_pred, sample_weight=None,
adjusted=False):
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
with np.errstate(divide='ignore', invalid='ignore'):
per_class = np.diag(C) / C.sum(axis=1)
if np.any(np.isnan(per_class)):
warnings.warn('y_pred contains classes not in y_true')
per_class = per_class[~np.isnan(per_class)]
score = np.mean(per_class)
if adjusted:
n_classes = len(per_class)
chance = 1 / n_classes
score -= chance
score /= 1 - chance
return score
#############################################################################################
import os
def check_if_GPU_exists():
GPU_exists = False
try:
from tensorflow.python.client import device_lib
dev_list = device_lib.list_local_devices()
print('Number of GPUs = %d' %len(dev_list))
for i in range(len(dev_list)):
if 'GPU' == dev_list[i].device_type:
GPU_exists = True
print('%s available' %dev_list[i].device_type)
except:
print('')
if not GPU_exists:
try:
os.environ['NVIDIA_VISIBLE_DEVICES']
print('GPU available on this device')
return True
except:
print('No GPU available on this device')
return False
else:
return True
#############################################################################################
def analyze_problem_type(train, targ,verbose=0):
"""
This module analyzes a Target Variable and finds out whether it is a
Regression or Classification type problem
"""
if train[targ].dtype != 'int64' and train[targ].dtype != float :
if train[targ].dtype == object:
if len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
else:
if len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
elif train[targ].dtype == 'int64' or train[targ].dtype == float :
if len(train[targ].unique()) == 1:
print('Error in data set: Only one class in Target variable. Check input and try again')
sys.exit()
elif len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
elif train[targ].dtype == object:
if len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
elif train[targ].dtype == bool:
model_class = 'Binary_Classification'
elif train[targ].dtype == 'int64':
if len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
else :
model_class = 'Regression'
return model_class
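#### Illustrative examples: an int64 target with 2 unique values returns 'Binary_Classification',
#### one with 3 to 30 unique values returns 'Multi_Classification', and an int64 or float target
#### with more than 30 unique values returns 'Regression'.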
#######
def convert_train_test_cat_col_to_numeric(start_train, start_test, col,str_flag=True):
"""
#### This is the easiest way to label encode object variables in both train and test
#### This takes care of some categories that are present in train and not in test
### and vice versa
"""
start_train = copy.deepcopy(start_train)
start_test = copy.deepcopy(start_test)
missing_flag = False
new_missing_col = ''
if start_train[col].isnull().sum() > 0:
missing_flag = True
if str_flag:
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[col].isnull(),new_missing_col]=1
start_train[col] = start_train[col].fillna("NA", inplace=False).astype(str)
else:
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[col].isnull(),new_missing_col]=1
start_train[col] = start_train[col].fillna("NA", inplace=False).astype('category')
if len(start_train[col].apply(type).value_counts()) > 1:
print(' Alert! Mixed Data Types in Train data set %s column with %d data types. Fixing it...' %(
col, len(start_train[col].apply(type).value_counts())))
train_categs = start_train[col].value_counts().index.tolist()
else:
train_categs = np.unique(start_train[col]).tolist()
if not isinstance(start_test,str) :
if start_test[col].isnull().sum() > 0:
#### In some rare cases, Test data has missing values while Train data doesn't.
#### This section takes care of those rare cases: we need to create the missing-flag
#### column in both train and test in that case
if not missing_flag:
missing_flag = True
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
##### This is to take care of Missing_Flag in the start_test data set!!
start_test[new_missing_col] = 0
start_test.loc[start_test[col].isnull(),new_missing_col]=1
if str_flag:
start_test[col] = start_test[col].fillna("NA", inplace=False).astype(str)
else:
start_test[col] = start_test[col].fillna("NA", inplace=False).astype('category')
else:
#### In some rare cases, there is missing values in train but not in test data!
#### In those cases, we need to create a new_missing_col in test data in addition to train
start_test[new_missing_col] = 0
if len(start_test[col].apply(type).value_counts()) > 1:
print(' Alert! Mixed Data Types in Test data set %s column with %d data types. Fixing it...' %(
col, len(start_test[col].apply(type).value_counts())))
test_categs = start_test[col].value_counts().index.tolist()
test_categs = [x if isinstance(x,str) else str(x) for x in test_categs]
start_test[col] = start_test[col].astype(str).values
else:
test_categs = np.unique(start_test[col]).tolist()
if not isinstance(start_test,str) :
categs_all = np.unique( train_categs + test_categs).tolist()
dict_all = return_factorized_dict(categs_all)
else:
dict_all = return_factorized_dict(train_categs)
start_train[col] = start_train[col].map(dict_all)
if not isinstance(start_test,str) :
start_test[col] = start_test[col].map(dict_all)
return start_train, start_test, missing_flag, new_missing_col
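#### Illustrative example (hypothetical column): if train[col] holds ['red', 'blue', NaN] and test[col]
#### holds ['green'], the NaN is filled with "NA", a col + '_Missing_Flag' 0/1 column is added, and one
#### factorization dict built from the union of train and test categories is mapped onto both, so
#### 'green' (unseen in train) still receives a valid integer code.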
#############################################################################################################
def flatten_list(list_of_lists):
final_ls = []
for each_item in list_of_lists:
if isinstance(each_item,list):
final_ls += each_item
else:
final_ls.append(each_item)
return final_ls
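#### Illustrative example: flatten_list([[1, 2], 3, [4]]) returns [1, 2, 3, 4]; only one level of
#### nesting is flattened.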
#############################################################################################################
import scipy as sp
def Auto_ViML(train, target, test='',sample_submission='',hyper_param='RS', feature_reduction=True,
scoring_parameter='logloss', Boosting_Flag=None, KMeans_Featurizer=False,
Add_Poly=0, Stacking_Flag=False, Binning_Flag=False,
Imbalanced_Flag=False, verbose=0):
"""
#########################################################################################################
############# This is not an Officially Supported Google Product! #########################
#########################################################################################################
#### Automatically Build Variant Interpretable Machine Learning Models (Auto_ViML) ######
#### Developed by <NAME> ######
###### Version 0.1.652 #######
#####  GPU UPGRADE!! Now with Auto_NLP. Best Version to Download or Upgrade. May 15, 2020        ######
######  Auto_ViML with Auto_NLP combines structured data with NLP for Predictions.               #######
#########################################################################################################
#Copyright 2019 Google LLC #######
# #######
#Licensed under the Apache License, Version 2.0 (the "License"); #######
#you may not use this file except in compliance with the License. #######
#You may obtain a copy of the License at #######
# #######
# https://www.apache.org/licenses/LICENSE-2.0 #######
# #######
#Unless required by applicable law or agreed to in writing, software #######
#distributed under the License is distributed on an "AS IS" BASIS, #######
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #######
#See the License for the specific language governing permissions and #######
#limitations under the License. #######
#########################################################################################################
#### Auto_ViML was designed for building a High Performance Interpretable Model With Fewest Vars. ###
#### The "V" in Auto_ViML stands for Variant because it tries Multiple Models and Multiple Features ###
#### to find the Best Performing Model for any data set. The "i" in Auto_ViML stands for "Interpretable"###
#### since it selects the fewest Features to build a simpler, more interpretable model. This is key. ##
#### Auto_ViML is built mostly using Scikit-Learn, Numpy, Pandas and Matplotlib. Hence it should run ##
#### on any Python 2 or Python 3 Anaconda installations. You won't have to import any special ####
#### Libraries other than "SHAP" library for SHAP values which provides more interpretability. #####
#### But if you don't have it, Auto_ViML will skip it and show you the regular feature importances. ###
#########################################################################################################
#### INPUTS: ###
#########################################################################################################
#### train: could be a datapath+filename or a dataframe. It will detect which is which and load it.####
#### test: could be a datapath+filename or a dataframe. If you don't have any, just leave it as "". ###
#### sample_submission: must be a datapath+filename. If you don't have any, just leave it as empty string.####
#### target: name of the target variable in the data set. ####
#### sep: if you have a separator in the file such as "," or "\t" mention it here. Default is ",". ####
#### scoring_parameter: if you want your own scoring parameter such as "f1" give it here. If not, #####
#### it will assume the appropriate scoring param for the problem and it will build the model.#####
#### hyper_param: Tuning options are GridSearch ('GS'), RandomizedSearch ('RS')and now HyperOpt ('HO')#
####   Default setting is 'RS'. Auto_ViML with HyperOpt is approximately 3X faster than with GridSearchCV.###
#### feature_reduction: Default = 'True' but it can be set to False if you don't want automatic ####
#### feature_reduction since in Image data sets like digits and MNIST, you get better #####
#### results when you don't reduce features automatically. You can always try both and see. #####
#### KMeans_Featurizer = True: Adds a cluster label to features based on KMeans. Use for Linear. #####
#### False (default) = For Random Forests or XGB models, leave it False since it may overfit.####
#### Boosting Flag: you have 3 possible choices (default is None):                               #####
#### None = This will build a Linear Model #####
#### False = This will build a Random Forest or Extra Trees model (also known as Bagging) #####
#### True = This will build an XGBoost model #####
#### Add_Poly: Default is 0. It has 3 additional settings:                                        #####
####    1 = Add interaction variables only, such as x1*x2, x2*x3, ..., x9*x10 etc.                #####
####    2 = Add squared variables only (x1**2, x2**2, etc.); 3 = Add both interaction and squared vars.#####
#### Stacking_Flag: Default is False. If set to True, it will add an additional feature which #####
#### is derived from predictions of another model. This is used in some cases but may result#####
#### in overfitting. So be careful turning this flag "on". #####
#### Binning_Flag: Default is False. If set to True, it will convert the top numeric variables    #####
####    into binned variables through a technique known as "Entropy" binning. This is very        #####
####    helpful for certain datasets (especially those that are hard to model).                   #####
#### Imbalanced_Flag: Default is False. If set to True, it will downsample the "Majority Class" #####
####    in an imbalanced dataset and make the "Rare" class at least 5% of the data set. This is   #####
####    the ideal threshold in my mind to make a model learn. Do it for Highly Imbalanced data.   #####
#### verbose: This has 3 possible states: #####
#### 0 = limited output. Great for running this silently and getting fast results. #####
#### 1 = more charts. Great for knowing how results were and making changes to flags in input. #####
#### 2 = lots of charts and output. Great for reproducing what Auto_ViML does on your own. #####
#########################################################################################################
#### OUTPUTS: #####
#########################################################################################################
#### model: It will return your trained model #####
#### features: the fewest number of features in your model to make it perform well #####
#### train_modified: this is the modified train dataframe after removing and adding features #####
#### test_modified: this is the modified test dataframe with the same transformations as train #####
################# A D D I T I O N A L N O T E S ###########
#### Finally, it writes your submission file to disk in the current directory called "mysubmission.csv"
#### This submission file is ready for you to show clients or submit to competitions.             #####
#### Even if no submission file was given, as long as you give it a test file name, it will create #####
####    a submission file for you named "mysubmission.csv".                                       #####
#### Auto_ViML works on any Multi-Class, Multi-Label Data Set. So you can have many target labels #####
#### You don't have to tell Auto_ViML whether it is a Regression or Classification problem. #####
#### Suggestions for a Scoring Metric: #####
#### If you have Binary Class and Multi-Class in a Single Label, Choose Accuracy. It will ######
#### do very well. If you want something better, try roc_auc even for Multi-Class which works. ######
#### You can try F1 or Weighted F1 if you want something complex or for Multi-Class. ######
#### Note that For Imbalanced Classes (<=5% classes), it automatically adds Class Weights. ######
#### Also, Note that it handles Multi-Label automatically so you can send Train data ######
#### with multiple Labels (Targets) and it will automatically predict for each Label. ######
#### Finally this is Meant to Be a Fast Algorithm, so use it for just quick POCs ######
#### This is Not Meant for Production Problems. It produces great models but it is not Perfect! ######
######################### HELP OTHERS! PLEASE CONTRIBUTE! OPEN A PULL REQUEST! ##########################
#########################################################################################################
"""
##### These copies are to make sure that the originals are not destroyed ####
CPU_count = os.cpu_count()
test = copy.deepcopy(test)
orig_train = copy.deepcopy(train)
orig_test = copy.deepcopy(test)
train_index = train.index
if not isinstance(test, str):
test_index = test.index
start_test = copy.deepcopy(orig_test)
####### These are Global Settings. If you change them here, it will ripple across the whole code ###
corr_limit = 0.70   #### This is the cut-off for defining highly correlated vars that will be removed.
scaling = 'MinMax' ### This decides whether to use MinMax scaling or Standard Scaling ("Std").
first_flag = 0  ## This is just a flag to detect the first time values of variables are set
seed= 99 ### this maintains repeatability of the whole ML pipeline here ###
subsample=0.7 #### Leave this low so the models generalize better. Increase it if you want overfit models
col_sub_sample = 0.7 ### Leave this low for the same reason above
poly_degree = 2 ### this create 2-degree polynomial variables in Add_Poly. Increase if you want more degrees
booster = 'gbtree' ### this is the booster for XGBoost. The other option is "Linear".
n_splits = 5 ### This controls the number of splits for Cross Validation. Increasing will take longer time.
matplotlib_flag = True #(default) This is for drawing SHAP values. If this is False, initJS is used.
early_stopping = 20 #### Early stopping rounds for XGBoost ######
encoded = '_Label_Encoded' ### This is the tag we add to feature names in the end to indicate they are label encoded
catboost_limit = 0.4 #### The catboost_limit represents the percentage of num vars in data. Any lower and CatBoost is used.
cat_code_limit = 100 #### If the number of dummy variables to create in a data set exceeds this, CatBoost is the default Algorithm used
one_hot_size = 500 #### This determines the max length of one_hot_max_size parameter of CatBoost algorithm
Alpha_min = -3 #### The lowest value of Alpha in LOGSPACE that is used in CatBoost
Alpha_max = 2 #### The highest value of Alpha in LOGSPACE that is used in Lasso or Ridge Regression
Cs = [0.001,0.005,0.01,0.05,0.1,0.25,0.5,1,2,4,6,10,20,30,40,50,100,150,200,400,800,1000,2000]
#Cs = np.logspace(-4,3,40) ### The list of values of C used in Logistic Regression
tolerance = 0.001 #### This tolerance is needed to speed up Logistic Regression. Otherwise, SAGA takes too long!!
#### 'lbfgs' is the fastest one but doesn't provide accurate results. Newton-CG is slower but accurate!
#### SAGA is extremely slow. Even slower than Newton-CG. Liblinear is the fastest and as accurate as Newton-CG!
solvers = ['liblinear'] ### Other solvers for Logistic Regression model: ['newton-cg','lbfgs','saga','liblinear']
solver = 'liblinear'  ### Default solver for Logistic Regression; fast and handles Multi-class problems via one-vs-rest.
penalties = ['l2','l1'] ### This is to determine the penalties for LogisticRegression
n_steps = 6 ### number of estimator steps between 100 and max_estims
max_depth = 10 ##### This limits the max_depth used in decision trees and other classifiers
max_features = 10 #### maximum number of features in a random forest model or extra trees model
warm_start = True ### This is to set the warm_start flag for the ExtraTrees models
bootstrap = True #### Set this flag to control whether to bootstrap variables or not.
n_repeats = 1 #### This is for repeated KFold and StratifiedKFold - this changes the folds every time
Bins = 30 ### This is for plotting probabilities in a histogram. For small data sets, 30 is enough.
top_nlp_features = 100 ### This sets a limit on the number of features added by each NLP transformer!
removed_features_threshold = 5 #### This triggers the Truncated_SVD if number of removed features from XGB exceeds this!
calibrator_flag = False ### In Multi-class data sets, a CalibratedClassifier works better than regular classifiers!
max_class_length = 1 ### It turns out the number of classes is directly correlated to Estimated Time. Hence this!
print('############## D A T A S E T A N A L Y S I S #######################')
########## I F CATBOOST IS REQUESTED, THEN CHECK IF IT IS INSTALLED #######################
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
from catboost import CatBoostClassifier, CatBoostRegressor
#### Similarly for Random Forests Model, it takes too long with Grid Search, so MAKE IT RandomizedSearch!
if not Boosting_Flag: ### there is also a chance Boosting_Flag is None - This is to eliminate that chance!
if orig_train.shape[0] >= 10000:
hyper_param = 'RS'
print('Changing hyperparameter search to RS. Otherwise, Random Forests will take too long for 10,000+ rows')
elif Boosting_Flag: ### there is also a chance Boosting_Flag is None - This is to eliminate that chance!
if not isinstance(Boosting_Flag, str):
if orig_train.shape[0] >= 10000:
hyper_param = 'RS'
print('Changing hyperparameter search to RS. Otherwise XGBoost will take too long for 10,000+ rows.')
########### T H I S I S W H E R E H Y P E R O P T P A R A M S A R E S E T #########
if hyper_param == 'HO':
########### HyperOpt related objective functions are defined here #################
from hyperopt import hp, tpe
from hyperopt.fmin import fmin
from hyperopt import Trials
from autoviml.custom_scores_HO import accu, rmse, gini_sklearn, gini_meae
from autoviml.custom_scores_HO import gini_msle, gini_mae, gini_mse, gini_rmse
from autoviml.custom_scores_HO import gini_accuracy, gini_bal_accuracy, gini_roc
from autoviml.custom_scores_HO import gini_precision, gini_average_precision, gini_weighted_precision
from autoviml.custom_scores_HO import gini_macro_precision, gini_micro_precision
from autoviml.custom_scores_HO import gini_samples_precision, gini_f1, gini_weighted_f1
from autoviml.custom_scores_HO import gini_macro_f1, gini_micro_f1, gini_samples_f1,f2_measure
from autoviml.custom_scores_HO import gini_log_loss, gini_recall, gini_weighted_recall
from autoviml.custom_scores_HO import gini_samples_recall, gini_macro_recall, gini_micro_recall
else:
from autoviml.custom_scores import accu, rmse, gini_sklearn, gini_meae
from autoviml.custom_scores import gini_msle, gini_mae, gini_mse, gini_rmse
from autoviml.custom_scores import gini_accuracy, gini_bal_accuracy, gini_roc
from autoviml.custom_scores import gini_precision, gini_average_precision, gini_weighted_precision
from autoviml.custom_scores import gini_macro_precision, gini_micro_precision
from autoviml.custom_scores import gini_samples_precision, gini_f1, gini_weighted_f1
from autoviml.custom_scores import gini_macro_f1, gini_micro_f1, gini_samples_f1,f2_measure
from autoviml.custom_scores import gini_log_loss, gini_recall, gini_weighted_recall
from autoviml.custom_scores import gini_samples_recall, gini_macro_recall, gini_micro_recall
###### If hyper_param = 'GS', it takes a LOOOONG TIME with "SAGA" solver for LogisticRegression.
#### Hence to speed it up you need to change the tolerance threshold to something bigger
if hyper_param == 'GS':
tolerance = 0.01 #### This tolerance is bigger to speed up Logistic Regression. Otherwise, SAGA takes too long!!
########## This is where some more default parameters are set up ######
data_dimension = orig_train.shape[0]*orig_train.shape[1] ### number of cells in the entire data set .
if data_dimension > 1000000:
### if data dimension exceeds 1 million, then reduce no of params
no_iter=30
early_stopping = 10
test_size = 0.20
max_iter = 10000
Bins = 100
top_nlp_features = 300
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 5000
else:
max_estims = 400
else:
max_estims = 400
else:
if orig_train.shape[0] <= 1000:
no_iter=20
test_size = 0.1
max_iter = 4000
top_nlp_features = 250
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 3000
else:
max_estims = 300
else:
max_estims = 300
early_stopping = 4
else:
no_iter=30
test_size = 0.15
max_iter = 7000
top_nlp_features = 200
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 4000
else:
max_estims = 350
else:
max_estims = 350
early_stopping = 6
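#### Illustrative sizing example (hypothetical data set): a 50,000-row x 30-column train has 1,500,000
#### cells (> 1 million), so no_iter=30, early_stopping=10, test_size=0.20 and max_estims=400 (5000 for
#### CatBoost); a 1,000-row train instead falls into the small-data branch above with test_size=0.1.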
#### The warnings from Sklearn are so annoying that I have to shut it off ####
import warnings
warnings.filterwarnings("ignore")
def warn(*args, **kwargs):
pass
warnings.warn = warn
### First_Flag is merely a flag for the first time you want to set values of variables
if scaling == 'MinMax':
SS = MinMaxScaler()
elif scaling == 'Std':
SS = StandardScaler()
else:
SS = MinMaxScaler()
### Make target into a list so that we can uniformly process the target label
if not isinstance(target, list):
target = [target]
model_label = 'Single_Label'
elif isinstance(target, list):
if len(target)==1:
model_label = 'Single_Label'
elif len(target) > 1:
model_label = 'Multi_Label'
else:
print('Target variable is neither a string nor a list. Please check input and try again!')
return
##### This is where we run the Traditional models to compare them to XGB #####
start_time = time.time()
####################################################################################
##### Set up your Target Labels and Classes Properly Here #### Label Encoding #####
#### This is for Classification Problems Only where you do Label Encoding of Target
mldict = lambda: defaultdict(mldict)
label_dict = mldict()
first_time = True
print('Training Set Shape = {}'.format(orig_train.shape))
print(' Training Set Memory Usage = {:.2f} MB'.format(orig_train.memory_usage().sum() / 1024**2))
if not isinstance(orig_test,str):
print('Test Set Shape = {}'.format(orig_test.shape))
print(' Test Set Memory Usage = {:.2f} MB'.format(orig_test.memory_usage().sum() / 1024**2))
print('%s Target: %s' %(model_label,target))
###### Now analyze what problem we have here ####
try:
modeltype = analyze_problem_type(train, target[0],verbose)
except:
print('Cannot find the Target variable in data set. Please check input and try again')
return
for each_target in target:
#### Make sure you don't move these 2 lines: they need to be reset for every target!
#### HyperOpt will not do Trials beyond max_evals - so only if you reset here, it will do it again.
if hyper_param == 'HO':
params_dict = {}
bayes_trials = Trials()
############ THIS IS WHERE OTHER DEFAULT PARAMS ARE SET ###############
c_params = dict()
r_params = dict()
if modeltype == 'Regression':
scv = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed)
eval_metric = 'rmse'
objective = 'reg:squarederror'
model_class = 'Regression'
start_train = copy.deepcopy(orig_train)
else:
if len(np.unique(train[each_target])) == 2:
model_class = 'Binary-Class'
elif len(np.unique(train[each_target])) > 2:
model_class = 'Multi-Class'
##### If multi-class happens, then you absolutely need to do SMOTE. Otherwise, you don't get good results!
#### Unfortunately SMOTE blows up when the data set is large -> so better to turn it off!
print('ALERT! Setting Imbalanced_Flag to True in Auto_ViML for Multi_Classification problems improves results!')
#Imbalanced_Flag = True
else:
print('Target label %s has less than 2 classes. Stopping' %each_target)
return
### This is for Classification Problems Only ########
print('Shuffling the data set before training')
start_train = orig_train.sample(frac=1.0, random_state=seed)
scv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed)
if modeltype != 'Regression':
rare_class_orig = find_rare_class(orig_train[each_target].values,verbose=1)
### Perform Label Transformation only for Classification Problems ####
classes = np.unique(orig_train[each_target])
if first_time:
if hyper_param == 'GS':
print('Using GridSearchCV for Hyper Parameter Tuning. This is slow. Switch to RS for faster tuning...')
elif hyper_param == 'RS':
print('Using RandomizedSearchCV for Hyper Parameter Tuning. This is 3X faster than GridSearchCV...')
else:
print('Using HyperOpt which is approximately 3X Faster than GridSearchCV but results vary...')
first_time = False
if len(classes) > 2:
##### If Boosting_Flag = True with more than 2 classes, warn the user since Multi-Class XGB is VERY SLOW!
max_class_length = len(classes)
if Boosting_Flag:
print('CAUTION: In Multi-Class Boosting (3+ classes), TRAINING WILL TAKE A LOT OF TIME!')
objective = 'multi:softmax'
eval_metric = "mlogloss"
else:
max_class_length = 2
eval_metric="logloss"
objective = 'binary:logistic'
### Do Label Encoding when the Target Classes in each Label are Strings or Multi Class ###
if type(start_train[each_target].values[0])==str or str(start_train[each_target].dtype
)=='category' or sorted(np.unique(start_train[each_target].values))[0] != 0:
### if the class is a string or if it has more than 2 classes, then use Factorizer!
label_dict[each_target]['values'] = start_train[each_target].values
#### Factorizer is the easiest way to convert target in train and predictions in test
#### This takes care of some classes that are present in train and not in predictions
### and vice versa. Hence it is better than Label Encoders, which break when the above happens.
train_targ_categs = list(start_train[each_target].value_counts().index)
if len(train_targ_categs) == 2:
majority_class = [x for x in train_targ_categs if x != rare_class_orig]
dict_targ_all = {majority_class[0]: 0, rare_class_orig: 1}
else:
dict_targ_all = return_factorized_dict(train_targ_categs)
start_train[each_target] = start_train[each_target].map(dict_targ_all)
label_dict[each_target]['dictionary'] = copy.deepcopy(dict_targ_all)
label_dict[each_target]['transformer'] = dict([(v,k) for (k,v) in dict_targ_all.items()])
label_dict[each_target]['classes'] = copy.deepcopy(train_targ_categs)
class_nums = list(dict_targ_all.values())
label_dict[each_target]['class_nums'] = copy.deepcopy(class_nums)
print('String or Multi Class target: %s transformed as follows: %s' %(each_target,dict_targ_all))
rare_class = find_rare_class(start_train[each_target].values)
else:
### Since the each_target here is already numeric, you don't have to modify it
start_train[each_target] = start_train[each_target].astype(int).values
rare_class = find_rare_class(start_train[each_target].values)
label_dict[each_target]['values'] = start_train[each_target].values
label_dict[each_target]['classes'] = np.unique(start_train[each_target].values)
class_nums = np.unique(start_train[each_target].values)
label_dict[each_target]['class_nums'] = copy.deepcopy(class_nums)
label_dict[each_target]['transformer'] = []
label_dict[each_target]['dictionary'] = dict(zip(classes,classes))
print(' Target %s is already numeric. No transformation done.' %each_target)
if rare_class != 1:
print('Alert! Rare Class is not 1 but %s in this data set' %rare_class)
else:
#### In Regression problems, max_class_length is artificially set to one.
#### It turns out that Estimated Time is correlated to number of classes in data set. Hence we use this!
max_class_length = 1
###########################################################################################
#### This is where we start doing the iterative hyper tuning parameters #####
params_dict = defaultdict(list)
accu_mean = []
error_rate = []
###### This is where we do the training and hyper parameter tuning ########
orig_preds = [x for x in list(orig_train) if x not in target]
count = 0
################# CLASSIFY COLUMNS HERE ######################
var_df = classify_columns(orig_train[orig_preds], verbose)
##### Classify Columns ################
id_cols = var_df['id_vars']
nlp_columns = var_df['nlp_vars']
date_cols = var_df['date_vars']
del_cols = var_df['cols_delete']
factor_cols = var_df['factor_vars']
numvars = var_df['continuous_vars']+var_df['int_vars']
cat_vars = var_df['string_bool_vars']+var_df['discrete_string_vars']+var_df[
'cat_vars']+var_df['factor_vars']+var_df['num_bool_vars']
num_bool_vars = var_df['num_bool_vars']
#######################################################################################
preds = [x for x in orig_preds if x not in id_cols+del_cols+date_cols+target]
if len(id_cols+del_cols+date_cols)== 0:
print(' No variables removed since no ID or low-information variables found in data set')
else:
print(' %d variables removed since they were ID or low-information variables'
%len(id_cols+del_cols+date_cols))
################## This is where real code begins ###################################################
GPU_exists = check_if_GPU_exists()
###### This is where we set the CPU and GPU parameters for XGBoost
param = {}
if Boosting_Flag:
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
model_name = 'CatBoost'
hyper_param = None
else:
model_name = 'XGBoost'
else:
model_name = 'XGBoost'
elif Boosting_Flag is None:
model_name = 'Linear'
else:
model_name = 'Forests'
##### Set the Scoring Parameters here based on each model and preferences of user ##############
cpu_params = {}
if model_name == 'XGBoost':
##### We should keep CPU params as backup in case GPU fails!
cpu_params['nthread'] = -1
cpu_params['tree_method'] = 'hist'
cpu_params['grow_policy'] = 'depthwise'
cpu_params['max_depth'] = max_depth
cpu_params['max_leaves'] = 0
cpu_params['verbosity'] = 0
cpu_params['gpu_id'] = 0
cpu_params['updater'] = 'grow_colmaker'
cpu_params['predictor'] = 'cpu_predictor'
cpu_params['num_parallel_tree'] = 1
if GPU_exists:
param['nthread'] = -1
param['tree_method'] = 'gpu_hist'
param['grow_policy'] = 'depthwise'
param['max_depth'] = max_depth
param['max_leaves'] = 0
param['verbosity'] = 0
param['gpu_id'] = 0
param['updater'] = 'grow_gpu_hist' #'prune'
param['predictor'] = 'gpu_predictor'
param['num_parallel_tree'] = 1
else:
param = copy.deepcopy(cpu_params)
validation_metric = copy.deepcopy(scoring_parameter)
elif model_name.lower() == 'catboost':
if model_class == 'Binary-Class':
catboost_scoring = 'Accuracy'
validation_metric = 'Accuracy'
loss_function='Logloss'
elif model_class == 'Multi-Class':
catboost_scoring = 'AUC'
validation_metric = 'AUC:type=Mu'
loss_function='MultiClass'
else:
loss_function = 'RMSE'
validation_metric = 'RMSE'
catboost_scoring = 'RMSE'
else:
validation_metric = copy.deepcopy(scoring_parameter)
########## D A T A P R E P R O C E S S I N G H E R E ##########################
print('############# D A T A P R E P A R A T I O N #############')
if start_train.isnull().sum().sum() > 0:
print('Filling missing values with placeholders and adding missing-flag columns for affected features')
else:
print('No Missing Values in train data set')
copy_preds = copy.deepcopy(preds)
missing_flag_cols = []
if len(copy_preds) > 0:
dict_train = {}
for f in copy_preds:
if f in nlp_columns:
#### You have to skip this for NLP columns ##############
continue
missing_flag = False
if start_train[f].dtype == object:
#### This is the easiest way to label encode object variables in both train and test
#### This takes care of some categories that are present in train and not in test
### and vice versa
start_train, start_test,missing_flag,new_missing_col = convert_train_test_cat_col_to_numeric(start_train, start_test,f,True)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
elif start_train[f].dtype == np.int64 or start_train[f].dtype == np.int32 or start_train[f].dtype == np.int16:
### if there are integer variables, don't scale them. Leave them as is.
fill_num = start_train[f].min() - 1
if start_train[f].isnull().sum() > 0:
missing_flag = True
new_missing_col = f + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[f].isnull(),new_missing_col]=1
start_train[f] = start_train[f].fillna(fill_num).astype(int)
if type(orig_test) != str:
if missing_flag:
start_test[new_missing_col] = 0
if start_test[f].isnull().sum() > 0:
start_test.loc[start_test[f].isnull(),new_missing_col]=1
start_test[f] = start_test[f].fillna(fill_num).astype(int)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
elif f in factor_cols:
start_train, start_test,missing_flag,new_missing_col = convert_train_test_cat_col_to_numeric(start_train, start_test,f,False)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
else:
### for all numeric variables, fill missing values with 1 less than min.
fill_num = start_train[f].min() - 1
if start_train[f].isnull().sum() > 0:
missing_flag = True
new_missing_col = f + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[f].isnull(),new_missing_col]=1
start_train[f] = start_train[f].fillna(fill_num)
if type(orig_test) != str:
if missing_flag:
start_test[new_missing_col] = 0
if start_test[f].isnull().sum() > 0:
start_test.loc[start_test[f].isnull(),new_missing_col]=1
start_test[f] = start_test[f].fillna(fill_num)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
###########################################################################################
if orig_train.isnull().sum().sum() > 0:
### If there are missing values in remaining features print it here ####
top5 = orig_train.isnull().sum().sort_values(ascending=False).index.tolist()[:5]
print(' Columns with most missing values: %s' %(
[x for x in top5 if orig_train[x].isnull().sum()>0]))
print(' and their missing value totals: %s' %([orig_train[x].isnull().sum() for x in
top5 if orig_train[x].isnull().sum()>0]))
if start_train[copy_preds].isnull().sum().sum() == 0:
print('Completed missing value Imputation. No more missing values in train.')
if verbose >= 1:
print(' %d new missing value columns added: %s' %(len(missing_flag_cols),missing_flag_cols))
else:
print('Error: Unable to complete missing value imputation in train. Exiting...')
return
####################################################################################
if type(orig_test) != str:
if start_test[copy_preds].isnull().sum().sum() > 0:
print('Test data still has some missing values. Fix it. Exiting...')
return
else:
print('Test data has no missing values. Continuing...')
###########################################################################################
else:
print(' Could not find any variables in your data set. Please check your dataset and try again')
return
###########################################################################################
print('Completed Label Encoding and Filling of Missing Values for Train and Test Data')
### This is a minor test to make sure that Boolean vars are Integers if they are Numeric!
if len(num_bool_vars) > 0:
### Just make sure that numeric Boolean vars are set as Integer type -> otherwise CatBoost will blow up
for each_bool_num in var_df['num_bool_vars']:
start_train[each_bool_num] = start_train[each_bool_num].astype(int)
if type(start_test) != str:
start_test[each_bool_num] = start_test[each_bool_num].astype(int)
######################################################################################
######### Set your Refit Criterion here - if you want to maximize Precision or Recall do it here ##
if modeltype == 'Regression':
if scoring_parameter in ['log_loss', 'neg_mean_squared_error','mean_squared_error']:
refit_metric = 'rmse'
else:
refit_metric = 'mae'
else:
if scoring_parameter in ['precision', 'precision_score','average_precision']:
refit_metric = 'precision'
elif scoring_parameter in ['logloss', 'log_loss']:
refit_metric = 'log_loss'
elif scoring_parameter in ['recall', 'recall_score']:
refit_metric = 'recall'
elif scoring_parameter in ['f1', 'f1_score','f1_weighted']:
refit_metric = 'f1'
elif scoring_parameter in ['accuracy', 'balanced_accuracy','balanced-accuracy']:
refit_metric = 'balanced_accuracy'
else:
refit_metric = 'balanced_accuracy'
print('%s problem: hyperparameters are being optimized for %s' %(modeltype,refit_metric))
###########################################################################################
### Make sure you remove variables that are highly correlated within data set first
rem_vars = left_subtract(preds,numvars)
if len(numvars) > 0 and feature_reduction:
numvars = remove_variables_using_fast_correlation(start_train,numvars, 'pearson',
corr_limit,verbose)
### Reduced Preds are now free of correlated variables and hence can be used for Poly adds
red_preds = rem_vars + numvars
#### You need to save a copy of this red_preds so you can later on create a start_train
#### with it after each_target cycle is completed. Very important!
orig_red_preds = copy.deepcopy(red_preds)
for each_target in target:
print('\n############# PROCESSING T A R G E T = %s ##########################' %each_target)
######## D E F I N I N G N E W T R A I N and N E W T E S T here #########################
#### This is where we set the orig train data set with multiple labels to the new start_train
#### start_train has the new features added or reduced with the multi targets in one cycle
### That way, we start each train with one target, and then reset it with multi target
#############################################################################################
train = start_train[[each_target]+red_preds]
if type(orig_test) != str:
test = start_test[red_preds]
###### Add Polynomial Variables and Interaction Variables to Train ######
if Add_Poly >= 1:
if Add_Poly == 1:
print('\nAdding only Interaction Variables. This may result in Overfitting!')
elif Add_Poly == 2:
print('\nAdding only Squared Variables. This may result in Overfitting!')
elif Add_Poly == 3:
print('\nAdding Both Interaction and Squared Variables. This may result in Overfitting!')
## Since the data is already scaled, we set scaling to None here ##
### For train data we have to set the fit_flag to True ####
if len(numvars) > 1:
#### train_red contains reduced numeric variables with original and substituted poly/intxn variables
train_sel, lm, train_red,md,fin_xvars,feature_xvar_dict = add_poly_vars_select(train,numvars,
each_target,modeltype,poly_degree,Add_Poly,md='',
corr_limit=corr_limit, scaling='None',
fit_flag=True,verbose=verbose)
if len(left_subtract(train_sel,numvars)) > 0:
#### This means that new intxn and poly vars were added. In that case, you can use them as is
#### Since these vars were already tested for correlation, there should be no high correlation!
### SO you can take train_sel as the new list of numeric vars (numvars) going forward!
addl_vars = left_subtract(train_sel,numvars)
#numvars = list(set(numvars).intersection(set(train_sel)))
##### Print the additional Interxn and Poly variables here #######
if verbose >= 1:
print(' Intxn and Poly Vars are: %s' %addl_vars)
train = train_red[train_sel].join(train[rem_vars+[each_target]])
red_preds = [x for x in list(train) if x not in [each_target]]
if type(test) != str:
######### Add Polynomial and Interaction variables to Test ################
## Since the data is already scaled, we set scaling to None here ##
### For Test data we have to set the fit_flag to False ####
_, _, test_x_df,_,_,_ = add_poly_vars_select(test,numvars,each_target,
modeltype,poly_degree,Add_Poly,md,
corr_limit, scaling='None', fit_flag=False,
verbose=verbose)
### we need to convert x_vars into text_vars in test_x_df using feature_xvar_dict
test_x_vars = test_x_df.columns.tolist()
test_text_vars = [feature_xvar_dict[x] for x in test_x_vars]
test_x_df.columns = test_text_vars
#### test_red contains reduced variables with orig and substituted poly/intxn variables
test_red = test_x_df[train_sel]
#### we should now combine test_red with rem_vars so that it is the same shape as train
test = test_red.join(test[rem_vars])
#### Now we set numvars to train_sel since that is the new list of numeric vars going forward
numvars = copy.deepcopy(train_sel)
else:
#### NO new variables were added. so we can skip the rest of the stuff now ###
#### This means the train_sel is the new set of numeric features selected by add_poly algorithm
red_preds = train_sel+rem_vars
print(' No new variable was added by polynomial features...')
else:
print('\nAdding Polynomial vars ignored since no numeric vars in data')
train_sel = copy.deepcopy(numvars)
else:
### if there are no Polynomial vars, then all numeric variables are selected
train_sel = copy.deepcopy(numvars)
################ A U T O N L P P R O C E S S I N G B E G I N S H E R E !!! ####
if len(nlp_columns) > 0:
for nlp_column in nlp_columns:
nlp_column_train = train[nlp_column].values
if not isinstance(orig_test, str):
nlp_column_test = test[nlp_column].values
train1, test1, best_nlp_transformer,max_features_limit = Auto_NLP(nlp_column,
train, test, each_target, refit_metric,
modeltype, top_nlp_features, verbose,
build_model=False)
########################################################################
if KMeans_Featurizer:
start_time1 = time.time()
##### Do a clustering of word vectors from each NLP_column. This gives great results!
tfidf_term_array = create_tfidf_terms(nlp_column_train, best_nlp_transformer,
is_train=True, max_features_limit=max_features_limit)
print ('Creating word clusters using term matrix of size: %d for Train data set...' %len(tfidf_term_array['terms']))
num_clusters = int(np.sqrt(len(tfidf_term_array['terms']))/2)
if num_clusters < 2:
num_clusters = 2
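#### e.g., a hypothetical term matrix with 10,000 terms gives sqrt(10000)/2 = 50 word clusters.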
##### Always set verbose to 0 since KMeans output is too verbose otherwise!
km = KMeans(n_clusters=num_clusters, random_state=seed, verbose=0)
kme, cluster_labels = return_cluster_labels(km, tfidf_term_array, num_clusters,
is_train=True)
if isinstance(nlp_column, str):
cluster_col = nlp_column + '_word_cluster_label'
else:
cluster_col = str(nlp_column) + '_word_cluster_label'
train1[cluster_col] = cluster_labels
print ('Created one new column: %s using selected NLP technique...' %cluster_col)
if not isinstance(orig_test, str):
tfidf_term_array_test = create_tfidf_terms(nlp_column_test, best_nlp_transformer,
is_train=False, max_features_limit=max_features_limit)
_, cluster_labels_test = return_cluster_labels(kme, tfidf_term_array_test, num_clusters,
is_train=False)
test1[cluster_col] = cluster_labels_test
print ('Created word clusters using same sized term matrix for Test data set...')
print(' Time Taken for creating word cluster labels = %0.0f seconds' %(time.time()-start_time1) )
####### Make sure you include the above new columns created in the predictor variables!
red_preds = [x for x in list(train1) if x not in [each_target]]
train = train1[red_preds+[each_target]]
if not isinstance(orig_test, str):
test = test1[red_preds]
################ A U T O N L P P R O C E S S I N G E N D S H E R E !!! ####
###### We have to detect float variables again since we have created new variables using Auto_NLP!!
train_sel = np.array(red_preds)[(train[red_preds].dtypes==float).values].tolist()
######### A D D D A T E T I M E F E A T U R E S ####################
if len(date_cols) > 0:
#### Do this only if date time columns exist in your data set!
for date_col in date_cols:
print('Processing %s column for date time features....' %date_col)
date_df_train = create_time_series_features(orig_train, date_col)
if not isinstance(date_df_train, str):
date_col_adds = date_df_train.columns.tolist()
print(' Adding %d columns from date time column %s' %(len(date_col_adds),date_col))
train = train.join(date_df_train)
else:
date_col_adds = []
if not isinstance(orig_test, str):
date_df_test = create_time_series_features(orig_test, date_col)
if not isinstance(date_df_test, str):
test = test.join(date_df_test)
red_preds = [x for x in list(train) if x not in [each_target]]
train_sel = train_sel + date_col_adds
######### SELECT IMPORTANT FEATURES HERE #############################
if feature_reduction:
important_features,num_vars, imp_cats = find_top_features_xgb(train,red_preds,train_sel,
each_target,
modeltype,corr_limit,verbose)
else:
important_features = copy.deepcopy(red_preds)
num_vars = copy.deepcopy(numvars)
#### we need to set the rem_vars in case there is no feature reduction #######
imp_cats = left_subtract(important_features,num_vars)
#####################################################################################
if len(important_features) == 0:
print('No important features found. Using all input features...')
important_features = copy.deepcopy(red_preds)
num_vars = copy.deepcopy(numvars)
#### we need to set the rem_vars in case there is no feature reduction #######
imp_cats = left_subtract(important_features,num_vars)
### Training an XGBoost model to find important features
train = train[important_features+[each_target]]
######################################################################
if type(orig_test) != str:
test = test[important_features]
############## F E A T U R E E N G I N E E R I N G S T A R T S N O W ##############
###### From here on we do some Feature Engg using Target Variable with Data Leakage ############
### To avoid Model Leakage, we will now split the Data into Train and CV so that Held Out Data
## is Pure and is unadulterated by learning from its own Target. This is known as Data Leakage.
###################################################################################################
print('Starting Feature Engineering now...')
X = train[important_features]
y = train[each_target]
################ I M P O R T A N T ##################################################
### The reason we don't use train_test_split is because we want only a partial train entropy binned
### If we use the whole of Train for entropy binning then there will be data leakage and our
### cross validation test scores will not be so accurate. So don't change the next 5 lines here!
################ I M P O R T A N T ##################################################
if modeltype == 'Regression':
skf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
else:
skf = StratifiedKFold(n_splits=n_splits, random_state=seed, shuffle=True)
cv_train_index, cv_index = next(skf.split(X, y))
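#### next(skf.split(X, y)) takes only the FIRST of the n_splits folds, so we get a single
#### train/CV partition here rather than a full cross-validation loop.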
################ TRAIN CV TEST SPLIT HERE ##################################################
try:
#### Sometimes this works but other times, it gives an error!
X_train, X_cv = X.loc[cv_train_index], X.loc[cv_index]
y_train, y_cv = y.loc[cv_train_index], y.loc[cv_index]
### The reason we don't use train_test_split is because we want only a partial train entropy binned
part_train = train.loc[cv_train_index]
part_cv = train.loc[cv_index]
except:
#### This works when the above method gives an error!
X_train, X_cv = X.iloc[cv_train_index], X.iloc[cv_index]
y_train, y_cv = y.iloc[cv_train_index], y.iloc[cv_index]
### The reason we don't use train_test_split is because we want only a partial train entropy binned
part_train = train.iloc[cv_train_index]
part_cv = train.iloc[cv_index]
print('Train CV Split completed with', "TRAIN rows:", cv_train_index.shape[0], "CV rows:", cv_index.shape[0])
################ IMPORTANT ENTROPY BINNING FIRST TIME #####################################
############ Add Entropy Binning of Continuous Variables Here ##############################
num_vars = np.array(important_features)[(train[important_features].dtypes==float)].tolist()
saved_important_features = copy.deepcopy(important_features) ### these are original features without '_bin' added
#### saved_num_vars is an important variable: it contains the orig_num_vars before they were binned
saved_num_vars = copy.deepcopy(num_vars) ### these are original numeric features without '_bin' added
############### BINNING FIRST TIME ##################################################
if Binning_Flag and len(saved_num_vars) > 0:
#### Do binning only when there are numeric features ####
#### When we Bin the first time, we set the entropy_binning flag to False so
#### no numeric variables are removed. But next time, we will remove them later!
part_train, num_vars, important_features, part_cv = add_entropy_binning(part_train,
each_target, saved_num_vars,
saved_important_features, part_cv,
modeltype, entropy_binning=False,verbose=verbose)
#### In saved_num_vars we send in all the continuous_vars but we bin only the top few vars.
### Those that are binned are removed from saved_num_vars and the remaining become num_vars
### Our job is to find the names of those original numeric variables which were binned.
### orig_num_vars contains original num vars. num_vars contains binned versions of those vars.
### Those binned variables have now become categorical vars and must be added to imp_cats.
### you get the name of the original vars which were binned here in this orig_num_vars variable!
orig_num_vars = left_subtract(saved_num_vars,num_vars)
#### you need to know the name of the binner variables. This is where you get it!
binned_num_vars = left_subtract(num_vars,saved_num_vars)
imp_cats += binned_num_vars
#### Also note that important_features does not contain orig_num_vars which have been erased.
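#### Illustrative example (hypothetical names): if saved_num_vars = ['age', 'fare'] and add_entropy_binning
#### returns num_vars = ['fare', 'age_bin'], then orig_num_vars = ['age'] (the variable that was binned)
#### and binned_num_vars = ['age_bin'], which is added to imp_cats as a categorical feature.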
else:
print(' Binning_Flag set to False or there are no numeric vars in data set to be binned')
####################### KMEANS FIRST TIME ############################
### Now we add another Feature tied to KMeans clustering using Predictor and Target variables ###
if KMeans_Featurizer and len(saved_num_vars) > 0:
### DO KMeans Featurizer only if there are numeric features in the data set!
print(' Adding one Feature named "KMeans_Clusters" based on KMeans_Featurizer_Flag=True...')
km_label = 'KMeans_Clusters'
if modeltype != 'Regression':
#### Make the number of clusters the same as log10 of the number of rows in Train
num_clusters = int(np.round(max(2,np.log10(train.shape[0]))))
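#### e.g., a hypothetical 100,000-row train gives log10(100,000) = 5, so 5 KMeans clusters are created.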
train_clusters, cv_clusters = Transform_KM_Features(part_train[
important_features], part_train[each_target],
part_cv[important_features], num_clusters)
else:
### If it is Regression, you don't have to specify the number of clusters
train_clusters, cv_clusters = Transform_KM_Features(part_train[
important_features], part_train[each_target],
part_cv[important_features])
#### Since this is returning the each_target in X_train, we need to drop it here ###
print(' Used KMeans to naturally cluster Train predictor variables into %d clusters' %num_clusters)
part_train[km_label] = train_clusters
part_cv[km_label] = cv_clusters
#X_train.drop(each_target,axis=1,inplace=True)
imp_cats.append(km_label)
for imp_cat in imp_cats:
part_train[imp_cat] = part_train[imp_cat].astype(int)
part_cv[imp_cat] = part_cv[imp_cat].astype(int)
####### The features are checked again once we add the cluster feature ####
important_features.append(km_label)
else:
print(' KMeans_Featurizer set to False or there are no numeric vars in data')
km_label = ''
####################### STACKING FIRST TIME ############################
######### This is where you do Stacking of Multi Model Results into One Column ###
if Stacking_Flag:
#### In order to join, you need X_train to be a Pandas Series here ##
print('Alert! Stacking can produce Highly Overfit models on Training Data...')
### In order to avoid overfitting, we are going to learn from a small sample of data
### That is why we are using X_train to train on and using it to predict on X_cv!
addcol, stacks1 = QuickML_Stacking(part_train[important_features],part_train[
each_target],part_train[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
addcol, stacks2 = QuickML_Stacking(part_train[important_features],part_train[
each_target],part_cv[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
part_train = part_train.join(pd.DataFrame(stacks1,index=cv_train_index,
columns=addcol))
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
part_cv = part_cv.join(pd.DataFrame(stacks2,index=cv_index,
columns=addcol))
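##### stacks1/stacks2 are the stacked-model prediction arrays and addcol holds their column names,
##### so each name in addcol becomes one new stacking feature on part_train and part_cv respectively.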
print(' Adding %d Stacking feature(s) to training data' %len(addcol))
###### We make sure that we remove any new features that are highly correlated ! #####
#addcol = remove_variables_using_fast_correlation(X_train,addcol,corr_limit,verbose)
important_features += addcol
###############################################################################
#### part_train contains the unscaled original train. It also contains binned and orig_num_vars!
#### DO NOT TOUCH part_train and part_cv -> we need them to recreate train later!
####################### Now do Feature Scaling Here #################################
part_train_scaled, part_cv_scaled = perform_scaling_numeric_vars(part_train, important_features,
part_cv, model_name, SS)
#### part_train_scaled has both predictor and target variables. Target must be removed!
important_features = find_remove_duplicates(important_features)
X_train = part_train_scaled[important_features]
X_cv = part_cv_scaled[important_features]
#### Remember that the next 2 lines are crucial: if X and y are dataframes, then predict_proba
### will return dataframes or series. Otherwise it will return Numpy array's.
## Be consistent when using dataframes with XGB. That's the best way to keep feature names!
print('############### M O D E L B U I L D I N G B E G I N S ####################')
print('Rows in Train data set = %d' %X_train.shape[0])
print(' Features in Train data set = %d' %X_train.shape[1])
print(' Rows in held-out data set = %d' %X_cv.shape[0])
data_dim = X_train.shape[0]*X_train.shape[1]
### Setting up the Estimators for Single Label and Multi Label targets only
if modeltype == 'Regression':
metrics_list = ['neg_mean_absolute_error' ,'neg_mean_squared_error',
'neg_mean_squared_log_error','neg_median_absolute_error']
eval_metric = "rmse"
if scoring_parameter == 'neg_mean_absolute_error' or scoring_parameter =='mae':
meae_scorer = make_scorer(gini_meae, greater_is_better=False)
scorer = meae_scorer
elif scoring_parameter == 'neg_mean_squared_error' or scoring_parameter =='mse':
mse_scorer = make_scorer(gini_mse, greater_is_better=False)
scorer = mse_scorer
elif scoring_parameter == 'neg_mean_squared_log_error' or scoring_parameter == 'log_error':
msle_scorer = make_scorer(gini_msle, greater_is_better=False)
print('    Log Error is not recommended since predicted values might be negative, which causes an error')
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
elif scoring_parameter == 'neg_median_absolute_error' or scoring_parameter == 'median_error':
mae_scorer = make_scorer(gini_mae, greater_is_better=False)
scorer = mae_scorer
elif scoring_parameter =='rmse' or scoring_parameter == 'root_mean_squared_error':
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
else:
scoring_parameter = 'rmse'
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
#### HYPER PARAMETERS FOR TUNING ARE SETUP HERE ###
if hyper_param == 'GS':
r_params = {
"Forests": {
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#"criterion" : ['mse','mae'],
},
"Linear": {
'alpha': np.logspace(-5,3),
},
"XGBoost": {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
},
"CatBoost": {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
},
}
else:
import scipy as sp
r_params = {
"Forests": {
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion" : ['mse','mae'],
},
"Linear": {
'alpha': sp.stats.uniform(scale=1000),
},
"XGBoost": {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(2, 10),
},
"CatBoost": {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
},
}
if Boosting_Flag:
if model_name.lower() == 'catboost':
xgbm = CatBoostRegressor(verbose=1,iterations=max_estims,random_state=99,
one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBRegressor(seed=seed,n_jobs=-1,random_state=seed,subsample=subsample,
colsample_bytree=col_sub_sample,n_estimators=max_estims,
objective=objective)
xgbm.set_params(**param)
elif Boosting_Flag is None:
xgbm = Lasso(max_iter=max_iter,random_state=seed)
else:
xgbm = RandomForestRegressor(
**{
'bootstrap': bootstrap, 'n_jobs': -1, 'warm_start': warm_start,
'random_state':seed,'min_samples_leaf':2,
'max_features': "sqrt"
})
else:
#### This is for Binary Classification ##############################
classes = label_dict[each_target]['classes']
metrics_list = ['accuracy_score','roc_auc_score','logloss', 'precision','recall','f1']
# Create regularization hyperparameter distribution with 50 C values ####
if hyper_param == 'GS':
c_params['XGBoost'] = {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
}
c_params["CatBoost"] = {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
}
if Imbalanced_Flag:
c_params['Linear'] = {
'C': Cs,
'solver' : solvers,
'penalty' : penalties,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': Cs,
'class_weight':[None, 'balanced'],
'penalty' : penalties,
'solver' : solvers,
}
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#'max_features': [1,2,5, max_features],
#"criterion":['gini','entropy'],
}
else:
import scipy as sp
c_params['XGBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
}
c_params["CatBoost"] = {
'learning_rate': sp.stats.uniform(scale=1),
}
if Imbalanced_Flag:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'solver' : solvers,
'penalty' : penalties,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'penalty' : penalties,
'solver' : solvers,
}
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion":['gini','entropy'],
#'max_features': ['log', "sqrt"] ,
#'class_weight':[None,'balanced']
}
# Create regularization hyperparameter distribution using uniform distribution
if len(classes) == 2:
objective = 'binary:logistic'
if scoring_parameter == 'accuracy' or scoring_parameter == 'accuracy_score':
accuracy_scorer = make_scorer(gini_accuracy, greater_is_better=True, needs_proba=False)
scorer =accuracy_scorer
elif scoring_parameter == 'gini':
gini_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer =gini_scorer
elif scoring_parameter == 'auc' or scoring_parameter == 'roc_auc' or scoring_parameter == 'roc_auc_score':
roc_scorer = make_scorer(gini_roc, greater_is_better=True, needs_threshold=True)
scorer =roc_scorer
elif scoring_parameter == 'log_loss' or scoring_parameter == 'logloss':
scoring_parameter = 'neg_log_loss'
logloss_scorer = make_scorer(gini_log_loss, greater_is_better=False, needs_proba=False)
scorer =logloss_scorer
elif scoring_parameter=='balanced_accuracy' or scoring_parameter=='balanced-accuracy' or scoring_parameter=='average_accuracy':
bal_accuracy_scorer = make_scorer(gini_bal_accuracy, greater_is_better=True,
needs_proba=False)
scorer = bal_accuracy_scorer
elif scoring_parameter == 'precision' or scoring_parameter == 'precision_score':
precision_scorer = make_scorer(gini_precision, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =precision_scorer
elif scoring_parameter == 'recall' or scoring_parameter == 'recall_score':
recall_scorer = make_scorer(gini_recall, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =recall_scorer
elif scoring_parameter == 'f1' or scoring_parameter == 'f1_score':
f1_scorer = make_scorer(gini_f1, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =f1_scorer
elif scoring_parameter == 'f2' or scoring_parameter == 'f2_score':
f2_scorer = make_scorer(f2_measure, greater_is_better=True, needs_proba=False)
scorer =f2_scorer
else:
logloss_scorer = make_scorer(gini_log_loss, greater_is_better=False, needs_proba=False)
scorer =logloss_scorer
#f1_scorer = make_scorer(gini_f1, greater_is_better=True, needs_proba=False,
# pos_label=rare_class)
#scorer = f1_scorer
### DO NOT USE NUM CLASS WITH BINARY CLASSIFICATION ######
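            #### (XGBoost infers the two classes from objective='binary:logistic'; passing num_class
            ####  as well typically raises an error, which is presumably why it is only set in the
            ####  multi-class branch further below.)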
if Boosting_Flag:
if model_name.lower() == 'catboost':
xgbm = CatBoostClassifier(verbose=1,iterations=max_estims,
random_state=99,one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample,gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=max_estims,
n_jobs=-1, nthread=None, objective=objective,
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
seed=1)
xgbm.set_params(**param)
elif Boosting_Flag is None:
#### I have set the Verbose to be False here since it produces too much output ###
xgbm = LogisticRegression(random_state=seed,verbose=False,n_jobs=-1,solver=solver,
fit_intercept=True, tol=tolerance,
warm_start=warm_start, max_iter=max_iter)
else:
xgbm = RandomForestClassifier(
**{
'bootstrap': bootstrap, 'n_jobs': -1, 'warm_start': warm_start,
'random_state':seed,'min_samples_leaf':2,'oob_score':True,
'max_features': "sqrt"
})
else:
##### This is for MULTI Classification ##########################
objective = 'multi:softmax'
eval_metric = "mlogloss"
if scoring_parameter == 'gini':
gini_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer = gini_scorer
elif scoring_parameter=='balanced_accuracy' or scoring_parameter=='balanced-accuracy' or scoring_parameter=='average_accuracy':
bal_accuracy_scorer = make_scorer(gini_bal_accuracy, greater_is_better=True,
needs_proba=False)
scorer = bal_accuracy_scorer
elif scoring_parameter == 'roc_auc' or scoring_parameter == 'roc_auc_score':
roc_auc_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer = roc_auc_scorer
elif scoring_parameter == 'average_precision' or scoring_parameter == 'mean_precision':
average_precision_scorer = make_scorer(gini_average_precision,
greater_is_better=True, needs_proba=True)
scorer = average_precision_scorer
elif scoring_parameter == 'samples_precision':
samples_precision_scorer = make_scorer(gini_samples_precision,
greater_is_better=True, needs_proba=True)
scorer = samples_precision_scorer
elif scoring_parameter == 'weighted_precision' or scoring_parameter == 'weighted-precision':
weighted_precision_scorer = make_scorer(gini_weighted_precision,
greater_is_better=True, needs_proba=True)
scorer = weighted_precision_scorer
elif scoring_parameter == 'macro_precision':
macro_precision_scorer = make_scorer(gini_macro_precision,
greater_is_better=True, needs_proba=True)
scorer = macro_precision_scorer
elif scoring_parameter == 'micro_precision':
                micro_precision_scorer = make_scorer(gini_micro_precision,
                         greater_is_better=True, needs_proba=True)
                scorer = micro_precision_scorer
elif scoring_parameter == 'samples_recall':
samples_recall_scorer = make_scorer(gini_samples_recall, greater_is_better=True, needs_proba=True)
scorer = samples_recall_scorer
elif scoring_parameter == 'weighted_recall' or scoring_parameter == 'weighted-recall':
weighted_recall_scorer = make_scorer(gini_weighted_recall,
greater_is_better=True, needs_proba=True)
scorer = weighted_recall_scorer
elif scoring_parameter == 'macro_recall':
macro_recall_scorer = make_scorer(gini_macro_recall,
greater_is_better=True, needs_proba=True)
scorer = macro_recall_scorer
elif scoring_parameter == 'micro_recall':
micro_recall_scorer = make_scorer(gini_micro_recall, greater_is_better=True, needs_proba=True)
scorer = micro_recall_scorer
elif scoring_parameter == 'samples_f1':
samples_f1_scorer = make_scorer(gini_samples_f1,
greater_is_better=True, needs_proba=True)
scorer = samples_f1_scorer
elif scoring_parameter == 'weighted_f1' or scoring_parameter == 'weighted-f1':
weighted_f1_scorer = make_scorer(gini_weighted_f1,
greater_is_better=True, needs_proba=True)
scorer = weighted_f1_scorer
elif scoring_parameter == 'macro_f1':
macro_f1_scorer = make_scorer(gini_macro_f1,
greater_is_better=True, needs_proba=True)
scorer = macro_f1_scorer
elif scoring_parameter == 'micro_f1':
micro_f1_scorer = make_scorer(gini_micro_f1,
greater_is_better=True, needs_proba=True)
scorer = micro_f1_scorer
else:
weighted_f1_scorer = make_scorer(gini_weighted_f1,
greater_is_better=True, needs_proba=True)
scorer = weighted_f1_scorer
import scipy as sp
if Boosting_Flag:
# Create regularization hyperparameter distribution using uniform distribution
if hyper_param == 'GS':
c_params['XGBoost'] = {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
}
c_params["CatBoost"] = {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
}
else:
import scipy as sp
c_params['XGBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100, max_estims),
'max_depth': sp.stats.randint(1, 10)
}
c_params['CatBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
}
if model_name.lower() == 'catboost':
xgbm = CatBoostClassifier(verbose=1,iterations=max_estims,
random_state=99,one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample, gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=max_estims,
n_jobs=-1, nthread=None, objective=objective,
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
num_class= len(classes),
seed=1)
xgbm.set_params(**param)
elif Boosting_Flag is None:
if hyper_param == 'GS':
if Imbalanced_Flag:
c_params['Linear'] = {
'C': Cs,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': Cs,
}
else:
if Imbalanced_Flag:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
}
#### I have set the Verbose to be False here since it produces too much output ###
xgbm = LogisticRegression(random_state=seed,verbose=False,n_jobs=-1,solver=solver,
fit_intercept=True, tol=tolerance, multi_class='auto',
max_iter=max_iter, warm_start=False,
)
else:
if hyper_param == 'GS':
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#"criterion":['gini','entropy'],
}
else:
c_params["Forests"] = {
##### I have set these to avoid OverFitting which is a problem for small data sets ###
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion":['gini','entropy'],
#'class_weight':[None,'balanced']
}
xgbm = RandomForestClassifier(bootstrap=bootstrap, oob_score=True,warm_start=warm_start,
n_estimators=100,max_depth=3,
                                     min_samples_leaf=2,max_features='sqrt',
random_state=seed,n_jobs=-1)
    ###### Now set up GridSearchCV / RandomizedSearchCV (with early stopping where supported) ################
if modeltype == 'Regression':
#scoreFunction = {"mse": "neg_mean_squared_error", "mae": "neg_mean_absolute_error"}
if hyper_param == 'GS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = GridSearchCV(xgbm,param_grid=r_params[model_name],
scoring = scorer,
n_jobs=-1,
cv = scv,
refit = refit_metric,
return_train_score = True,
verbose=0)
elif hyper_param == 'RS':
gs = RandomizedSearchCV(xgbm,
param_distributions = r_params[model_name],
n_iter = no_iter,
scoring = scorer,
refit = refit_metric,
return_train_score = True,
random_state = seed,
cv = scv,
n_jobs=-1,
verbose = 0)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
gs = copy.deepcopy(xgbm)
else:
if hyper_param == 'GS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = GridSearchCV(xgbm,param_grid=c_params[model_name],
scoring = scorer,
return_train_score = True,
n_jobs=-1,
refit = refit_metric,
cv = scv,
verbose=0)
elif hyper_param == 'RS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = RandomizedSearchCV(xgbm,
param_distributions = c_params[model_name],
n_iter = no_iter,
scoring = scorer,
refit = refit_metric,
return_train_score = True,
random_state = seed,
n_jobs=-1,
cv = scv,
verbose = 0)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
gs = copy.deepcopy(xgbm)
#trains and optimizes the model
eval_set = [(X_train,y_train),(X_cv,y_cv)]
print('Finding Best Model and Hyper Parameters for Target: %s...' %each_target)
##### Here is where we put the part_train and part_cv together ###########
if modeltype != 'Regression':
### Do this only for Binary Classes and Multi-Classes, both are okay
baseline_accu = 1-(train[each_target].value_counts(1).sort_values())[rare_class]
print(' Baseline Accuracy Needed for Model = %0.2f%%' %(baseline_accu*100))
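        #### This baseline is simply the share of rows that are not in the rare class (1 minus the
        #### rare-class proportion); a useful model should beat it.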
    print('CPU Count = %s on this device' %CPU_count)
if modeltype == 'Regression':
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(3000000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(50000.*CPU_count)))
elif Boosting_Flag is None:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(80000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(40000.*CPU_count)))
else:
if hyper_param == 'GS':
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(300000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(10000.*CPU_count)))
elif Boosting_Flag is None:
#### A Linear model is usually the fastest ###########
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(50000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(16000.*CPU_count)))
else:
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(3000000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(40000.*CPU_count)))
elif Boosting_Flag is None:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(100000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(25000.*CPU_count)))
##### Since we are using Multiple Models each with its own quirks, we have to make sure it is done this way
##### ############ TRAINING MODEL FIRST TIME WITH X_TRAIN AND TESTING ON X_CV ############
model_start_time = time.time()
################################################################################################################################
##### BE VERY CAREFUL ABOUT MODIFYING THIS NEXT LINE JUST BECAUSE IT APPEARS TO BE A CODING MISTAKE. IT IS NOT!! #############
################################################################################################################################
#######
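    #### Control flow summary: the SMOTE path below is attempted first; if it errors out or returns
    #### an empty-string model, Imbalanced_Flag is reset to False and the regular training block
    #### further down runs instead. That is why similar fitting code appears twice in this section.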
if Imbalanced_Flag:
if modeltype == 'Regression':
########### In case someone sets the Imbalanced_Flag mistakenly to True and it is Regression, you must set it to False ######
Imbalanced_Flag = False
else:
####### Imbalanced with Classification #################
try:
print('############## Imbalanced Flag on: Training model with SMOTE Oversampling method ###########')
                #### The model returned here is trained on the SMOTE-resampled (oversampled) data set. ####
model, X_train, y_train = training_with_SMOTE(X_train,y_train,eval_set, gs,
Boosting_Flag, eval_metric,
modeltype, model_name,training=True,
minority_class=rare_class,imp_cats=imp_cats,
calibrator_flag=calibrator_flag,
GPU_exists=GPU_exists, params = cpu_params,
verbose=verbose)
if isinstance(model, str):
model = copy.deepcopy(gs)
                        #### If the SMOTE-trained model failed, it comes back as an empty string, so we fall back to the regular model ###
print('Error in training Imbalanced model first time. Trying regular model..')
Imbalanced_Flag = False
if Boosting_Flag:
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
try:
model.fit(X_train, y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=0)
except:
#### On Colab, even though GPU exists, many people don't turn it on.
#### In that case, XGBoost blows up when gpu_predictor is used.
#### This is to turn it back to cpu_predictor in case GPU errors!
if GPU_exists:
print('Error: GPU exists but it is not turned on. Using CPU for predictions...')
model.estimator.set_params(**cpu_params)
model.fit(X_train,y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=False)
else:
model.fit(X_train,y_train,
eval_metric=eval_metric, verbose=False)
else:
try:
model.fit(X_train, y_train,
cat_features=imp_cats,eval_set=(X_cv,y_cv), use_best_model=True,plot=True)
except:
model.fit(X_train, y_train, cat_features=imp_cats,use_best_model=False,plot=False)
else:
model.fit(X_train, y_train)
#### If downsampling succeeds, it will be used to get the best score and can become model again ##
if hyper_param == 'RS' or hyper_param == 'GS':
best_score = model.best_score_
else:
val_keys = list(model.best_score_.keys())
best_score = model.best_score_[val_keys[-1]][validation_metric]
except:
print('Error in training Imbalanced model first time. Trying regular model..')
Imbalanced_Flag = False
best_score = 0
################################################################################################################################
####### Though this next step looks like it is a Coding Mistake by Me, don't change it!!! ###################
####### This is for case when Imbalanced with Classification succeeds, this next step is skipped ############
################################################################################################################################
if not Imbalanced_Flag:
########### This is for both regular Regression and regular Classification Model Training. It is not a Mistake #############
########### In case Imbalanced training fails, this method is also tried. That's why we test the Flag here!! #############
try:
model = copy.deepcopy(gs)
if Boosting_Flag:
if model_name == 'XGBoost':
try:
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X_train, y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=0)
except:
#### On Colab, even though GPU exists, many people don't turn it on.
#### In that case, XGBoost blows up when gpu_predictor is used.
#### This is to turn it back to cpu_predictor in case GPU errors!
if GPU_exists:
print('Error: GPU exists but it is not turned on. Using CPU for predictions...')
model.estimator.set_params(**cpu_params)
model.fit(X_train,y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=False)
else:
model.fit(X_train,y_train,
eval_metric=eval_metric, verbose=False)
else:
try:
model.fit(X_train, y_train, cat_features=imp_cats,
eval_set=(X_cv,y_cv), use_best_model=True, plot=True)
except:
model.fit(X_train, y_train, cat_features=imp_cats, use_best_model=False, plot=False)
else:
model.fit(X_train, y_train)
except:
            print('Error training the regular model the first time: check if your input is correct...')
return
try:
if hyper_param == 'RS' or hyper_param == 'GS':
best_score = model.best_score_
validation_metric = copy.deepcopy(scoring_parameter)
else:
val_keys = list(model.best_score_.keys())
if 'validation' in val_keys:
validation_metric = list(model.best_score_['validation'].keys())[0]
best_score = model.best_score_['validation'][validation_metric]
else:
validation_metric = list(model.best_score_['learn'].keys())[0]
best_score = model.best_score_['learn'][validation_metric]
except:
print('Error: Not able to print validation metrics. Continuing...')
## TRAINING OF MODELS COMPLETED. NOW GET METRICS on CV DATA ################
print(' Actual training time (in seconds): %0.0f' %(time.time()-model_start_time))
print('########### S I N G L E M O D E L R E S U L T S #################')
if modeltype != 'Regression':
############## This is for Classification Only !! ########################
if scoring_parameter in ['logloss','neg_log_loss','log_loss','log-loss','']:
print('{}-fold Cross Validation {} = {}'.format(n_splits, 'logloss', best_score))
elif scoring_parameter in ['accuracy','balanced-accuracy','balanced_accuracy','roc_auc','roc-auc',
'f1','precision','recall','average-precision','average_precision',
'weighted_f1','weighted-f1','AUC']:
print('%d-fold Cross Validation %s = %0.1f%%' %(n_splits,scoring_parameter, best_score*100))
else:
print('%d-fold Cross Validation %s = %0.1f' %(n_splits,validation_metric, best_score))
else:
######### This is for Regression only ###############
if best_score < 0:
best_score = best_score*-1
if scoring_parameter == '':
print('%d-fold Cross Validation %s Score = %0.4f' %(n_splits,'RMSE', best_score))
else:
print('%d-fold Cross Validation %s Score = %0.4f' %(n_splits,validation_metric, best_score))
#### We now need to set the Best Parameters, Fit the Model on Full X_train and Predict on X_cv
### Find what the order of best params are and set the same as the original model ###
if hyper_param == 'RS' or hyper_param == 'GS':
best_params= model.best_params_
print(' Best Parameters for Model = %s' %model.best_params_)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
#### CatBoost does not need too many iterations. Just make sure you set the iterations low after the first time!
if model.get_best_iteration() == 0:
### In some small data sets, the number of iterations becomes zero, hence we set it as a default number
best_params = dict(zip(['iterations','learning_rate'],[1000,model.get_all_params()['learning_rate']]))
else:
best_params = dict(zip(['iterations','learning_rate'],[model.get_best_iteration(),model.get_all_params()['learning_rate']]))
print(' %s Best Parameters for Model: Iterations = %s, learning_rate = %0.2f' %(
model_name, model.get_best_iteration(), model.get_all_params()['learning_rate']))
if hyper_param == 'RS' or hyper_param == 'GS':
#### In the case of CatBoost, we don't do any Hyper Parameter tuning #########
gs = copy.deepcopy(model)
model = gs.best_estimator_
if modeltype == 'Multi_Classification':
try:
if X_cv.shape[0] <= 1000:
                # Sigmoid (Platt scaling) is parametric and works well for small CV sets
                method = 'sigmoid'
            else:
                # Isotonic regression is non-parametric and works better with larger CV sets
                method = 'isotonic'
model = CalibratedClassifierCV(model, method=method, cv="prefit")
model.fit(X_train, y_train)
print('Using a Calibrated Classifier in this Multi_Classification dataset to improve results...')
calibrator_flag = True
except:
calibrator_flag = False
pass
    ### performed_ensembling is initialized to False below and flipped to True only after ensembling completes ##
if model_name.lower() == 'catboost':
print('Best Model selected and its parameters are:\n %s' %model.get_all_params())
else:
print('Best Model selected and its parameters are:\n %s' %model)
performed_ensembling = False
if modeltype != 'Regression':
m_thresh = 0.5
y_proba = model.predict_proba(X_cv)
y_pred = model.predict(X_cv)
if len(classes) <= 2:
print('Finding Best Threshold for Highest F1 Score...')
precision, recall, thresholds = precision_recall_curve(y_cv, y_proba[:,rare_class])
#precision, recall, thresholds = precision_recall_curve(y_cv, y_proba[:,1])
try:
f1 = (2*precision*recall)/(precision+recall)
f1 = np.nan_to_num(f1)
m_idx = np.argmax(f1)
m_thresh = thresholds[m_idx]
best_f1 = f1[m_idx]
except:
best_f1 = f1_score(y_cv, y_pred)
m_thresh = 0.5
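            #### Note: precision and recall have one more element than thresholds, so if the F1 argmax
            #### lands on the final element the indexing above fails and this 0.5 fallback is used;
            #### np.nan_to_num also guards the 0/0 case where precision and recall are both zero.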
# retrieve just the probabilities for the positive class
pos_probs = y_proba[:, rare_class]
if verbose >= 1:
# create a histogram of the predicted probabilities for the Rare Class since it will help decide threshold
plt.figure(figsize=(6,6))
plt.hist(pos_probs, bins=Bins, color='g')
plt.title("Model's Predictive Probability Histogram for Rare Class=%s with suggested threshold in red" %rare_class_orig)
plt.axvline(x=m_thresh, color='r', linestyle='--')
plt.show();
print(" Using threshold=0.5. However, %0.3f provides better F1=%0.2f for rare class..." %(m_thresh,best_f1))
###y_pred = (y_proba[:,rare_class]>=m_thresh).astype(int)
predicted = copy.deepcopy(y_proba)
predicted [:,0] = (predicted [:,0] >= (1-m_thresh)).astype('int')
predicted [:,1] = (predicted [:,1] > m_thresh).astype('int')
if m_thresh != 0.5:
y_pred = predicted[:,rare_class]
else:
y_proba = model.predict_proba(X_cv)
y_pred = model.predict(X_cv)
else:
y_pred = model.predict(X_cv)
### This is where you print out the First Model's Results ########
print('########################################################')
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values
if isinstance(y_cv,pd.Series):
y_cv = y_cv.values
print('%s Model Prediction Results on Held Out CV Data Set:' %model_name)
if modeltype == 'Regression':
rmsle_calculated_m = rmse(y_cv, y_pred)
print_regression_model_stats(y_cv, y_pred,'%s Model: Predicted vs Actual for %s'%(model_name,each_target))
else:
if model_name == 'Forests':
if calibrator_flag:
print(' OOB Score = %0.3f' %model.base_estimator.oob_score_)
else:
print(' OOB Score = %0.3f' %model.oob_score_)
rmsle_calculated_m = balanced_accuracy_score(y_cv,y_pred)
if len(classes) == 2:
print(' Regular Accuracy Score = %0.1f%%' %(accuracy_score(y_cv,y_pred)*100))
y_probas = model.predict_proba(X_cv)
rmsle_calculated_m = print_classification_model_stats(y_cv, y_probas, m_thresh)
else:
###### Use a nice classification matrix printing module here #########
print(' Balanced Accuracy Score = %0.1f%%' %(rmsle_calculated_m*100))
print(classification_report(y_cv,y_pred))
print(confusion_matrix(y_cv, y_pred))
###### SET BEST PARAMETERS HERE ######
### Find what the order of best params are and set the same as the original model ###
## This is where we set the best parameters from training to the model ####
if modeltype == 'Regression':
if not Stacking_Flag:
print('################# E N S E M B L E M O D E L ##################')
try:
cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype=modeltype, Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
else:
subm[new_col] = cv_ensembles[:,each]
cols.append(new_col)
if len(cols) == 5:
print(' Displaying results of weighted average ensemble of %d regressors' %len(cols))
ensem_pred = subm[cols[-1]]*0.5+0.125*(subm[cols[0]]+subm[
cols[1]]+subm[cols[2]]+subm[cols[3]])
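                    #### Weighting: the tuned single model (last column) gets weight 0.5 and each of the
                    #### four QuickML ensemble models gets 0.125, so the weights sum to 1.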
else:
print(' Calculating regular average ensemble of %d regressors' %len(cols))
ensem_pred = (subm[cols].mean(axis=1))
print('#############################################################################')
performed_ensembling = True
#### Since we have a new ensembled y_pred, make sure it is series or array before printing it!
if isinstance(y_pred,pd.Series):
print_regression_model_stats(y_cv, ensem_pred.values,'Ensemble Model: Model Predicted vs Actual for %s' %each_target)
else:
print_regression_model_stats(y_cv, ensem_pred,'Ensemble Model: Model Predicted vs Actual for %s' %each_target)
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
## This is for Classification Problems Only #
### Find what the order of best params are and set the same as the original model ###
## This is where we set the best parameters from training to the model ####
if not Stacking_Flag:
print('################# E N S E M B L E M O D E L ##################')
#### We do Ensembling only if the Stacking_Flag is False. Otherwise, we don't!
try:
classes = label_dict[each_target]['classes']
cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
if len(classes) == 2:
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
else:
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
else:
subm[new_col] = cv_ensembles[:,each]
cols.append(new_col)
if len(cols) == 5:
print(' Displaying results of weighted average ensemble of %d classifiers' %len(cols))
ensem_pred = np.round(subm[cols[-1]]*0.5+0.125*(subm[cols[0]]+subm[
cols[1]]+subm[cols[2]]+subm[cols[3]])).astype(int)
else:
print(' Calculating regular average ensemble of %d classifiers' %len(cols))
ensem_pred = (subm[cols].mean(axis=1)).astype(int)
print('#############################################################################')
performed_ensembling = True
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
print('No Ensembling of models done since Stacking_Flag = True ')
if verbose >= 1:
if len(classes) == 2:
plot_classification_results(model,X_cv, y_cv, y_pred, classes, class_nums, each_target )
else:
try:
Draw_ROC_MC_ML(model, X_cv, y_cv, each_target, model_name, verbose)
Draw_MC_ML_PR_ROC_Curves(model,X_cv,y_cv)
except:
print('Could not plot PR and ROC curves. Continuing...')
#### In case there are special scoring_parameter requests, you can print it here!
if scoring_parameter == 'roc_auc' or scoring_parameter == 'auc':
if len(classes) == 2:
print(' ROC AUC Score = %0.1f%%' %(roc_auc_score(y_cv, y_proba[:,rare_class])*100))
else:
print(' No ROC AUC score for multi-class problems')
elif scoring_parameter == 'jaccard':
accu_all = jaccard_singlelabel(y_cv, y_pred)
print(' Mean Jaccard Similarity = {:,.1f}%'.format(
accu_all*100))
## This is for multi-label problems ##
if count == 0:
zipped = copy.deepcopy(y_pred)
count += 1
else:
zipped = zip(zipped,y_pred)
count += 1
elif scoring_parameter == 'basket_recall':
if count == 0:
zipped = copy.deepcopy(y_pred)
count += 1
else:
zipped = zip(zipped,y_pred)
count += 1
if not Stacking_Flag and performed_ensembling:
if modeltype == 'Regression':
rmsle_calculated_f = rmse(y_cv, y_pred)
print('After multiple models, Ensemble Model Results:')
print(' RMSE Score = %0.5f' %(rmsle_calculated_f,))
print('#############################################################################')
if rmsle_calculated_f < rmsle_calculated_m:
print('Ensembling Models is better than Single Model for this data set.')
error_rate.append(rmsle_calculated_f)
else:
print('Single Model is better than Ensembling Models for this data set.')
error_rate.append(rmsle_calculated_m)
else:
rmsle_calculated_f = balanced_accuracy_score(y_cv,y_pred)
print('After multiple models, Ensemble Model Results:')
rare_pct = y_cv[y_cv==rare_class].shape[0]/y_cv.shape[0]
print(' Balanced Accuracy Score = %0.3f%%' %(
rmsle_calculated_f*100))
print(classification_report(y_cv,y_pred))
print(confusion_matrix(y_cv,y_pred))
print('#############################################################################')
if rmsle_calculated_f > rmsle_calculated_m:
print('Ensembling Models is better than Single Model for this data set.')
error_rate.append(rmsle_calculated_f)
else:
print('Single Model is better than Ensembling Models for this data set.')
error_rate.append(rmsle_calculated_m)
if verbose >= 1:
if Boosting_Flag:
try:
if model_name.lower() == 'catboost':
plot_xgb_metrics(model,catboost_scoring,eval_set,modeltype,'%s Results' %each_target,
model_name)
else:
plot_xgb_metrics(gs.best_estimator_,eval_metric,eval_set,modeltype,'%s Results' %each_target,
model_name)
except:
print('Could not plot Model Evaluation Results Metrics')
else:
try:
plot_RS_params(gs.cv_results_, scoring_parameter, each_target)
except:
print('Could not plot Cross Validation Parameters')
print(' Time taken for this Target (in seconds) = %0.0f' %(time.time()-start_time))
    print('Training model on complete Train data and Predicting using the given Test Data...')
################ I M P O R T A N T: C O M B I N I N G D A T A ######################
    #### This is the second time: we combine part_train and part_cv back into the full Train set #################
train = part_train.append(part_cv)
important_features = [x for x in list(train) if x not in [each_target]]
############################################################################################
###### Now that we have used partial data to make stacking predictors, we can remove them from consideration!
if Stacking_Flag:
important_features = left_subtract(important_features, addcol)
try:
train.drop(addcol,axis=1, inplace=True)
except:
pass
###### Similarly we will have to create KMeans_Clusters again using full Train data!
if KMeans_Featurizer:
important_features = left_subtract(important_features, km_label)
try:
train.drop(km_label,axis=1, inplace=True)
except:
pass
########################## BINNING SECOND TIME ###############################
new_num_vars = np.array(important_features)[(train[important_features].dtypes==float)].tolist()
## Now we re-use the saved_num_vars which contained a list of num_vars for binning now!
###### Once again we do Entropy Binning on the Full Train Data Set !!
if Binning_Flag and len(saved_num_vars) > 0:
### when you bin the second time, you have to send in important_features with original
### numeric variables so that it works on binning only those. Otherwise it will fail.
### Do Entropy Binning only if there are numeric variables in the data set! #####
        #### When we bin this second time, we set the entropy_binning flag to True so
#### that all numeric variables that are binned are removed. This way, only bins remain.
train, num_vars, important_features, test = add_entropy_binning(train, each_target,
orig_num_vars, important_features, test,
modeltype, entropy_binning=True,verbose=verbose)
#### In saved_num_vars we send in all the continuous_vars but we bin only the top few vars.
### Those that are binned are removed from saved_num_vars and the remaining become num_vars
### Our job is to find the names of those original numeric variables which were binned.
### orig_num_vars contains original num vars. num_vars contains binned versions of those vars.
### Those binned variables have now become categorical vars and must be added to imp_cats.
#### Also note that important_features does not contain orig_num_vars which have been erased.
else:
print(' Binning_Flag set to False or there are no numeric vars in data set to be binned')
### Now we add another Feature tied to KMeans clustering using Predictor and Target variables ###
####################### KMEANS SECOND TIME ############################
if KMeans_Featurizer and len(saved_num_vars) > 0:
#### Perform KMeans Featurizer only if there are numeric variables in data set! #########
print('Adding one feature named "KMeans_Clusters" using KMeans_Featurizer...')
km_label = 'KMeans_Clusters'
if modeltype != 'Regression':
            #### Set the number of clusters roughly equal to log10 of the number of rows in Train
train_cluster, test_cluster = Transform_KM_Features(train[important_features], train[each_target], test[important_features], num_clusters)
else:
train_cluster, test_cluster = Transform_KM_Features(train[important_features], train[each_target], test[important_features])
#### Now make sure that the cat features are either string or integers ######
print(' Used KMeans to naturally cluster Train predictor variables into %d clusters' %num_clusters)
train[km_label] = train_cluster
if not isinstance(test, str):
test[km_label] = test_cluster
#X_train.drop(each_target,axis=1,inplace=True)
for imp_cat in imp_cats:
train[imp_cat] = train[imp_cat].astype(int)
if not isinstance(test, str):
test[imp_cat] = test[imp_cat].astype(int)
saved_num_vars.append(km_label) ### You need to add it to this variable list for Scaling later!
important_features.append(km_label)
########################## STACKING SECOND TIME ###############################
######### This is where you do Stacking of Multi Model Results into One Column ###
if Stacking_Flag:
#### In order to join, you need X_train to be a Pandas Series here ##
print('CAUTION: Stacking can produce Highly Overfit models on Training Data...')
        ### To limit overfitting, the stacking features here are generated from model predictions
        ### on the full Train data (see the QuickML_Stacking call just below).
addcol, stacks1 = QuickML_Stacking(train[important_features],train[each_target],'',
modeltype, Boosting_Flag, scoring_parameter,verbose)
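        #### QuickML_Stacking returns the new stacking column names (addcol) and an array of stacked
        #### predictions with one column per name; both are joined onto the full Train data below.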
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
#### The reason we add the word "Partial_Train" is to show that these Stacking results are from Partial Train data!
addcols = copy.deepcopy(addcol)
train = train.join(pd.DataFrame(stacks1,index=train.index,
columns=addcols))
##### Leaving multiple columns for Stacking is best! Do not do the average of predictions!
print(' Adding %d Stacking feature(s) to training data' %len(addcols))
if not isinstance(orig_test, str):
### In order to avoid overfitting, we are going to learn from a small sample of data
### That is why we are using X_train to train on and using it to predict on X_test
_, stacks2 = QuickML_Stacking(train[important_features],train[each_target],test[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
test = test.join(pd.DataFrame(stacks2,index=test.index,
columns=addcols))
#test = test.join(pd.DataFrame(stacks2.mean(axis=1).round().astype(int),
# columns=[addcol],index=test.index))
###### We make sure that we remove too many features that are highly correlated ! #####
#addcol = remove_variables_using_fast_correlation(train,addcol,corr_limit,verbose)
important_features += addcols
saved_num_vars.append(addcol) ### You need to add it for binning later!
############################################################################################
if len(important_features) == 0:
print('No important features found. Using all input features...')
important_features = copy.deepcopy(saved_important_features)
#important_features = copy.deepcopy(red_preds)
############################################################################################
if model_name.lower() == 'catboost':
        print('    Setting best params on a fresh CatBoost model, since the params of an already fitted CatBoost model cannot be changed')
model = xgbm.set_params(**best_params)
print(' Number of Categorical and Integer variables used in CatBoost training = %d' %len(imp_cats))
#### Perform Scaling of Train data a second time using FULL TRAIN data set this time !
#### important_features keeps track of all variables that we need to ensure they are scaled!
train, test = perform_scaling_numeric_vars(train, important_features, test,
model_name, SS)
################ T R A I N I N G M O D E L A S E C O N D T I M E ###################
### The next 2 lines are crucial: if X and y are dataframes, then next 2 should be df's
### They should not be df.values since they will become numpy arrays and XGB will error.
trainm = train[important_features+[each_target]]
red_preds = copy.deepcopy(important_features)
X = trainm[red_preds]
y = trainm[each_target]
eval_set = [()]
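    #### eval_set is just a placeholder here: in this second, full-data training pass there is no
    #### separate X_cv/y_cv split (the direct fit calls below do not pass an eval_set).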
##### ############ TRAINING MODEL SECOND TIME WITH FULL_TRAIN AND PREDICTING ON TEST ############
model_start_time = time.time()
if modeltype != 'Regression':
if Imbalanced_Flag:
try:
print('################## Imbalanced Flag Set ############################')
print('Imbalanced Class Training using SMOTE Rare Class Oversampling method...')
model, X, y = training_with_SMOTE(X,y, eval_set, model,
Boosting_Flag, eval_metric,modeltype, model_name,
training=False, minority_class=rare_class,
imp_cats=imp_cats, calibrator_flag=calibrator_flag,
GPU_exists=GPU_exists, params=cpu_params,
verbose=verbose)
if isinstance(model, str):
#### If downsampling model failed, it will just be an empty string, so you can try regular model ###
model = copy.deepcopy(best_model)
print('Error in training Imbalanced model second time. Trying regular model..')
Imbalanced_Flag = False
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
#### Set the Verbose to 0 since we don't want too much output ##
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
except:
print('Error in training Imbalanced model second time. Trying regular model..')
Imbalanced_Flag = False
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
else:
try:
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
### Since second time we don't have X_cv, we remove it
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
except:
                print('Error training the regular model the second time: check if your input is correct...')
return
else:
try:
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, use_best_model=False, plot=False)
else:
model.fit(X, y)
except:
            print('Error training the model the second time: check if your input is correct...')
return
print('Actual Training time taken in seconds = %0.0f' %(time.time()-model_start_time))
## TRAINING OF MODELS COMPLETED. NOW START PREDICTIONS ON TEST DATA ################
#### new_cols is to keep track of new prediction columns we are creating #####
new_cols = []
if not isinstance(orig_test, str):
### If there is a test data frame, then let us predict on it #######
### The next 3 lines are crucial: if X and y are dataframes, then next 2 should be df's
### They should not be df.values since they will become numpy arrays and XGB will error.
try:
#### We need the id columns to carry over into the predictions ####
testm = orig_test[id_cols].join(test[red_preds])
except:
### if for some reason id columns are not available, then do without it
testm = test[red_preds]
X_test = testm[red_preds]
else:
##### If there is no Test file, then do a final prediction on Train itself ###
orig_index = orig_train.index
trainm = train.reindex(index = orig_index)
testm = orig_train[id_cols].join(trainm[red_preds])
X_test = testm[red_preds]
if modeltype == 'Regression':
y_pred = model.predict(X_test)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values
######## This is for Regression Problems Only ###########
###### If Stacking_ Flag is False, then we do Ensembling #######
if not Stacking_Flag:
try:
new_cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
#### In Test data verbose is set to zero since no results can be obtained!
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype=modeltype, Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = y_pred
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = ensembles[:,each]
new_cols.append(new_col)
### After this, y_pred is a Series from now on. You need y_pred.values ####
if len(new_cols) == 5:
print(' Calculating weighted average ensemble of %d regressors' %len(new_cols))
ensem_pred = subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])
else:
                    print('    Calculating regular average ensemble of %d regressors' %len(new_cols))
ensem_pred = (subm[new_cols].mean(axis=1))
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
ensem_pred = ensem_pred.values
new_col = each_target+'_Ensembled_predictions'
testm[new_col] = ensem_pred
new_cols.append(new_col)
print('Completed Ensemble predictions on held out data')
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,
scoring_parameter,verbose=verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
if len(stack_cols) == 1:
testm[new_col] = stacksfinal
else:
#### Just average the predictions from each stacked model into a final pred
testm[new_col] = stacksfinal.mean(axis=1)
if not isinstance(sample_submission, str):
sample_submission[each_target] = y_pred
#### If there is a test file, it probably doesn't have target, so add predictions to it!
testm[each_target+'_predictions'] = y_pred
else:
proba_cols = []
######## This is for both Binary and Multi Classification Problems ###########
y_proba = model.predict_proba(X_test)
y_pred = model.predict(X_test)
predicted = copy.deepcopy(y_proba)
if len(classes) <= 2:
predicted [:,0] = (predicted [:,0] >= (1-m_thresh)).astype('int')
predicted [:,1] = (predicted [:,1] > m_thresh).astype('int')
if predicted[:,rare_class].mean()==0 or predicted[:,rare_class].mean()==1:
### If the model is predicting all 0's or all 1's, you need to use a regular threshold
m_thresh = 0.5
print(' Making test Data predictions using regular Threshold = %0.3f' %m_thresh)
else:
### If the model is good with the modified threshold, then you use the modified threshold!
print(' Making test Data predictions using modified Threshold = %0.3f' %m_thresh)
y_pred = predicted[:,rare_class]
else:
##### For multi-class, just make predictions of multiple classes here #######
y_pred = model.predict(X_test)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values.astype(int)
else:
                    ### In a small number of cases y_pred is a 2-D array with a single column.
                    ### That causes errors later, so we flatten it to a 1-D array here.
try:
if y_pred.shape[1] == 1:
y_pred = y_pred.ravel()
except:
y_pred = y_pred.astype(int)
if len(label_dict[each_target]['transformer']) == 0:
######### NO T R A N S F O R M E R L O G I C B E G I N S H E R E ! #####################
### if there is no transformer, then leave the predicted classes as is
classes = label_dict[each_target]['classes']
##### If there is no transformer, you can just predict the classes as is and save it here ###
testm[each_target+'_predictions'] = y_pred
###### If Stacking_Flag is False, then we do Ensembling #######
if not Stacking_Flag:
### Ensembling is not done when the model name is CatBoost ####
new_cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
#### In Test data verbose is set to zero since no results can be obtained!
if len(classes) == 2:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
else:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = y_pred
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = ensembles[:,each]
new_cols.append(new_col)
### You will need to create probabilities for each class here ####
for each_class in classes:
if isinstance(each_class, str):
proba_col = each_target+'_proba_'+each_class
else:
proba_col = each_target+'_proba_'+str(each_class)
count = int(label_dict[each_target]['dictionary'][each_class])
testm[proba_col] = y_proba[:,count]
proba_cols.append(proba_col)
if not Stacking_Flag:
new_col = each_target+'_Ensembled_predictions'
if len(new_cols) == 5:
print(' Calculating weighted average ensemble of %d classifiers' %len(new_cols))
ensem_pred = np.round(subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])).astype(int)
else:
print(' Calculating average ensemble of %d classifiers' %len(new_cols))
ensem_pred = (subm[new_cols].mean(axis=1)).astype(int)
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,scoring_parameter,verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
ensem_pred = np.argmax(stacksfinal,axis=1)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
testm[new_col] = ensem_pred
new_cols.append(new_col)
if not isinstance(sample_submission, str):
sample_submission[each_target] = y_pred
else:
######### T R A N S F O R M E R L O G I C B E G I N S H E R E ! #####################
### if there is a transformer, then you must convert the predicted classes to orig classes
classes = label_dict[each_target]['classes']
dic = label_dict[each_target]['dictionary']
transformer = label_dict[each_target]['transformer']
class_nums = label_dict[each_target]['class_nums']
##### If there is a transformer, you must convert predictions to original classes
testm[each_target+'_predictions'] = pd.Series(y_pred).map(transformer).values
for each_class in classes:
if isinstance(each_class, str):
proba_col = each_target+'_proba_'+each_class
else:
proba_col = each_target+'_proba_'+str(each_class)
count = label_dict[each_target]['dictionary'][each_class]
testm[proba_col] = y_proba[:,count]
proba_cols.append(proba_col)
###### If Stacking_ Flag is False, then we do Ensembling #######
if not Stacking_Flag:
subm = pd.DataFrame()
#### This is for Ensembling Only #####
if len(classes) == 2:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
else:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = pd.Series(y_pred).map(transformer).values
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = pd.Series(ensembles[:,each]).map(transformer).values
new_cols.append(new_col)
### After this, y_pred is a Series from now on. You need y_pred.values ####
                    if len(new_cols) == 5:
print(' Calculating weighted average ensemble of %d classifiers' %len(new_cols))
ensem_pred = np.round(subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])).astype(int)
else:
print(' Calculating regular average ensemble of %d classifiers' %len(new_cols))
ensem_pred = (subm[new_cols].mean(axis=1)).astype(int)
print('########################################################')
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
print('Completed Ensemble predictions on held out data')
new_col = each_target+'_Ensembled_predictions'
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,scoring_parameter,verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
ensem_pred = np.argmax(stacksfinal,axis=1)
print('########################################################')
print('Completed Stacked predictions on held out data')
testm[new_col] = | pd.Series(ensem_pred) | pandas.Series |
import plotly.graph_objects as go
import pandas as pd
import plotly.express as px
from datetime import datetime, timedelta
import requests
import json
import time
def read():
df1 = pd.read_csv("CSV/ETH_BTC_USD_2015-08-09_2020-04-04-CoinDesk.csv")
df1.columns = ['date', 'ETH', 'BTC']
df1.date = pd.to_datetime(df1.date, dayfirst=True)
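    # dayfirst=True assumes the CoinDesk export writes dates day-first (e.g. 31/12/2019);
    # for unambiguous ISO dates (2019-12-31) the flag makes no difference, so the parse works either way.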
df1.set_index('date', inplace=True)
EOS = | pd.read_csv("ICO_coins/EOS_USD_2018-06-06_2020-04-02-CoinDesk.csv") | pandas.read_csv |
import os
import time
import shutil
import numpy as np
import pandas as pd
from smac.configspace import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, \
UniformFloatHyperparameter, UniformIntegerHyperparameter
from smac.scenario.scenario import Scenario
from smac.facade.smac_facade import SMAC
from .bayopt_base import BayoptBase
class SMACOPT(BayoptBase):
"""
Interface of SMAC (Bayesian Optimization).
Parameters
----------
:type para_space: dict or list of dictionaries
:param para_space: It has three types:
Continuous:
Specify `Type` as `continuous`, and include the keys of `Range` (a list with lower-upper elements pair) and
`Wrapper`, a callable function for wrapping the values.
Integer:
Specify `Type` as `integer`, and include the keys of `Mapping` (a list with all the sortted integer elements).
Categorical:
Specify `Type` as `categorical`, and include the keys of `Mapping` (a list with all the possible categories).
:type max_runs: int, optional, default=100
:param max_runs: The maximum number of trials to be evaluated. When this values is reached,
then the algorithm will stop.
:type estimator: estimator object
:param estimator: This is assumed to implement the scikit-learn estimator interface.
:type cv: cross-validation method, an sklearn object.
    :param cv: e.g., `StratifiedKFold` or `KFold` can be used.
:type scoring: string, callable, list/tuple, dict or None, optional, default=None
:param scoring: A sklearn type scoring function.
If None, the estimator's default scorer (if available) is used. See the package `sklearn` for details.
:type refit: boolean, or string, optional, default=True
:param refit: It controls whether to refit an estimator using the best found parameters on the whole dataset.
:type random_state: int, optional, default=0
:param random_state: The random seed for optimization.
:type verbose: boolean, optional, default=False
:param verbose: It controls whether the searching history will be printed.
Examples
----------
>>> import numpy as np
>>> from sklearn import svm
>>> from sklearn import datasets
>>> from sequd import SMACOPT
>>> from sklearn.model_selection import KFold
>>> iris = datasets.load_iris()
>>> ParaSpace = {'C':{'Type': 'continuous', 'Range': [-6, 16], 'Wrapper': np.exp2},
'gamma': {'Type': 'continuous', 'Range': [-16, 6], 'Wrapper': np.exp2}}
>>> estimator = svm.SVC()
>>> cv = KFold(n_splits=5, random_state=0, shuffle=True)
>>> clf = SMACOPT(ParaSpace, max_runs=100,
estimator=estimator, cv=cv, scoring=None, refit=None, random_state=0, verbose=False)
>>> clf.fit(iris.data, iris.target)
Attributes
----------
:vartype best_score\_: float
:ivar best_score\_: The best average cv score among the evaluated trials.
:vartype best_params\_: dict
:ivar best_params\_: Parameters that reaches `best_score_`.
:vartype best_estimator\_: sklearn estimator
:ivar best_estimator\_: The estimator refitted based on the `best_params_`.
Not available if estimator = None or `refit=False`.
:vartype search_time_consumed\_: float
:ivar search_time_consumed\_: Seconds used for whole searching procedure.
:vartype refit_time\_: float
:ivar refit_time\_: Seconds used for refitting the best model on the whole dataset.
Not available if estimator=None or `refit=False`.
"""
def __init__(self, para_space, max_runs=100, estimator=None, cv=None,
scoring=None, refit=True, random_state=0, verbose=False):
super(SMACOPT, self).__init__(para_space, max_runs, verbose)
self.cv = cv
self.refit = refit
self.scoring = scoring
self.estimator = estimator
self.random_state = random_state
self.method = "SMAC"
self.cs = ConfigurationSpace()
for item, values in self.para_space.items():
if values['Type'] == "continuous":
para = UniformFloatHyperparameter(item, values['Range'][0], values['Range'][1])
elif values['Type'] == "integer":
para = UniformIntegerHyperparameter(item, min(values['Mapping']), max(values['Mapping']))
elif values['Type'] == "categorical":
para = CategoricalHyperparameter(item, values['Mapping'])
self.cs.add_hyperparameter(para)
def obj_func(self, cfg):
cfg = {k: cfg[k] for k in cfg}
next_params = pd.DataFrame(cfg, columns=self.para_names, index=[0])
parameters = {}
for item, values in self.para_space.items():
if (values['Type'] == "continuous"):
parameters[item] = values['Wrapper'](float(next_params[item].iloc[0]))
elif (values['Type'] == "integer"):
parameters[item] = int(next_params[item].iloc[0])
elif (values['Type'] == "categorical"):
parameters[item] = next_params[item].iloc[0]
score = self.wrapper_func(parameters)
logs_aug = parameters
logs_aug.update({"score": score})
logs_aug = | pd.DataFrame(logs_aug, index=[self.iteration]) | pandas.DataFrame |
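# Hedged sketch (parameter names and ranges are invented) of how a `para_space`
# dict of the shape the SMACOPT class above expects is translated into a
# ConfigSpace object, mirroring the loop in __init__.
import numpy as np
from smac.configspace import ConfigurationSpace
from ConfigSpace.hyperparameters import (CategoricalHyperparameter,
                                         UniformFloatHyperparameter,
                                         UniformIntegerHyperparameter)

demo_space = {
    "C": {"Type": "continuous", "Range": [-6, 16], "Wrapper": np.exp2},
    "max_depth": {"Type": "integer", "Mapping": [2, 3, 4, 5, 6]},
    "kernel": {"Type": "categorical", "Mapping": ["linear", "rbf"]},
}
cs_demo = ConfigurationSpace()
for name, values in demo_space.items():
    if values["Type"] == "continuous":
        para = UniformFloatHyperparameter(name, values["Range"][0], values["Range"][1])
    elif values["Type"] == "integer":
        para = UniformIntegerHyperparameter(name, min(values["Mapping"]), max(values["Mapping"]))
    else:
        para = CategoricalHyperparameter(name, values["Mapping"])
    cs_demo.add_hyperparameter(para)
print(cs_demo)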
import numpy as np
import pandas as pd
import random
import pickle
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import torch
from torch.nn.utils.rnn import pad_sequence
from utils import _get_parcel, _get_behavioral
from cc_utils import _get_clip_labels
K_RUNS = 4
K_SEED = 330
def _get_clip_seq(df, subject_list, args):
'''
return:
X: input seq (batch_size x time x feat_size)
y: label seq (batch_size x time)
X_len: len of each seq (batch_size x 1)
batch_size <-> number of sequences
time <-> max length after padding
'''
features = [ii for ii in df.columns if 'feat' in ii]
X = []
y = []
for subject in subject_list:
for i_class in range(args.k_class):
if i_class==0: # split test-retest into 4
seqs = df[(df['Subject']==subject) &
(df['y'] == 0)][features].values
label_seqs = df[(df['Subject']==subject) &
(df['y'] == 0)]['y'].values
k_time = int(seqs.shape[0]/K_RUNS)
for i_run in range(K_RUNS):
seq = seqs[i_run*k_time:(i_run+1)*k_time, :]
label_seq = label_seqs[i_run*k_time:(i_run+1)*k_time]
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
X.append(torch.FloatTensor(seq))
y.append(torch.LongTensor(label_seq))
else:
seq = df[(df['Subject']==subject) &
(df['y'] == i_class)][features].values
label_seq = df[(df['Subject']==subject) &
(df['y'] == i_class)]['y'].values
if args.zscore:
# zscore each seq that goes into model
seq = (1/np.std(seq))*(seq - np.mean(seq))
X.append(torch.FloatTensor(seq))
y.append(torch.LongTensor(label_seq))
X_len = torch.LongTensor([len(seq) for seq in X])
# pad sequences
X = pad_sequence(X, batch_first=True, padding_value=0)
y = pad_sequence(y, batch_first=True, padding_value=-100)
return X.to(args.device), X_len.to(args.device), y.to(args.device)
def _clip_class_df(args):
'''
data for 15-way clip classification
args.roi: number of ROIs
args.net: number of subnetworks (7 or 17)
args.subnet: subnetwork; 'wb' if all subnetworks
args.invert_flag: all-but-one subnetwork
args.r_roi: number of random ROIs to pick
args.r_seed: random seed for picking ROIs
save each timepoint as feature vector
append class label based on clip
return:
pandas df
'''
load_path = (args.input_data + '/data_MOVIE_runs_%s' %(args.roi_name) +
'_%d_net_%d_ts.pkl' %(args.roi, args.net))
with open(load_path, 'rb') as f:
data = pickle.load(f)
# where are the clips within the run?
timing_file = pd.read_csv('data/videoclip_tr_lookup.csv')
'''
main
'''
clip_y = _get_clip_labels()
table = []
for run in range(K_RUNS):
print('loading run %d/%d' %(run+1, K_RUNS))
run_name = 'MOVIE%d' %(run+1) #MOVIEx_7T_yz
# timing file for run
timing_df = timing_file[
timing_file['run'].str.contains(run_name)]
timing_df = timing_df.reset_index(drop=True)
for subject in data:
# get subject data (time x roi x run)
vox_ts = data[subject][:, :, run]
for jj, clip in timing_df.iterrows():
start = int(np.floor(clip['start_tr']))
stop = int(np.ceil(clip['stop_tr']))
clip_length = stop - start
# assign label to clip
y = clip_y[clip['clip_name']]
for t in range(clip_length):
act = vox_ts[t + start, :]
t_data = {}
t_data['Subject'] = subject
t_data['timepoint'] = t
for feat in range(vox_ts.shape[1]):
t_data['feat_%d' %(feat)] = act[feat]
t_data['y'] = y
table.append(t_data)
df = | pd.DataFrame(table) | pandas.DataFrame |
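# Hedged aside on the padding step used in _get_clip_seq above (toy tensors,
# not real fMRI data): variable-length sequences are padded to the longest one,
# the true lengths are kept separately, and label padding uses -100 so that
# CrossEntropyLoss ignores the padded timesteps.
import torch
from torch.nn.utils.rnn import pad_sequence

seqs = [torch.ones(3, 2), torch.ones(5, 2)]            # two clips, feat_size = 2
labels = [torch.zeros(3, dtype=torch.long), torch.ones(5, dtype=torch.long)]

X_len = torch.LongTensor([len(s) for s in seqs])       # tensor([3, 5])
X = pad_sequence(seqs, batch_first=True, padding_value=0)       # shape (2, 5, 2)
y = pad_sequence(labels, batch_first=True, padding_value=-100)  # shape (2, 5)
print(X.shape, y.shape, X_len)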
"""
ETL Pipeline that takes any dataset and performs the following tasks:
* Combines the two given datasets
* Cleans the data
* Stores it in a SQLite database
"""
# import libraries
import sys
import pandas as pd
from settings import *
from sqlalchemy import create_engine
def retrieve_filename():
"""
Accesses the filename that is passed in as a command line
argument when this file is run.
"""
return sys.argv[1]
def load_data(filename):
"""Load dataset
Args:
filename: str
Contains location to data
Return:
df: Pandas DataFrame
"""
df = pd.read_csv(filename)
return df
def process_categorical_data(df):
"""Splits categories and converts categories to numbers
Args:
df: Pandas DataFrame
Return
categories: Pandas DataFrame
"""
# create a dataframe of the 36 individual category columns
categories = df['categories'].str.split(pat = ';', expand = True)
# select the first row of the categories dataframe
row = categories.iloc[0]
# Use this row to extract a list of new column names for categories.
# one way is to apply a lambda function that takes everything
# up to the second to last character of each string with slicing
category_colnames = row.apply(lambda x: str(x).split('-')[0])
# rename the columns of `categories`
categories.columns = category_colnames
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].apply(lambda x: str(x).split('-')[1])
# convert column from string to numeric
categories[column] = | pd.to_numeric(categories[column]) | pandas.to_numeric |
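# Hedged illustration of the category-splitting logic above, on a single
# invented row: each "label-value" pair becomes its own numeric column.
import pandas as pd

demo = pd.DataFrame({"categories": ["related-1;request-0;offer-0"]})
cats = demo["categories"].str.split(pat=";", expand=True)
cats.columns = cats.iloc[0].apply(lambda x: str(x).split("-")[0])
for col in cats:
    cats[col] = pd.to_numeric(cats[col].apply(lambda x: str(x).split("-")[1]))
print(cats)
#    related  request  offer
# 0        1        0      0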
import anemoi as an
import pandas as pd
import numpy as np
import scipy as sp
import statsmodels.api as sm
import scipy.odr.odrpack as odrpack
import warnings
def compare_sorted_df_columns(cols_1, cols_2):
return sorted(cols_1) == sorted(cols_2)
def valid_ws_correlation_data(data, ref_ws_col='ref', site_ws_col='site'):
'''Perform checks on wind speed correlation data.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
if ref_ws_col == site_ws_col:
raise ValueError("Error: Reference and site wind speed columns cannot have the same name.")
return False
if not compare_sorted_df_columns(data.columns.tolist(), [ref_ws_col, site_ws_col]):
raise ValueError("Error: the correlation data don't match the expected format.")
return False
if not data.shape[0] > 6:
        warnings.warn("Warning: trying to correlate six or fewer data points.")
return False
if (data.loc[:,ref_ws_col] == data.loc[:,site_ws_col]).sum() == data.shape[0]:
        warnings.warn("Warning: it seems you are trying to correlate a single mast against itself.")
return False
return True
def return_correlation_results_frame(ref_label='ref', site_label='site'):
results = pd.DataFrame(columns=['slope', 'offset' , 'R2', 'uncert', 'points'],
index=pd.MultiIndex.from_tuples([(ref_label, site_label)],
names=['ref', 'site'])
)
return results
def return_correlation_data_from_masts(ref_mast, site_mast):
'''Return a DataFrame of reference and site data for correlations.
Will be extracted from each MetMast object using the primary anemometers and wind vanes.
:Parameters:
ref_mast: MetMast
Anemoi MetMast object
site_mast: MetMast
Anemoi MetMast object
:Returns:
out: DataFrame with columns ref, site, and dir
'''
ref_data = ref_mast.return_primary_ano_vane_data()
ref_data.columns = ['ref', 'dir']
site_data = site_mast.return_primary_ano_vane_data()
site_data.columns = ['site', 'site_dir']
data = pd.concat([ref_data, site_data.site], axis=1).dropna()
data = data.loc[:, ['ref', 'site', 'dir']]
if not valid_ws_correlation_data(data=data, ref_ws_col='ref', site_ws_col='site'):
warning_string = "Warning: {} and {} don't seem to have valid concurrent data for a correlation.".format(ref_mast.name, site_mast.name)
warnings.warn(warning_string)
return data
### CORRELATION METHODS ###
def calculate_R2(data, ref_ws_col='ref', site_ws_col='site'):
'''Return a single R2 between two wind speed columns
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
data = data.loc[:,[ref_ws_col, site_ws_col]].dropna()
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return np.nan
r2 = data[ref_ws_col].corr(data[site_ws_col])**2
return r2
def calculate_IEC_uncertainty(data, ref_ws_col='ref', site_ws_col='site'):
'''Calculate the IEC correlation uncertainty between two wind speed columns
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
data = data.loc[:,[ref_ws_col, site_ws_col]].dropna()
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return np.nan
X = data.loc[:,ref_ws_col].values
Y = data.loc[:,site_ws_col].values
uncert = np.std(Y/X)*100/len(X)
return uncert*100.0
def calculate_EDF_uncertainty(data, ref_ws_col='ref', site_ws_col='site'):
    '''Calculate the EDF-estimated correlation uncertainty between two wind speed columns.
    Assumes a correlation forced through the origin
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
data = data.loc[:,[ref_ws_col, site_ws_col]].dropna()
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return np.nan
X = data.loc[:,ref_ws_col].values
Y = data.loc[:,site_ws_col].values
Sxx = np.sum(X**2)
Syy = np.sum(Y**2)
Sxy = np.sum(X*Y)
B = 0.5*(Sxx - Syy)/Sxy
SU = -B + np.sqrt(B**2 + 1)
e2 = np.sum((Y - SU*X)**2)/(1 + SU**2)
Xsi2 = e2/(data.shape[0] - 1)
uncert = np.sqrt((Xsi2*SU**2)*(Sxx*Sxy**2 + 0.25*((Sxx - Syy)**2)*Sxx)/((B**2 + 1.0)*Sxy**4))
return uncert*100.0
def ws_correlation_least_squares_model(data, ref_ws_col='ref', site_ws_col='site', force_through_origin=False):
'''Calculate the slope and offset between two wind speed columns using ordinary least squares regression.
https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.lstsq.html
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
data = data.loc[:, [ref_ws_col, site_ws_col]].dropna()
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return results
points = data.shape[0]
R2 = calculate_R2(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
uncert = calculate_IEC_uncertainty(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
if force_through_origin:
data.loc[:,'offset'] = 0
else:
data.loc[:,'offset'] = 1
X = data.loc[:, [ref_ws_col,'offset']].values
Y = data.loc[:, site_ws_col].values
slope, offset = np.linalg.lstsq(X, Y)[0]
results.loc[pd.IndexSlice[ref_ws_col, site_ws_col],['slope', 'offset' , 'R2', 'uncert', 'points']] = np.array([slope, offset, R2, uncert, points])
return results
def f_with_offset(B, x):
return B[0]*x + B[1]
def f_without_offset(B, x):
return B[0]*x
def ws_correlation_orthoginal_distance_model(data, ref_ws_col='ref', site_ws_col='site', force_through_origin=False):
    '''Calculate the slope and offset between two wind speed columns using orthogonal distance regression.
https://docs.scipy.org/doc/scipy-0.18.1/reference/odr.html
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
    data = data.loc[:, [ref_ws_col, site_ws_col]].dropna().astype(float)  # builtin float; the numpy.float alias was removed in newer NumPy
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return results
points = data.shape[0]
R2 = calculate_R2(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
uncert = calculate_IEC_uncertainty(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
X = data.loc[:, ref_ws_col].values
Y = data.loc[:, site_ws_col].values
data_mean = data.mean()
slope_estimate_via_ratio = data_mean[site_ws_col]/data_mean[ref_ws_col]
realdata = odrpack.RealData(X, Y)
if force_through_origin:
linear = odrpack.Model(f_without_offset)
odr = odrpack.ODR(realdata, linear, beta0=[slope_estimate_via_ratio])
slope = odr.run().beta[0]
offset = 0
else:
linear = odrpack.Model(f_with_offset)
odr = odrpack.ODR(realdata, linear, beta0=[slope_estimate_via_ratio, 0.0])
slope, offset = odr.run().beta[0], odr.run().beta[1]
results.loc[pd.IndexSlice[ref_ws_col, site_ws_col],['slope', 'offset' , 'R2', 'uncert', 'points']] = np.array([slope, offset, R2, uncert, points])
return results
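# A hedged, minimal sketch of the scipy ODR machinery used above, wrapped in a
# helper so it does not run at import time; the synthetic slope of 0.9 and the
# sample size are illustrative only (relies on the odrpack/numpy imports above).
def _demo_odr_through_origin():
    rng = np.random.RandomState(0)
    x = rng.uniform(3.0, 15.0, 200)
    y = 0.9 * x + rng.normal(0.0, 0.3, 200)
    realdata = odrpack.RealData(x, y)
    model = odrpack.Model(f_without_offset)      # slope-only model defined above
    fit = odrpack.ODR(realdata, model, beta0=[y.mean() / x.mean()]).run()
    return fit.beta[0]                           # estimated slope, close to 0.9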
def ws_correlation_robust_linear_model(data, ref_ws_col='ref', site_ws_col='site', force_through_origin=False):
'''Calculate the slope and offset between two wind speed columns using robust linear model.
http://www.statsmodels.org/dev/rlm.html
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
    data = data.loc[:, [ref_ws_col, site_ws_col]].dropna().astype(float)  # builtin float; the numpy.float alias was removed in newer NumPy
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return results
points = data.shape[0]
R2 = calculate_R2(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
uncert = calculate_IEC_uncertainty(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
X = data.loc[:, ref_ws_col].values
Y = data.loc[:, site_ws_col].values
if not force_through_origin:
X = sm.add_constant(X)
else:
X = [np.zeros(X.shape[0]), X]
X = np.column_stack(X)
mod = sm.RLM(Y, X)
resrlm = mod.fit()
offset, slope = resrlm.params
R2 = sm.WLS(mod.endog, mod.exog, weights=mod.fit().weights).fit().rsquared
results.loc[pd.IndexSlice[ref_ws_col, site_ws_col],['slope', 'offset' , 'R2', 'uncert', 'points']] = np.array([slope, offset, R2, uncert, points])
return results
def ws_correlation_method(data, ref_ws_col='ref', site_ws_col='site', method='ODR', force_through_origin=False):
'''Calculate the slope and offset, for a given correlation method, between two wind speed columns.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
method: string, default 'ODR'
Correlation method to use.
            * Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
if method == 'ODR':
results = ws_correlation_orthoginal_distance_model(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, force_through_origin=force_through_origin)
elif method == 'OLS':
results = ws_correlation_least_squares_model(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, force_through_origin=force_through_origin)
elif method == 'RLM':
results = ws_correlation_robust_linear_model(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, force_through_origin=force_through_origin)
return results
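# Hedged usage sketch of the dispatcher above, wrapped in a helper so it does
# not execute on import; the synthetic wind speeds are illustrative only and
# rely on the numpy/pandas imports at the top of this module.
def _demo_ws_correlation_method():
    rng = np.random.RandomState(1)
    ref = rng.weibull(2.0, 500) * 8.0
    site = 0.85 * ref + rng.normal(0.0, 0.4, 500)
    demo = pd.DataFrame({'ref': ref, 'site': site})
    results = ws_correlation_method(demo, ref_ws_col='ref', site_ws_col='site',
                                    method='ODR', force_through_origin=False)
    return results[['slope', 'offset', 'R2', 'points']]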
def ws_correlation_binned_by_direction(data, ref_ws_col='ref', site_ws_col='site', ref_dir_col='dir', dir_sectors=16, method='ODR', force_through_origin=False):
'''Calculate the slope and offset, binned by direction, between two wind speed columns.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
ref_dir_col: string, default None (primary wind vane assumed)
Reference wind vane data to use. Extracted from MetMast.data
dir_sectors: int, default 16
Number of equally spaced direction sectors
method: string, default 'ODR'
Correlation method to use.
            * Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
    data = data.loc[:, [ref_ws_col, site_ws_col, ref_dir_col]].dropna().astype(float)  # builtin float; the numpy.float alias was removed in newer NumPy
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
dir_bins = np.arange(1,dir_sectors+1)
results = pd.concat([results]*dir_sectors, axis=0)
results.index = | pd.Index(dir_bins, name='dir_bin') | pandas.Index |
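# Hedged aside (synthetic directions; the binning convention used inside
# ws_correlation_binned_by_direction above may differ): one simple way to map
# wind directions in degrees onto 16 equally spaced sectors labelled 1..16.
import numpy as np
import pandas as pd

dir_sectors = 16
directions = pd.Series([0.0, 11.0, 12.0, 185.0, 337.0, 348.9])
dir_edges = np.linspace(0, 360, dir_sectors + 1)                # 0, 22.5, ..., 360
dir_bin = pd.cut(directions, bins=dir_edges,
                 labels=np.arange(1, dir_sectors + 1), include_lowest=True)
print(dir_bin.tolist())   # [1, 1, 1, 9, 15, 16]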
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
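# Illustrative helper (not part of the original suite): a minimal example of
# the behaviour the mapping above documents -- with observed=False an
# unobserved category appears in the result, with sum == 0 but mean == NaN.
def _example_unobserved_category_fill_values():
    cat = Categorical(["a", "a"], categories=["a", "b"])
    df = DataFrame({"cat": cat, "x": [1, 3]})
    gb = df.groupby("cat", observed=False)["x"]
    assert gb.sum().loc["b"] == 0
    assert np.isnan(gb.mean().loc["b"])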
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
# test fix for when group by on None resulted in
# coercion of dtype categorical -> float
df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
result = df.groupby("A").first()["B"]
expected = Series(
Categorical([], categories=["test", "train"]),
index=Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_sort():
# https://stackoverflow.com/questions/23814368/sorting-pandas-
# categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = | Categorical(df["range"], ordered=True) | pandas.Categorical |
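# Hedged aside (invented data): the behaviour the tests above rely on --
# grouping on an ordered Categorical returns groups in category order, and an
# unobserved category is kept (with a zero sum) when observed=False.
import pandas as pd

sizes = pd.Categorical(["small", "large", "medium", "small"],
                       categories=["small", "medium", "large", "x-large"],
                       ordered=True)
df_demo = pd.DataFrame({"size": sizes, "price": [1, 9, 4, 2]})
print(df_demo.groupby("size", observed=False)["price"].sum())
# small      3
# medium     4
# large      9
# x-large    0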
"""
Module for the import and manipulation of quantified targeted MS data sets.
"""
import copy
import os
import re
from datetime import datetime
import numpy
import pandas
import collections
import warnings
from .._toolboxPath import toolboxPath
from ._dataset import Dataset
from ..utilities import normalisation, rsd
from ..enumerations import VariableType, AssayRole, SampleType, QuantificationType, CalibrationMethod, AnalyticalPlatform
class TargetedDataset(Dataset):
"""
TargetedDataset(dataPath, fileType='TargetLynx', sop='Generic', \*\*kwargs)
    :py:class:`~TargetedDataset` extends :py:class:`Dataset` to represent quantitative datasets, where compounds are already identified, the exactitude of the quantification can be established, units are known and calibration curves or internal standards are employed.
    The :py:class:`~TargetedDataset` class includes methods to apply limits of quantification (LLOQ and ULOQ), merge multiple analytical batches, and report the accuracy and precision of each measurement.
In addition to the structure of :py:class:`~Dataset`, :py:class:`~TargetedDataset` requires the following attributes:
* :py:attr:`~TargetedDataset.expectedConcentration`:
        A :math:`n` × :math:`m` pandas dataframe of expected concentrations (matching the :py:attr:`~Dataset.intensityData` dimension), with column names matching :py:attr:`~TargetedDataset.featureMetadata['Feature Name']`
* :py:attr:`~TargetedDataset.calibration`:
A dictionary containing pandas dataframe describing calibration samples:
* :py:attr:`~TargetedDataset.calibration['calibIntensityData']`:
A :math:`r` x :math:`m` numpy matrix of measurements. Features must match features in :py:attr:`~TargetedDataset.intensityData`
* :py:attr:`~TargetedDataset.calibration['calibSampleMetadata']`:
A :math:`r` x :math:`m` pandas dataframe of calibration sample identifiers and metadata
* :py:attr:`~TargetedDataset.calibration['calibFeatureMetadata']`:
A :math:`m` × :math:`q` pandas dataframe of feature identifiers and metadata
* :py:attr:`~TargetedDataset.calibration['calibExpectedConcentration']`:
A :math:`r` × :math:`m` pandas dataframe of calibration samples expected concentrations
* :py:attr:`~TargetedDataset.Attributes` must contain the following (can be loaded from a method specific JSON on import):
* ``methodName``:
A (str) name of the method
* ``externalID``:
A list of external ID, each external ID must also be present in *Attributes* as a list of identifier (for that external ID) for each feature. For example, if ``externalID=['PubChem ID']``, ``Attributes['PubChem ID']=['ID1','ID2','','ID75']``
* :py:attr:`~TargetedDataset.featureMetadata` expects the following columns:
* ``quantificationType``:
A :py:class:`~nPYc.enumerations.QuantificationType` enum specifying the exactitude of the quantification procedure employed.
* ``calibrationMethod``:
A :py:class:`~nPYc.enumerations.CalibrationMethod` enum specifying the calibration method employed.
* ``Unit``:
A (str) unit corresponding the the feature measurement value.
* ``LLOQ``:
The lowest limit of quantification, used to filter concentrations < LLOQ
* ``ULOQ``:
The upper limit of quantification, used to filter concentrations > ULOQ
* externalID:
All externalIDs listed in :py:attr:`~TargetedDataset.Attributes['externalID']` must be present as their own column
Currently targeted assay results processed using **TargetLynx** or **Bruker quantification results** can be imported.
To create an import for any other form of semi-quantitative or quantitative results, the procedure is as follow:
* Create a new ``fileType == 'myMethod'`` entry in :py:meth:`~TargetedDataset.__init__`
* Define functions to populate all expected dataframes (using file readers, JSON,...)
        * Separate calibration samples from study samples (store in :py:attr:`~TargetedDataset.calibration`). *If none exist, initialise empty dataframes with the correct number of columns and column names.*
* Execute pre-processing steps if required (note: all feature values should be expressed in the unit listed in :py:attr:`~TargetedDataset.featureMetadata['Unit']`)
* Apply limits of quantification using :py:meth:`~TargetedDataset._applyLimitsOfQuantification`. (This function does not apply limits of quantification to features marked as :py:class:`~nPYc.enumerations.QuantificationType` == QuantificationType.Monitored for compounds monitored for relative information.)
The resulting :py:class:`~TargetedDatset` created must satisfy to the criteria for *BasicTargetedDataset*, which can be checked with :py:meth:`~TargetedDataset.validatedObject` (list the minimum requirements for all class methods).
* ``fileType == 'TargetLynx'`` to import data processed using **TargetLynx**
TargetLynx import operates on ``xml`` files exported *via* the 'File -> Export -> XML' TargetLynx menu option. Import requires a ``calibration_report.csv`` providing lower and upper limits of quantification (LLOQ, ULOQ) with the ``calibrationReportPath`` keyword argument.
    Targeted data measurements as well as calibration report information are read and mapped with pre-defined SOPs. All measurements are converted to pre-defined units, and measurements below the lowest limit of quantification or above the upper limit of quantification are replaced. Once the import is finished, only analysed samples are returned (no calibration samples), and only features mapped onto the pre-defined SOP and sufficiently described are kept.
Instructions to created new ``TargetLynx`` SOP can be found on the :doc:`generation of targeted SOPs <configuration/targetedSOPs>` page.
Example: ``TargetedDataset(datapath, fileType='TargetLynx', sop='OxylipinMS', calibrationReportPath=calibrationReportPath, sampleTypeToProcess=['Study Sample','QC'], noiseFilled=False, onlyLLOQ=False, responseReference=None)``
* ``sop``
Currently implemented are `'OxylipinMS'` and `'AminoAcidMS'`
`AminoAcidMS`: Gray N. `et al`. Human Plasma and Serum via Precolumn Derivatization with 6‑Aminoquinolyl‑N‑hydroxysuccinimidyl Carbamate: Application to Acetaminophen-Induced Liver Failure. `Analytical Chemistry`, 2017, 89, 2478−87.
`OxylipinMS`: <NAME>. `et al.` Development and Validation of a High-Throughput Ultrahigh-Performance Liquid Chromatography-Mass Spectrometry Approach for Screening of Oxylipins and Their Precursors. `Analytical Chemistry`, 2015, 87 (23),11721–31
* ``calibrationReportPath``
Path to the calibration report `csv` following the provided report template.
The following columns are required (leave an empty value to reject a compound):
* Compound
The compound name, identical to the one employed in the SOP `json` file.
* TargetLynx ID
The compound TargetLynx ID, identical to the one employed in the SOP `json` file.
* LLOQ
Lowest limit of quantification concentration, in the same unit as indicated in TargetLynx.
* ULOQ
Upper limit of quantification concentration, in the same unit as indicated in TargetLynx.
The following columns are expected by :py:meth:`~TargetedDataset._targetLynxApplyLimitsOfQuantificationNoiseFilled`:
* Noise (area)
Area integrated in a blank sample at the same retention time as the compound of interest (if left empty noise concentration calculation cannot take place).
* a
:math:`a` coefficient in the calibration equation (if left empty noise concentration calculation cannot take place).
* b
:math:`b` coefficient in the calibration equation (if left empty noise concentration calculation cannot take place).
The following columns are recommended but not expected:
* Cpd Info
Additional information relating to the compound (can be left empty).
* r
:math:`r` goodness of fit measure for the calibration equation (can be left empty).
* r2
:math:`r^2` goodness of fit measure for the calibration equation (can be left empty).
* ``sampleTypeToProcess``
List of *['Study Sample','Blank','QC','Other']* for the sample types to process as defined in MassLynx. Only samples in 'sampleTypeToProcess' are returned. Calibrants should not be processed and are not returned. Most uses should only require `'Study Sample'` as quality controls are identified based on sample names by subsequent functions. `Default value is '['Study Sample','QC']'`.
* ``noiseFilled``
If True values <LLOQ will be replaced by a concentration equivalent to the noise level in a blank. If False <LLOQ is replaced by :math:`-inf`. `Default value is 'False'`
* ``onlyLLOQ``
If True only correct <LLOQ, if False correct <LLOQ and >ULOQ. `Default value is 'False'`.
* ``responseReference``
If noiseFilled=True the noise concentration needs to be calculated. Provide the 'Sample File Name' of a reference sample to use in order to establish the response to use, or list of samples to use (one per feature). If None, the middle of the calibration will be employed. `Default value is 'None'`.
* ``keepPeakInfo``
If keepPeakInfo=True (default `False`) adds the :py:attr:`peakInfo` dictionary to the :py:class:`~TargetedDataset.calibration`. :py:attr:`peakInfo` contains the `peakResponse`, `peakArea`, `peakConcentrationDeviation`, `peakIntegrationFlag` and `peakRT`.
* ``keepExcluded``
If keepExcluded=True (default `False`), import exclusions (:py:attr:`excludedImportSampleMetadata`, :py:attr:`excludedImportFeatureMetadata`, :py:attr:`excludedImportIntensityData` and :py:attr:`excludedImportExpectedConcentration`) are kept in the object.
* ``keepIS``
If keepIS=True (default `False`), features marked as Internal Standards (IS) are retained.
* ``fileType = 'Bruker Quantification'`` to import Bruker quantification results
* ``nmrRawDataPath``
Path to the parent folder where all result files are stored. All subfolders will be parsed and the ``.xml`` results files matching the ``fileNamePattern`` imported.
* ``fileNamePattern``
Regex to recognise the result data xml files
* ``pdata``
To select the right pdata folders (default 1)
Two form of Bruker quantification results are supported and selected using the ``sop`` option: *BrukerQuant-UR* and *Bruker BI-LISA*
* ``sop = 'BrukerQuant-UR'``
Example: ``TargetedDataset(nmrRawDataPath, fileType='Bruker Quantification', sop='BrukerQuant-UR', fileNamePattern='.*?urine_quant_report_b\.xml$', unit='mmol/mol Crea')``
* ``unit``
If features are duplicated with different units, ``unit`` limits the import to features matching said unit. (In case of duplication and no ``unit``, all available units will be listed)
* ``sop = ''BrukerBI-LISA'``
Example: ``TargetedDataset(nmrRawDataPath, fileType='Bruker Quantification', sop='BrukerBI-LISA', fileNamePattern='.*?results\.xml$')``
"""
def __init__(self, datapath, fileType='TargetLynx', sop='Generic', **kwargs):
"""
Initialisation and pre-processing of input data (load files and match data and calibration and SOP, apply limits of quantification).
"""
super().__init__(sop=sop, **kwargs)
self.filePath, fileName = os.path.split(datapath)
self.fileName, fileExtension = os.path.splitext(fileName)
self.name = self.fileName
# Load files and match data, calibration report and SOP, then Apply the limits of quantification
if fileType == 'TargetLynx':
# Read files, filter calibration samples, filter IS, applyLLOQ, clean object
self._loadTargetLynxDataset(datapath, **kwargs)
# Finalise object
self.VariableType = VariableType.Discrete
self.AnalyticalPlatform = AnalyticalPlatform.MS
self.initialiseMasks()
elif fileType == 'Bruker Quantification':
# Read files, clean object
self._loadBrukerXMLDataset(datapath, **kwargs)
# Finalise object
self.VariableType = VariableType.Discrete
self.AnalyticalPlatform = AnalyticalPlatform.NMR
self.initialiseMasks()
elif fileType == 'empty':
# Build empty object for testing
pass
else:
raise NotImplementedError
# Check the final object is valid and log
if fileType != 'empty':
validDataset = self.validateObject(verbose=False, raiseError=False, raiseWarning=False)
if not validDataset['BasicTargetedDataset']:
raise ValueError('Import Error: The imported dataset does not satisfy the Basic TargetedDataset definition')
self.Attributes['Log'].append([datetime.now(),
'%s instance initiated, with %d samples, %d features, from %s'
% (self.__class__.__name__, self.noSamples, self.noFeatures, datapath)])
# Check later
if 'Metadata Available' not in self.sampleMetadata:
self.sampleMetadata['Metadata Available'] = False
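# Usage sketch (hypothetical paths and keyword values, mirroring the examples in the class
# docstring; not prescriptive):
#
#   # TargetLynx import (requires a calibration report csv)
#   msData = TargetedDataset('/path/to/targetlynx_export.xml', fileType='TargetLynx',
#                            sop='OxylipinMS', calibrationReportPath='/path/to/calibration_report.csv',
#                            sampleTypeToProcess=['Study Sample', 'QC'])
#
#   # Bruker quantification import
#   nmrData = TargetedDataset('/path/to/results/folder', fileType='Bruker Quantification',
#                             sop='BrukerQuant-UR', fileNamePattern='.*?urine_quant_report_b\.xml$',
#                             unit='mmol/mol Crea')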
@property
def rsdSP(self):
"""
Returns percentage :term:`relative standard deviations<RSD>` for each feature in the dataset, calculated on samples with the Assay Role :py:attr:`~nPYc.enumerations.AssayRole.PrecisionReference` and Sample Type :py:attr:`~nPYc.enumerations.SampleType.StudyPool` in :py:attr:`~Dataset.sampleMetadata`.
Implemented as a back-up to :py:meth:`accuracyPrecision` when no expected concentrations are known.
:return: Vector of feature RSDs
:rtype: numpy.ndarray
"""
# Check we have Study Reference samples defined
if not ('AssayRole' in self.sampleMetadata.keys() and 'SampleType' in self.sampleMetadata.keys()):
raise ValueError('Assay Roles and Sample Types must be defined to calculate RSDs.')
if not sum(self.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference) > 1:
raise ValueError('More than one precision reference is required to calculate RSDs.')
mask = numpy.logical_and(self.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference,
self.sampleMetadata['SampleType'].values == SampleType.StudyPool)
return rsd(self._intensityData[mask & self.sampleMask])
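# Note: rsd() is the toolbox helper returning percentage RSDs per feature; conceptually this
# is a sketch of the calculation (the helper may handle edge cases differently):
#   rsdPerFeature = numpy.std(values, axis=0, ddof=1) / numpy.mean(values, axis=0) * 100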
@property
def rsdSS(self):
"""
Returns percentage :term:`relative standard deviations<RSD>` for each feature in the dataset, calculated on samples with the Assay Role :py:attr:`~nPYc.enumerations.AssayRole.Assay` and Sample Type :py:attr:`~nPYc.enumerations.SampleType.StudySample` in :py:attr:`~Dataset.sampleMetadata`.
:return: Vector of feature RSDs
:rtype: numpy.ndarray
"""
# Check we have Study Reference samples defined
if not ('AssayRole' in self.sampleMetadata.keys() and 'SampleType' in self.sampleMetadata.keys()):
raise ValueError('Assay Roles and Sample Types must be defined to calculate RSDs.')
if not sum(self.sampleMetadata['AssayRole'].values == AssayRole.Assay) > 1:
raise ValueError('More than one assay sample is required to calculate RSDs.')
mask = numpy.logical_and(self.sampleMetadata['AssayRole'].values == AssayRole.Assay,
self.sampleMetadata['SampleType'].values == SampleType.StudySample)
return rsd(self._intensityData[mask & self.sampleMask])
def _loadTargetLynxDataset(self, datapath, calibrationReportPath, keepIS=False, noiseFilled=False, keepPeakInfo=False, keepExcluded=False, **kwargs):
"""
Initialise object from peak-picked and calibrated TargetLynx data. Filter calibration samples, filter IS.
Targeted data measurements as well as calibration report information are read and mapped with pre-defined SOPs. All units are converted to pre-defined units and measurements inferior to the lowest limits of quantification or superior to the upper limits of quantification are replaced. Once the import is finished, only analysed samples are returned (no calibration samples) and only features mapped onto the pre-defined SOP and sufficiently described.
* TargetLynx
TargetLynx import operates on xml files exported *via* the 'File -> Export -> XML' menu option. Import requires a calibration_report.csv providing lower and upper limits of quantification (LLOQ, ULOQ) with the ``calibrationReportPath`` keyword argument.
Example: ``TargetedDataset(datapath, fileType='TargetLynx', sop='OxylipinMS', calibrationReportPath=calibrationReportPath, sampleTypeToProcess=['Study Sample','QC'], noiseFilled=False, onlyLLOQ=False, responseReference=None)``
* ``datapath``
Path to the TargetLynx exported `xml` file.
* ``calibrationReportPath``
Path to the calibration report `csv` following the provided report template (leave an empty value in the predefined columns to reject a compound).
* ``sampleTypeToProcess``
List of ['Study Sample','Blank','QC','Other'] for the sample types to process as defined in MassLynx. Only samples in 'sampleTypeToProcess' are returned. Calibrants should not be processed and are not returned. Most uses should only require `'Study Sample'` as quality controls are identified based on sample names by subsequent functions. `Default value is '['Study Sample','QC']'`.
* ``noiseFilled``
If True, values <LLOQ will be replaced by a concentration equivalent to the noise level in a blank. If False, <LLOQ values are replaced by :math:`-inf`. `Default value is 'False'`.
* ``onlyLLOQ``
If True, only correct <LLOQ; if False, correct both <LLOQ and >ULOQ. `Default value is 'False'`.
* ``responseReference``
If noiseFilled=True, the noise concentration needs to be calculated. Provide the 'Sample File Name' of a reference sample (or a list of samples, one per feature) from which the response is established. If None, the middle of the calibration is employed. `Default value is 'None'`.
* ``keepIS``
If keepIS=True (default `False`), features marked as Internal Standards (IS) are retained.
* ``keepPeakInfo``
If keepPeakInfo=True (default `False`), adds the :py:attr:`peakInfo` dictionary to the :py:class:`TargetedDataset` and :py:attr:`calibration`. :py:attr:`peakInfo` contains the `peakResponse`, `peakArea`, `peakConcentrationDeviation`, `peakIntegrationFlag` and `peakRT`.
* ``keepExcluded``
If keepExcluded=True (default `False`), import exclusions (:py:attr:`excludedImportSampleMetadata`, :py:attr:`excludedImportFeatureMetadata`, :py:attr:`excludedImportIntensityData` and :py:attr:`excludedImportExpectedConcentration`) are kept in the object.
:param datapath: Path to the TargetLynx exported xml file
:type datapath: str
:param calibrationReportPath: Path to the calibration report csv file
:type calibrationReportPath: str
:param keepIS: If keepIS=True (default `False`), features marked as Internal Standards (IS) are retained.
:type keepIS: bool
:param noiseFilled: If noiseFilled=True (default `False`), values <LLOQ are replaced by the noise concentration
:type noiseFilled: bool
:param keepPeakInfo: If keepPeakInfo=True (default `False`), the :py:attr:`peakInfo` dictionary (`peakResponse`, `peakArea`, `peakConcentrationDeviation`, `peakIntegrationFlag` and `peakRT`) is kept in the object.
:type keepPeakInfo: bool
:param keepExcluded: If keepExcluded=True (default `False`), import exclusions (:py:attr:`excludedImportSampleMetadata`, :py:attr:`excludedImportFeatureMetadata`, :py:attr:`excludedImportIntensityData` and :py:attr:`excludedImportExpectedConcentration`) are kept in the object.
:type keepExcluded: bool
:param kwargs: Additional parameters such as `sampleTypeToProcess`, `onlyLLOQ` or `responseReference`
:return: None
"""
# Load TargetLynx output file
self._readTargetLynxDataset(datapath, calibrationReportPath, **kwargs)
# Filter calibration samples
self._filterTargetLynxSamples(**kwargs)
# Filter IS features (default remove them)
if keepIS:
print('IS features are kept for processing:', sum(self.featureMetadata['IS'].values), 'IS features,', sum(~self.featureMetadata['IS'].values), 'other features.')
print('-----')
self.Attributes['Log'].append([datetime.now(), 'IS features kept for processing (%d samples). %d IS, %d other features.' % (self.noSamples, sum(self.featureMetadata['IS'].values), sum(~self.featureMetadata['IS'].values))])
else:
self._filterTargetLynxIS(**kwargs)
# Apply limits of quantification
if noiseFilled:
self._targetLynxApplyLimitsOfQuantificationNoiseFilled(**kwargs)
else:
self._applyLimitsOfQuantification(**kwargs)
# Remove peakInfo (default remove)
if keepPeakInfo:
self.Attributes['Log'].append([datetime.now(), 'TargetLynx peakInfo kept.'])
else:
delattr(self, 'peakInfo')
del self.calibration['calibPeakInfo']
# Remove import exclusions as they are not useful after import
if keepExcluded:
self.Attributes['Log'].append([datetime.now(), 'Features and Samples excluded during import have been kept.'])
else:
delattr(self, 'sampleMetadataExcluded')
delattr(self, 'featureMetadataExcluded')
delattr(self, 'intensityDataExcluded')
delattr(self, 'expectedConcentrationExcluded')
delattr(self, 'excludedFlag')
# clear **kwargs that have been copied to Attributes
for i in list(kwargs.keys()):
try:
del self.Attributes[i]
except:
pass
for j in ['keepIS','noiseFilled','keepPeakInfo','keepExcluded']:
try:
del self.Attributes[j]
except:
pass
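# Processing-chain sketch for this loader (keyword values are illustrative and mirror the
# documented defaults):
#   self._loadTargetLynxDataset('/path/to/export.xml',
#                               calibrationReportPath='/path/to/calibration_report.csv',
#                               keepIS=False, noiseFilled=False, keepPeakInfo=False, keepExcluded=False)
#   # -> _readTargetLynxDataset -> _filterTargetLynxSamples -> _filterTargetLynxIS
#   #    -> _applyLimitsOfQuantification (or _targetLynxApplyLimitsOfQuantificationNoiseFilled)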
def _readTargetLynxDataset(self, datapath, calibrationReportPath, **kwargs):
"""
Parse a TargetLynx output file (`xml`; sample metadata, feature metadata, intensity, peak area and peak response) and the matching calibration report (`csv`; limits of quantification, noise area, calibration equation parameters), then check their agreement before returning a sufficiently described dataset.
Sets :py:attr:`sampleMetadata`, :py:attr:`featureMetadata`, :py:attr:`intensityData`, :py:attr:`expectedConcentration`, :py:attr:`excludedImportSampleMetadata`, :py:attr:`excludedImportFeatureMetadata`, :py:attr:`excludedImportIntensityData` and :py:attr:`peakInfo`
:param datapath: Path to the TargetLynx export xml file
:type datapath: str
:param calibrationReportPath: Path to the calibration report csv file
:type calibrationReportPath: str
:return: None
"""
# Read XML (dumb, no checks, no metadata alteration)
sampleMetadata, featureMetadata, intensityData, expectedConcentration, peakResponse, peakArea, peakConcentrationDeviation, peakIntegrationFlag, peakRT = self.__getDatasetFromXML(datapath)
# Read calibration information from .csv (dumb, no metadata alteration, only checks for required columns)
calibReport = self.__getCalibrationFromReport(calibrationReportPath)
# Match XML, Calibration Report & SOP
sampleMetadata, featureMetadata, intensityData, expectedConcentration, excludedImportSampleMetadata, excludedImportFeatureMetadata, excludedImportIntensityData, excludedImportExpectedConcentration, excludedImportFlag, peakResponse, peakArea, peakConcentrationDeviation, peakIntegrationFlag, peakRT = self.__matchDatasetToCalibrationReport(sampleMetadata, featureMetadata, intensityData, expectedConcentration, peakResponse, peakArea, peakConcentrationDeviation, peakIntegrationFlag, peakRT, calibReport)
self.sampleMetadata = sampleMetadata
self.featureMetadata = featureMetadata
self._intensityData = intensityData
self.expectedConcentration = expectedConcentration
self.sampleMetadataExcluded = excludedImportSampleMetadata
self.featureMetadataExcluded = excludedImportFeatureMetadata
self.intensityDataExcluded = excludedImportIntensityData
self.expectedConcentrationExcluded = excludedImportExpectedConcentration
self.excludedFlag = excludedImportFlag
self.peakInfo = {'peakResponse': peakResponse, 'peakArea': peakArea, 'peakConcentrationDeviation': peakConcentrationDeviation, 'peakIntegrationFlag': peakIntegrationFlag, 'peakRT': peakRT}
# add Dataset mandatory columns
self.sampleMetadata['AssayRole'] = numpy.nan
self.sampleMetadata['SampleType'] = numpy.nan
self.sampleMetadata['Dilution'] = numpy.nan
self.sampleMetadata['Correction Batch'] = numpy.nan
self.sampleMetadata['Sample ID'] = numpy.nan
self.sampleMetadata['Exclusion Details'] = numpy.nan
#self.sampleMetadata['Batch'] = numpy.nan #already created
# clear SOP parameters not needed after __matchDatasetToCalibrationReport
AttributesToRemove = ['compoundID', 'compoundName', 'IS', 'unitFinal', 'unitCorrectionFactor', 'calibrationMethod', 'calibrationEquation', 'quantificationType']
AttributesToRemove.extend(self.Attributes['externalID'])
for k in AttributesToRemove:
del self.Attributes[k]
self.Attributes['Log'].append([datetime.now(), 'TargetLynx data file with %d samples, %d features, loaded from \'%s\', calibration report read from \'%s\'' % (self.noSamples, self.noFeatures, datapath, calibrationReportPath)])
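# After this method, the peak-level details populated above can be accessed as (a sketch of
# the attributes set in this method):
#   self.peakInfo['peakArea']      # pandas.DataFrame, samples x features
#   self.peakInfo['peakResponse']  # pandas.DataFrame, samples x features
#   self.peakInfo['peakRT']        # pandas.DataFrame, samples x features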
def __getDatasetFromXML(self, path):
"""
Parse information for :py:attr:`sampleMetadata`, :py:attr:`featureMetadata`, :py:attr:`intensityData`, :py:attr:`expectedConcentration`, :py:attr:`peakResponse`, :py:attr:`peakArea`, :py:attr:`peakConcentrationDeviation`, :py:attr:`peakIntegrationFlag` and :py:attr:`peakRT` from a xml export file produced by TargetLynx (using the 'File -> Export -> XML' menu option)
:param path: Path to the TargetLynx export xml file
:type path: str
:return sampleMetadata: dataframe of sample identifiers and metadata.
:rtype: pandas.DataFrame, :math:`n` × :math:`p`
:return featureMetadata: pandas dataframe of feature identifiers and metadata.
:rtype: pandas.DataFrame, :math:`m` × :math:`q`
:return intensityData: numpy matrix of intensity measurements.
:rtype: numpy.ndarray, :math:`n` × :math:`m`
:return expectedConcentration: pandas dataframe of expected concentration for each sample/feature
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return peakResponse: pandas dataframe of analytical peak response.
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return peakArea: pandas dataframe of analytical peak area.
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return peakConcentrationDeviation: pandas dataframe of %deviation between expected and measured concentration for each sample/feature
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return peakIntegrationFlag: pandas dataframe of integration flag
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return peakRT: pandas dataframe of analytical peak Retention time.
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
"""
import xml.etree.ElementTree
inputData = xml.etree.ElementTree.ElementTree(file=path).getroot()[2][0]
nSamples = int(inputData[1].attrib['count'])
nFeatures = int(inputData[2].attrib['count'])
## Initialise
# sample metadata
sample_file_name = list()
sample_id = list()
sample_number = list()
sample_text = list()
sample_type = list()
sample_date = list()
sample_time = list()
sample_vial = list()
sample_instrument = list()
# feature metadata
compound_name = list()
compound_id = list()
compound_IS_id = list()
# intensity data
peak_conc = numpy.full([nSamples, nFeatures], numpy.nan)
# expected concentration
peak_expconc = numpy.full([nSamples, nFeatures], numpy.nan)
# Bonus peak info
peak_concdev = numpy.full([nSamples, nFeatures], numpy.nan)
peak_area = numpy.full([nSamples, nFeatures], numpy.nan)
peak_response = numpy.full([nSamples, nFeatures], numpy.nan)
peak_RT = numpy.full([nSamples, nFeatures], numpy.nan)
peak_integrationFlag = pandas.DataFrame(index=range(1, nSamples + 1), columns=range(1, nFeatures + 1), dtype='str')
## Read data
# sample metadata & intensity data
# iterate over samples
for i_spl in range(0, nSamples):
spl = inputData[1][i_spl]
# sample metadata
sample_file_name.append(spl.attrib['name'])
sample_id.append(int(spl.attrib['id']))
sample_number.append(int(spl.attrib['samplenumber']))
sample_text.append(spl.attrib['sampleid'])
sample_type.append(spl.attrib['type'])
sample_date.append(spl.attrib['createdate'])
sample_time.append(spl.attrib['createtime'])
sample_vial.append(spl.attrib['vial'])
sample_instrument.append(spl.attrib['instrument'])
# iterate over compounds
for i_cpd in range(0, nFeatures):
cpdData = spl[i_cpd][0]
# intensity data
# for whatever reason, TargetLynx sometimes reports no peak as '0.0000' and sometimes as ''
try:
peak_conc[i_spl, i_cpd] = float(cpdData.attrib['analconc'])
except ValueError:
peak_conc[i_spl, i_cpd] = 0.0
# more peak info
peak_area[i_spl, i_cpd] = float(cpdData.attrib['area'])
peak_expconc[i_spl, i_cpd] = float(spl[i_cpd].attrib['stdconc'])
peak_concdev[i_spl, i_cpd] = float(cpdData.attrib['conccalc'])
peak_response[i_spl, i_cpd] = float(cpdData.attrib['response'])
peak_RT[i_spl, i_cpd] = float(cpdData.attrib['foundrt'])
peak_integrationFlag.iloc[i_spl, i_cpd] = cpdData.attrib['pkflags']
# feature metadata
for j_cpd in range(0, nFeatures):
cpd_calib = inputData[2][j_cpd]
compound_name.append(cpd_calib.attrib['name'])
compound_id.append(int(cpd_calib.attrib['id']))
compound_IS_id.append(cpd_calib[0].attrib['ref']) # not int() as some IS have ref=''
## Output Dataframe
# sampleMetadata
sampleMetadata = dict()
sampleMetadata['Sample File Name'] = sample_file_name
sampleMetadata['Sample Base Name'] = sample_file_name
sampleMetadata['TargetLynx Sample ID'] = sample_id
sampleMetadata['MassLynx Row ID'] = sample_number
sampleMetadata['Sample Name'] = sample_text
sampleMetadata['Sample Type'] = sample_type
sampleMetadata['Acqu Date'] = sample_date
sampleMetadata['Acqu Time'] = sample_time
sampleMetadata['Vial'] = sample_vial
sampleMetadata['Instrument'] = sample_instrument
# featureMetadata
featureMetadata = dict()
featureMetadata['Feature Name'] = compound_name
featureMetadata['TargetLynx Feature ID'] = compound_id
featureMetadata['TargetLynx IS ID'] = compound_IS_id
# intensityData
intensityData = peak_conc
# expectedConcentration
peak_expconc[peak_expconc == 0] = numpy.nan # remove 0 and replace them by nan
expectedConcentration = pandas.DataFrame(peak_expconc)
# Other peak info
peakResponse = pandas.DataFrame(peak_response)
peakArea = pandas.DataFrame(peak_area)
peakConcentrationDeviation = pandas.DataFrame(peak_concdev)
peakIntegrationFlag = peak_integrationFlag # already dataframe
peakIntegrationFlag.reset_index(drop=True, inplace=True)
peakRT = pandas.DataFrame(peak_RT)
# Convert to DataFrames
featureMetadata = pandas.concat([pandas.DataFrame(featureMetadata[c], columns=[c]) for c in featureMetadata.keys()], axis=1, sort=False)
sampleMetadata = pandas.concat([pandas.DataFrame(sampleMetadata[c], columns=[c]) for c in sampleMetadata.keys()], axis=1, sort=False)
expectedConcentration.columns = featureMetadata['Feature Name'].values.tolist()
peakIntegrationFlag.columns = featureMetadata['Feature Name'].values.tolist()
peakResponse.columns = featureMetadata['Feature Name'].values.tolist()
peakArea.columns = featureMetadata['Feature Name'].values.tolist()
peakConcentrationDeviation.columns = featureMetadata['Feature Name'].values.tolist()
peakRT.columns = featureMetadata['Feature Name'].values.tolist()
sampleMetadata['Metadata Available'] = False
return sampleMetadata, featureMetadata, intensityData, expectedConcentration, peakResponse, peakArea, peakConcentrationDeviation, peakIntegrationFlag, peakRT
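# The positional indexing used above assumes the following layout of the TargetLynx export
# (a sketch inferred from the parsing code; only positions and attributes are relied upon):
#   root[2][0]                       -> inputData (quantification dataset block)
#   inputData[1]                     -> sample list, attrib['count'] = number of samples
#   inputData[1][i][j].attrib['stdconc']  -> expected concentration for sample i / compound j
#   inputData[1][i][j][0]            -> peak entry for sample i / compound j ('analconc', 'area',
#                                       'response', 'foundrt', 'pkflags', 'conccalc', ...)
#   inputData[2]                     -> compound list, attrib['count'] = number of features
#   inputData[2][j]                  -> compound j ('name', 'id'), with [0].attrib['ref'] giving its IS reference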
def __getCalibrationFromReport(self, path):
"""
Read the calibration information from a calibration report `csv` following the provided report template.
The following columns are required (leave an empty value to reject a compound):
* Compound
The compound name, identical to the one employed in the SOP `json` file.
* TargetLynx ID
The compound TargetLynx ID, identical to the one employed in the SOP `json` file.
* LLOQ
Lowest limit of quantification concentration, in the same unit as indicated in TargetLynx.
* ULOQ
Upper limit of quantification concentration, in the same unit as indicated in TargetLynx.
The following columns are expected by :py:meth:`~TargetedDataset._targetLynxApplyLimitsOfQuantificationNoiseFilled`:
* Noise (area)
Area integrated in a blank sample at the same retention time as the compound of interest (if left empty noise concentration calculation cannot take place).
* a
:math:`a` coefficient in the calibration equation (if left empty noise concentration calculation cannot take place).
* b
:math:`b` coefficient in the calibration equation (if left empty noise concentration calculation cannot take place).
The following columns are recommended:
* Cpd Info
Additional information relating to the compound (can be left empty).
* r
:math:`r` goodness of fit measure for the calibration equation (can be left empty).
* r2
:math:`r^2` goodness of fit measure for the calibration equation (can be left empty).
:param path: Path to the calibration report csv file.
:type path: str
:return calibReport: pandas dataframe of feature identifiers and calibration information.
:rtype: pandas.DataFrame, :math:`m` × :math:`r`
:raises LookupError: if the expected columns are absent from the csv file.
"""
calibReport = pandas.read_csv(path)
# check minimum number of columns
expectedCol = ['Compound', 'TargetLynx ID', 'LLOQ', 'ULOQ']
foundCol = calibReport.columns.values.tolist()
# if the set is not empty, some columns are missing from the csv
if set(expectedCol) - set(foundCol) != set():
raise LookupError('Calibration report (' + os.path.split(path)[1] + ') does not contain the following expected column: ' + str(list(set(expectedCol) - set(foundCol))))
return calibReport
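# Illustrative calibration report layout (values are made up; column names follow the
# docstring above, and recommended columns may be left empty):
#   Compound,TargetLynx ID,Cpd Info,Noise (area),LLOQ,ULOQ,a,b,r,r2
#   Compound 1,1,,38.3,25,2500,1.04,-0.024,0.999,0.998
#   Compound 2,2,,,,,,,,          <- empty LLOQ/ULOQ rejects the compound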
def __matchDatasetToCalibrationReport(self, sampleMetadata, featureMetadata, intensityData, expectedConcentration, peakResponse, peakArea, peakConcentrationDeviation, peakIntegrationFlag, peakRT, calibReport):
"""
Check the agreement of Feature IDs and Feature Names across all inputs (TargetLynx export `xml`, calibration report `csv` and SOP `json`).
The calibration report and SOP information are first mapped together, raising errors in case of disagreement.
This block is then mapped to the TargetLynx `featureMetadata` (on compound ID) and overrides the TargetLynx information (raising warnings).
Features not matched are appended to an `excludedSampleMetadata`, `excludedFeatureMetadata` and `excludedIntensityData` (excluded `peakResponse`, `peakArea`, `peakConcentrationDeviation`, `peakIntegrationFlag` and `peakRT` are discarded).
Additional information is added to the `sampleMetadata` (chromatography, ionisation, acquired time, run order).
Apply the unitCorrectionFactor to the `intensityData`, `LLOQ` and `ULOQ` concentrations and `expectedConcentration`.
:param sampleMetadata: dataframe of sample identifiers and metadata.
:type sampleMetadata: pandas.DataFrame, :math:`n` × :math:`p`
:param featureMetadata: pandas dataframe of feature identifiers and metadata.
:type featureMetadata: pandas.DataFrame, :math:`m` × :math:`q`
:param intensityData: numpy matrix of intensity measurements.
:type intensityData: numpy.ndarray, :math:`n` × :math:`m`
:param expectedConcentration: pandas dataframe of analytical peak expected concentrations.
:type expectedConcentration: pandas.DataFrame, :math:`n` × :math:`m`
:param peakResponse: pandas dataframe of analytical peak response.
:type peakResponse: pandas.DataFrame, :math:`n` × :math:`m`
:param peakArea: pandas dataframe of analytical peak area.
:type peakArea: pandas.DataFrame, :math:`n` × :math:`m`
:param peakConcentrationDeviation: pandas dataframe of analytical peak concentration deviation.
:type peakConcentrationDeviation: pandas.DataFrame, :math:`n` × :math:`m`
:param peakIntegrationFlag: pandas dataFrame of analytical peak integration flags.
:type peakIntegrationFlag: pandas.DataFrame, :math:`n` × :math:`m`
:param peakRT: pandas dataframe of analytical Retention time.
:type peakRT: pandas.DataFrame, :math:`n` × :math:`m`
:param calibReport: pandas dataframe of feature identifiers and calibration information.
:type calibReport: pandas.DataFrame, :math:`m` × :math:`r`
:return sampleMetadata: dataframe of sample identifiers and metadata.
:rtype: pandas.DataFrame, :math:`n` × :math:`p`
:return finalFeatureMetadata: pandas dataframe of feature identifiers and metadata.
:rtype: pandas.DataFrame, :math:`m` × :math:`q`
:return finalIntensityData: numpy matrix of intensity measurements.
:rtype: numpy.ndarray, :math:`n` × :math:`m`
:return finalExpectedConcentration: pandas dataframe of expected concentration for each sample/feature
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return excludedSampleMetadata: list of pandas dataframe of excluded sample measurements for excluded features.
:rtype: list
:return excludedFeatureMetadata: list of pandas dataframe of excluded feature identifiers and metadata.
:rtype: list
:return excludedIntensityData: list of matrix of intensity measurements for excluded features.
:rtype: list
:return excludedExpectedConcentration: list of pandas dataframe of excluded expected concentration.
:rtype: list
:return excludedFlag: list of str of exclusion type ('Samples' or 'Features').
:rtype: list
:return finalPeakResponse: pandas dataframe of analytical peak response.
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return finalPeakArea: pandas dataframe of analytical peak area.
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return finalPeakConcentrationDeviation: pandas dataframe of %deviation between expected and measured concentration for each sample/feature
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return finalPeakIntegrationFlag: pandas dataframe of integration flag
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return finalPeakRT: pandas dataframe of analytical peak Retention time
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:raises ValueError: if the shape of sampleMetadata, featureMetadata or intensityData shape do not match.
:raises ValueError: if features in the calibration report and in the SOP differ (number of compounds, compound ID or compound names).
:raises ValueError: if in the SOP 'quantificationType', 'calibrationMethod' or 'IS' are mismatched.
"""
import warnings
from datetime import datetime
## sampleMetadata, featureMetadata & intensityData should by construction have the same size
if sampleMetadata.shape[0] != intensityData.shape[0]:
raise ValueError('sampleMetadata and intensityData number of samples differ')
if featureMetadata.shape[0] != intensityData.shape[1]:
raise ValueError('featureMetadata and intensityData number of compounds differ')
if intensityData.shape != peakResponse.shape:
raise ValueError('intensityData and peakResponse number of compounds/samples differ')
if intensityData.shape != peakArea.shape:
raise ValueError('intensityData and peakArea number of compounds/samples differ')
if intensityData.shape != expectedConcentration.shape:
raise ValueError('intensityData and expectedConcentration number of compounds/samples differ')
if intensityData.shape != peakConcentrationDeviation.shape:
raise ValueError('intensityData and peakConcentrationDeviation number of compounds/samples differ')
if intensityData.shape != peakIntegrationFlag.shape:
raise ValueError('intensityData and peakIntegrationFlag number of compounds/samples differ')
if intensityData.shape != peakRT.shape:
raise ValueError('intensityData and peakRT number of compounds/samples differ')
# initialise excluded import data
excludedSampleMetadata = []
excludedFeatureMetadata = []
excludedIntensityData = []
excludedExpectedConcentration = []
excludedFlag = []
## SOP is used as 'Truth', if calibReport does not match, it's a problem (Error)
## Then if featureMetadata does not match SOP/calibReport, use SOP as reference (message conflict)
## Match SOP & calibReport
# Load SOP
# calibrationMethod is 'backcalculatedIS' (use response), 'noIS' (use area), or 'noCalibration' (no corrections at all)
# quantificationType is:
# 'IS' (expects calibrationMethod=noIS)
# 'QuantOwnLabeledAnalogue' (would expect 'backcalculatedIS' but could use 'noIS')
# 'QuantAltLabeledAnalogue' (would expect 'backcalculatedIS' but could use 'noIS')
# 'Monitored' (which expects 'noCalibration')
SOPColumnsToLoad = ['compoundID', 'compoundName', 'IS', 'unitFinal', 'unitCorrectionFactor', 'calibrationMethod', 'calibrationEquation', 'quantificationType']
SOPColumnsToLoad.extend(self.Attributes['externalID'])
SOPFeatureMetadata = pandas.DataFrame.from_dict(dict((k, self.Attributes[k]) for k in SOPColumnsToLoad), orient='columns')
SOPFeatureMetadata['compoundID'] = pandas.to_numeric(SOPFeatureMetadata['compoundID'])
SOPFeatureMetadata['unitCorrectionFactor'] = pandas.to_numeric(SOPFeatureMetadata['unitCorrectionFactor'])
SOPFeatureMetadata['IS'] = SOPFeatureMetadata['IS'].map({'True': True, 'False': False})
SOPFeatureMetadata['Unit'] = SOPFeatureMetadata['unitFinal']
SOPFeatureMetadata.drop('unitFinal', inplace=True, axis=1)
# convert quantificationType from str to enum
if 'quantificationType' in SOPFeatureMetadata.columns:
for qType in QuantificationType:
SOPFeatureMetadata.loc[SOPFeatureMetadata['quantificationType'].values == qType.name, 'quantificationType'] = qType
# convert calibrationMethod from str to enum
if 'calibrationMethod' in SOPFeatureMetadata.columns:
for cMethod in CalibrationMethod:
SOPFeatureMetadata.loc[SOPFeatureMetadata['calibrationMethod'].values == cMethod.name, 'calibrationMethod'] = cMethod
# check that all quantificationType='IS' are also flagged as IS
# (both have same number of feature + intersection has same number of feature as one of them)
if (sum((SOPFeatureMetadata['quantificationType'] == QuantificationType.IS)) != sum(SOPFeatureMetadata['IS'])) | (sum((SOPFeatureMetadata['quantificationType'] == QuantificationType.IS) & SOPFeatureMetadata['IS']) != sum(SOPFeatureMetadata['IS'])):
raise ValueError('Check SOP file, features with quantificationType=\'IS\' must have been flagged as IS=\'True\'')
# check that all quantificationType='Monitored' have a calibrationMethod='noCalibration'
# (both have same number of feature + intersection has same number of feature as one of them)
if (sum((SOPFeatureMetadata['quantificationType'] == QuantificationType.Monitored)) != (sum(SOPFeatureMetadata['calibrationMethod'] == CalibrationMethod.noCalibration))) | (sum((SOPFeatureMetadata['quantificationType'] == QuantificationType.Monitored) & (SOPFeatureMetadata['calibrationMethod'] == CalibrationMethod.noCalibration)) != sum(SOPFeatureMetadata['quantificationType'] == QuantificationType.Monitored)):
raise ValueError('Check SOP file, features with quantificationType=\'Monitored\' must have a calibrationMethod=\'noCalibration\'\n quantificationType are:\n\'IS\' (expects calibrationMethod=noIS)\n\'QuantOwnLabeledAnalogue\' (would expect \'backcalculatedIS\' but could use \'noIS\' or \'otherCalibration\')\n\'QuantAltLabeledAnalogue\' (would expect \'backcalculatedIS\' but could use \'noIS\' or \'otherCalibration\')\n\'QuantOther\' (can take any CalibrationMethod)\n\'Monitored\' (which expects \'noCalibration\')')
# check number of compounds in SOP & calibReport
if SOPFeatureMetadata.shape[0] != calibReport.shape[0]:
raise ValueError('SOP and Calibration Report number of compounds differ')
featureCalibSOP = pandas.merge(left=SOPFeatureMetadata, right=calibReport, how='inner', left_on='compoundName', right_on='Compound', sort=False)
featureCalibSOP.drop('TargetLynx ID', inplace=True, axis=1)
# check we still have the same number of features (inner join)
if featureCalibSOP.shape[0] != SOPFeatureMetadata.shape[0]:
raise ValueError('SOP and Calibration Report compounds differ')
# check compound names match in SOP and calibReport after join
if sum(featureCalibSOP['compoundName'] != featureCalibSOP['Compound']) != 0:
raise ValueError('SOP and Calibration Report compounds names differ: ' + str(featureCalibSOP.loc[(featureCalibSOP['compoundName'] != featureCalibSOP['Compound']), ['compoundName', 'Compound']].values.tolist()))
featureCalibSOP.drop('Compound', inplace=True, axis=1)
## Match calibSOP & featureMetadata
# left join to keep feature order and limit to features in XML
finalFeatureMetadata = pandas.merge(left=featureMetadata, right=featureCalibSOP, how='left', left_on='TargetLynx Feature ID', right_on='compoundID', sort=False)
# limit to compounds present in the SOP (no report of SOP compounds not in XML)
if finalFeatureMetadata['compoundID'].isnull().sum() != 0:
warnings.warn("Warning: Only " + str(finalFeatureMetadata['compoundID'].notnull().sum()) + " features shared across the SOP/Calibration report (" + str(featureCalibSOP.shape[0]) + " total) and the TargetLynx output file (" + str(featureMetadata.shape[0]) + " total). " + str(finalFeatureMetadata['compoundID'].isnull().sum()) + " features discarded from the TargetLynx output file.")
# filter out unavailable features
unavailableFeatVect = finalFeatureMetadata['compoundID'].isnull().values
excludedSampleMetadata.append(sampleMetadata)
excludedFeatureMetadata.append(finalFeatureMetadata.iloc[unavailableFeatVect, :])
excludedIntensityData.append(intensityData[:, unavailableFeatVect])
excludedExpectedConcentration.append(expectedConcentration.iloc[:, unavailableFeatVect])
excludedFlag.append('Features')
finalFeatureMetadata = finalFeatureMetadata.iloc[~unavailableFeatVect, :]
finalIntensityData = intensityData[:, ~unavailableFeatVect]
finalExpectedConcentration = expectedConcentration.iloc[:, ~unavailableFeatVect]
finalPeakResponse = peakResponse.iloc[:, ~unavailableFeatVect]
finalPeakArea = peakArea.iloc[:, ~unavailableFeatVect]
finalPeakConcentrationDeviation = peakConcentrationDeviation.iloc[:, ~unavailableFeatVect]
finalPeakIntegrationFlag = peakIntegrationFlag.iloc[:, ~unavailableFeatVect]
finalPeakRT = peakRT.iloc[:, ~unavailableFeatVect]
# remove duplicate col
finalFeatureMetadata.drop('compoundID', inplace=True, axis=1)
else:
finalIntensityData = intensityData
finalExpectedConcentration = expectedConcentration
finalPeakResponse = peakResponse
finalPeakArea = peakArea
finalPeakConcentrationDeviation = peakConcentrationDeviation
finalPeakIntegrationFlag = peakIntegrationFlag
finalPeakRT = peakRT
# remove duplicate col
finalFeatureMetadata.drop('compoundID', inplace=True, axis=1)
# check names, keep SOP value, report differences
if sum(finalFeatureMetadata['Feature Name'] != finalFeatureMetadata['compoundName']) != 0:
warnings.warn('TargetLynx feature names & SOP/Calibration Report compounds names differ; SOP names will be used: ' + str(finalFeatureMetadata.loc[(finalFeatureMetadata['Feature Name'] != finalFeatureMetadata['compoundName']), ['Feature Name','compoundName']].values.tolist()))
finalFeatureMetadata['Feature Name'] = finalFeatureMetadata['compoundName']
finalExpectedConcentration.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalPeakResponse.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalPeakArea.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalPeakConcentrationDeviation.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalPeakIntegrationFlag.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalPeakRT.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalFeatureMetadata.drop('compoundName', inplace=True, axis=1)
## Add information to the sampleMetada
finalSampleMetadata = copy.deepcopy(sampleMetadata)
# Add chromatography
finalSampleMetadata = finalSampleMetadata.join(pandas.DataFrame([self.Attributes['chromatography']] * finalSampleMetadata.shape[0], columns=['Chromatography']))
# Add ionisation
finalSampleMetadata = finalSampleMetadata.join(pandas.DataFrame([self.Attributes['ionisation']] * finalSampleMetadata.shape[0], columns=['Ionisation']))
# Add batch, default is 1
finalSampleMetadata = finalSampleMetadata.join(pandas.DataFrame([1] * finalSampleMetadata.shape[0], columns=['Batch']))
# Process Sample Type
finalSampleMetadata['Calibrant'] = finalSampleMetadata['Sample Type'] == 'Standard'
finalSampleMetadata['Study Sample'] = finalSampleMetadata['Sample Type'] == 'Analyte'
finalSampleMetadata['Blank'] = finalSampleMetadata['Sample Type'] == 'Blank'
finalSampleMetadata['QC'] = finalSampleMetadata['Sample Type'] == 'QC'
# unused Sample Types
# sampleMetadata['Solvent'] = sampleMetadata['Sample Type'] == 'Solvent'
# sampleMetadata['Recovery'] = sampleMetadata['Sample Type'] == 'Recovery'
# sampleMetadata['Donor'] = sampleMetadata['Sample Type'] == 'Donor'
# sampleMetadata['Receptor'] = sampleMetadata['Sample Type'] == 'Receptor'
finalSampleMetadata['Other'] = (~finalSampleMetadata['Calibrant'] & ~finalSampleMetadata['Study Sample'] & ~finalSampleMetadata['Blank'] & ~finalSampleMetadata['QC']) # & ~sampleMetadata['Solvent'] & ~sampleMetadata['Recovery'] & ~sampleMetadata['Donor'] & ~sampleMetadata['Receptor']
# Add Acquired Time
finalSampleMetadata['Acquired Time'] = numpy.nan
for i in range(finalSampleMetadata.shape[0]):
try:
finalSampleMetadata.loc[i, 'Acquired Time'] = datetime.strptime(str(finalSampleMetadata.loc[i, 'Acqu Date']) + " " + str(finalSampleMetadata.loc[i, 'Acqu Time']),'%d-%b-%y %H:%M:%S')
except ValueError:
pass
finalSampleMetadata['Acquired Time'] = pandas.to_datetime(finalSampleMetadata['Acquired Time'])
# Add Run Order
finalSampleMetadata['Order'] = finalSampleMetadata.sort_values(by='Acquired Time').index
finalSampleMetadata['Run Order'] = finalSampleMetadata.sort_values(by='Order').index
finalSampleMetadata.drop('Order', axis=1, inplace=True)
# Initialise the Batch to 1
finalSampleMetadata['Batch'] = [1]*finalSampleMetadata.shape[0]
## Apply unitCorrectionFactor
finalFeatureMetadata['LLOQ'] = finalFeatureMetadata['LLOQ'] * finalFeatureMetadata['unitCorrectionFactor'] # NaN will be kept
finalFeatureMetadata['ULOQ'] = finalFeatureMetadata['ULOQ'] * finalFeatureMetadata['unitCorrectionFactor']
finalIntensityData = finalIntensityData * finalFeatureMetadata['unitCorrectionFactor'].values
finalExpectedConcentration = finalExpectedConcentration * finalFeatureMetadata['unitCorrectionFactor'].values
## Summary
print('TargetLynx output, Calibration report and SOP information matched:')
print('Targeted Method: ' + self.Attributes['methodName'])
print(str(finalSampleMetadata.shape[0]) + ' samples (' + str(sum(finalSampleMetadata['Calibrant'])) + ' calibration points, ' + str(sum(finalSampleMetadata['Study Sample'])) + ' study samples)')
print(str(finalFeatureMetadata.shape[0]) + ' features (' + str(sum(finalFeatureMetadata['IS'])) + ' IS, ' + str(sum(finalFeatureMetadata['quantificationType'] == QuantificationType.QuantOwnLabeledAnalogue)) + ' quantified and validated with own labeled analogue, ' + str(sum(finalFeatureMetadata['quantificationType'] == QuantificationType.QuantAltLabeledAnalogue)) + ' quantified and validated with alternative labeled analogue, ' + str(sum(finalFeatureMetadata['quantificationType'] == QuantificationType.QuantOther)) + ' other quantification, ' + str(sum(finalFeatureMetadata['quantificationType'] == QuantificationType.Monitored)) + ' monitored for relative information)')
if len(excludedFeatureMetadata) != 0:
print(str(excludedFeatureMetadata[0].shape[0]) + ' features excluded as missing from the SOP')
print('All concentrations converted to final units')
print('-----')
return finalSampleMetadata, finalFeatureMetadata, finalIntensityData, finalExpectedConcentration, excludedSampleMetadata, excludedFeatureMetadata, excludedIntensityData, excludedExpectedConcentration, excludedFlag, finalPeakResponse, finalPeakArea, finalPeakConcentrationDeviation, finalPeakIntegrationFlag, finalPeakRT
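# Unit-correction sketch (illustrative numbers): for a feature reported by TargetLynx in ng/mL
# but defined in ug/mL in the SOP, unitCorrectionFactor would be 0.001, so the block above
# rescales that feature's intensities, expected concentrations and LLOQ/ULOQ accordingly,
# e.g. 150 ng/mL * 0.001 = 0.15 ug/mL, and LLOQ 25 ng/mL -> 0.025 ug/mL.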
def _filterTargetLynxSamples(self, sampleTypeToProcess=['Study Sample', 'QC'], **kwargs):
"""
Isolate 'Calibrant' samples ('Sample Type' == 'Standard' in MassLynx) and create the :py:attr:`calibration` dictionary, following :py:meth:`~TargetedDataset._readTargetLynxDataset`.
Exclude samples based on their MassLynx 'Sample Type'. Only the types passed in `sampleTypeToProcess` are kept. Values are 'Study Sample' ('Analyte' in MassLynx), 'Blank', 'QC' or 'Other' (for all other MassLynx entries).
:param sampleTypeToProcess: list of ['Study Sample','Blank','QC','Other'] for the sample types to keep.
:type sampleTypeToProcess: list of str
:return: None
:raises ValueError: if 'sampleTypeToProcess' is not recognised.
:raises AttributeError: if the excludedImport lists do not exist.
"""
# check inputs
if set(sampleTypeToProcess) - set(['Study Sample', 'Blank', 'QC', 'Other']) != set():
raise ValueError('sampleTypeToProcess ' + str(
set(sampleTypeToProcess) - set(['Study Sample', 'Blank', 'QC', 'Other'])) + ' is not recognised')
# check excluded exist
if((not hasattr(self,'sampleMetadataExcluded'))|(not hasattr(self,'featureMetadataExcluded'))|(not hasattr(self,'intensityDataExcluded'))|(not hasattr(self,'expectedConcentrationExcluded'))|(not hasattr(self,'excludedFlag'))):
raise AttributeError('sampleMetadataExcluded, featureMetadataExcluded, intensityDataExcluded, expectedConcentrationExcluded or excludedFlag have not been previously initialised')
sampleMetadata = copy.deepcopy(self.sampleMetadata)
featureMetadata = copy.deepcopy(self.featureMetadata)
intensityData = copy.deepcopy(self._intensityData)
expectedConcentration = copy.deepcopy(self.expectedConcentration)
excludedImportSampleMetadata = copy.deepcopy(self.sampleMetadataExcluded)
excludedImportFeatureMetadata = copy.deepcopy(self.featureMetadataExcluded)
excludedImportIntensityData = copy.deepcopy(self.intensityDataExcluded)
excludedImportExpectedConcentration = copy.deepcopy(self.expectedConcentrationExcluded)
excludedImportFlag = copy.deepcopy(self.excludedFlag)
peakInfo = copy.deepcopy(self.peakInfo)
# Calibration information
calibFeatureMetadata = featureMetadata
calibSampleMetadata = sampleMetadata.loc[sampleMetadata['Calibrant'].values, :]
calibIntensityData = intensityData[sampleMetadata['Calibrant'].values, :]
calibExpectedConcentration = expectedConcentration.loc[sampleMetadata['Calibrant'].values, :]
calibPeakResponse = peakInfo['peakResponse'].loc[sampleMetadata['Calibrant'].values, :]
calibPeakArea = peakInfo['peakArea'].loc[sampleMetadata['Calibrant'].values, :]
calibPeakConcentrationDeviation = peakInfo['peakConcentrationDeviation'].loc[sampleMetadata['Calibrant'].values, :]
calibPeakIntegrationFlag = peakInfo['peakIntegrationFlag'].loc[sampleMetadata['Calibrant'].values, :]
calibPeakRT = peakInfo['peakRT'].loc[sampleMetadata['Calibrant'].values, :]
calibPeakInfo = {'peakResponse': calibPeakResponse, 'peakArea': calibPeakArea, 'peakConcentrationDeviation': calibPeakConcentrationDeviation, 'peakIntegrationFlag': calibPeakIntegrationFlag, 'peakRT': calibPeakRT}
calibration = {'calibSampleMetadata': calibSampleMetadata, 'calibFeatureMetadata': calibFeatureMetadata, 'calibIntensityData': calibIntensityData, 'calibExpectedConcentration': calibExpectedConcentration, 'calibPeakInfo': calibPeakInfo}
# Samples to keep
samplesToProcess = [False] * sampleMetadata.shape[0]
for i in sampleTypeToProcess:
samplesToProcess = (samplesToProcess | sampleMetadata[i]).values
# Filter
tmpSampleMetadata = sampleMetadata.loc[samplesToProcess, :]
tmpIntensityData = intensityData[samplesToProcess, :]
tmpExpectedConcentration = expectedConcentration.loc[samplesToProcess, :]
tmpPeakResponse = peakInfo['peakResponse'].loc[samplesToProcess, :]
tmpPeakArea = peakInfo['peakArea'].loc[samplesToProcess, :]
tmpPeakConcentrationDeviation = peakInfo['peakConcentrationDeviation'].loc[samplesToProcess, :]
tmpPeakIntegrationFlag = peakInfo['peakIntegrationFlag'].loc[samplesToProcess, :]
tmpPeakRT = peakInfo['peakRT'].loc[samplesToProcess, :]
tmpPeakInfo = {'peakResponse': tmpPeakResponse, 'peakArea': tmpPeakArea, 'peakConcentrationDeviation': tmpPeakConcentrationDeviation, 'peakIntegrationFlag': tmpPeakIntegrationFlag, 'peakRT': tmpPeakRT}
# Samples to exclude
samplesToExclude = ~samplesToProcess & ~sampleMetadata['Calibrant'].values # no need to exclude calibrant
if sum(samplesToExclude) != 0:
excludedImportSampleMetadata.append(sampleMetadata.loc[samplesToExclude, :])
excludedImportFeatureMetadata.append(featureMetadata)
excludedImportIntensityData.append(intensityData[samplesToExclude, :])
excludedImportExpectedConcentration.append(expectedConcentration.loc[samplesToExclude, :])
excludedImportFlag.append('Samples')
# Clean columns
tmpSampleMetadata.reset_index(drop=True, inplace=True)
tmpSampleMetadata = tmpSampleMetadata.drop(['Calibrant', 'Study Sample', 'Blank', 'QC', 'Other'], axis=1)
tmpExpectedConcentration.reset_index(drop=True, inplace=True)
# Output
self.sampleMetadata = tmpSampleMetadata
self.featureMetadata = featureMetadata
self._intensityData = tmpIntensityData
self.expectedConcentration = tmpExpectedConcentration
self.sampleMetadataExcluded = excludedImportSampleMetadata
self.featureMetadataExcluded = excludedImportFeatureMetadata
self.intensityDataExcluded = excludedImportIntensityData
self.expectedConcentrationExcluded = excludedImportExpectedConcentration
self.excludedFlag = excludedImportFlag
self.peakInfo = tmpPeakInfo
self.calibration = calibration
# log the modifications
print(sampleTypeToProcess, 'samples are kept for processing')
print('-----')
self.Attributes['Log'].append([datetime.now(), '%s samples kept for processing (%d samples, %d features). %d calibration samples filtered. %d samples excluded.' % (str(sampleTypeToProcess), self.noSamples, self.noFeatures, self.calibration['calibSampleMetadata'].shape[0], sum(samplesToExclude))])
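# Filtering sketch: with the default sampleTypeToProcess=['Study Sample', 'QC'], the boolean
# columns created in __matchDatasetToCalibrationReport are OR-ed together, roughly:
#   samplesToProcess = (sampleMetadata['Study Sample'] | sampleMetadata['QC']).values
# 'Calibrant' samples are never kept here; they are moved into self.calibration instead.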
def _filterTargetLynxIS(self, **kwargs):
"""
Filter out Internal Standard (IS) features and add them to excludedImportSampleMetadata, excludedImportFeatureMetadata, excludedImportIntensityData and excludedImportExpectedConcentration.
IS filtered from self.calibration are not saved.
:return: None
:raises AttributeError: if the excludedImport lists do not exist.
:raises AttributeError: if the calibration dictionary does not exist.
"""
# check excludedImport exist (ensures functions are run in the right order)
if ((not hasattr(self, 'sampleMetadataExcluded')) | (not hasattr(self, 'featureMetadataExcluded')) | (not hasattr(self, 'intensityDataExcluded')) | (not hasattr(self, 'expectedConcentrationExcluded')) | (not hasattr(self, 'excludedFlag'))):
raise AttributeError('sampleMetadataExcluded, featureMetadataExcluded, intensityDataExcluded, expectedConcentrationExcluded or excludedFlag have not been previously initialised')
# check calibration dictionary exist (ensures functions are run in the right order)
if not hasattr(self, 'calibration'):
raise AttributeError('calibration dictionary has not been previously initialised')
sampleMetadata = copy.deepcopy(self.sampleMetadata)
featureMetadata = copy.deepcopy(self.featureMetadata)
intensityData = copy.deepcopy(self._intensityData)
expectedConcentration = copy.deepcopy(self.expectedConcentration)
excludedImportSampleMetadata = copy.deepcopy(self.sampleMetadataExcluded)
excludedImportFeatureMetadata = copy.deepcopy(self.featureMetadataExcluded)
excludedImportIntensityData = copy.deepcopy(self.intensityDataExcluded)
excludedImportExpectedConcentration = copy.deepcopy(self.expectedConcentrationExcluded)
excludedImportFlag = copy.deepcopy(self.excludedFlag)
calibration = copy.deepcopy(self.calibration)
peakInfo = copy.deepcopy(self.peakInfo)
# Feature to keep
keptFeat = ~featureMetadata['IS'].values.astype(bool)
# Filter
tmpFeatureMetadata = featureMetadata.loc[keptFeat, :]
tmpIntensityData = intensityData[:, keptFeat]
tmpExpectedConcentration = expectedConcentration.loc[:, keptFeat]
tmpCalibFeatureMetadata = calibration['calibFeatureMetadata'].loc[keptFeat, :]
tmpCalibIntensityData = calibration['calibIntensityData'][:, keptFeat]
tmpCalibExpectedConcentration = calibration['calibExpectedConcentration'].loc[:, keptFeat]
tmpCalibPeakResponse = calibration['calibPeakInfo']['peakResponse'].loc[:, keptFeat]
tmpCalibPeakArea = calibration['calibPeakInfo']['peakArea'].loc[:, keptFeat]
tmpCalibPeakConcentrationDeviation = calibration['calibPeakInfo']['peakConcentrationDeviation'].loc[:, keptFeat]
tmpCalibPeakIntegrationFlag = calibration['calibPeakInfo']['peakIntegrationFlag'].loc[:, keptFeat]
tmpCalibPeakRT = calibration['calibPeakInfo']['peakRT'].loc[:, keptFeat]
tmpCalibPeakInfo = {'peakResponse': tmpCalibPeakResponse, 'peakArea': tmpCalibPeakArea, 'peakConcentrationDeviation': tmpCalibPeakConcentrationDeviation, 'peakIntegrationFlag': tmpCalibPeakIntegrationFlag, 'peakRT': tmpCalibPeakRT}
tmpCalibration = {'calibSampleMetadata': calibration['calibSampleMetadata'], 'calibFeatureMetadata': tmpCalibFeatureMetadata, 'calibIntensityData': tmpCalibIntensityData, 'calibExpectedConcentration': tmpCalibExpectedConcentration, 'calibPeakInfo': tmpCalibPeakInfo}
tmpPeakResponse = peakInfo['peakResponse'].loc[:, keptFeat]
tmpPeakArea = peakInfo['peakArea'].loc[:, keptFeat]
tmpPeakConcentrationDeviation = peakInfo['peakConcentrationDeviation'].loc[:, keptFeat]
tmpPeakIntegrationFlag = peakInfo['peakIntegrationFlag'].loc[:, keptFeat]
tmpPeakRT = peakInfo['peakRT'].loc[:, keptFeat]
tmpPeakInfo = {'peakResponse': tmpPeakResponse, 'peakArea': tmpPeakArea, 'peakConcentrationDeviation': tmpPeakConcentrationDeviation, 'peakIntegrationFlag': tmpPeakIntegrationFlag, 'peakRT': tmpPeakRT}
# Features to exclude
ISFeat = ~keptFeat
if sum(ISFeat) != 0:
excludedImportSampleMetadata.append(sampleMetadata)
excludedImportFeatureMetadata.append(featureMetadata.loc[ISFeat, :])
excludedImportIntensityData.append(intensityData[:, ISFeat])
excludedImportExpectedConcentration.append(expectedConcentration.loc[:, ISFeat])
excludedImportFlag.append('Features')
# Clean columns
tmpFeatureMetadata.reset_index(drop=True, inplace=True)
tmpCalibration['calibFeatureMetadata'].reset_index(drop=True, inplace=True)
tmpFeatureMetadata = tmpFeatureMetadata.drop(['IS', 'TargetLynx IS ID'], axis=1)
# Output
self.featureMetadata = tmpFeatureMetadata
self._intensityData = tmpIntensityData
self.expectedConcentration = tmpExpectedConcentration
self.sampleMetadataExcluded = excludedImportSampleMetadata
self.featureMetadataExcluded = excludedImportFeatureMetadata
self.intensityDataExcluded = excludedImportIntensityData
self.expectedConcentrationExcluded = excludedImportExpectedConcentration
self.excludedFlag = excludedImportFlag
self.calibration = tmpCalibration
self.peakInfo = tmpPeakInfo
# log the modifications
print(sum(keptFeat), 'features are kept for processing,', sum(ISFeat), 'IS removed')
print('-----')
self.Attributes['Log'].append([datetime.now(), '%d features kept for processing (%d samples). %d IS features filtered.' % (sum(keptFeat), self.noSamples, sum(ISFeat))])
def _loadBrukerXMLDataset(self, datapath, fileNamePattern=None, pdata=1, unit=None, **kwargs):
"""
Initialise object from Bruker XML files. Read files and prepare a valid TargetedDataset.
Targeted data measurements are read and mapped to pre-defined SOPs. Once the import is finished, only properly read samples are returned and only features mapped onto the pre-defined SOP and sufficiently described. Only the first instance of a duplicated feature is kept.
:param str datapath: Path to the folder containing all `xml` files, all directories below :file:`datapath` will be scanned for valid `xml` files.
:param str fileNamePattern: Regex pattern to identify the `xml` files in `datapath` folder
:param int pdata: pdata files to parse (default 1)
:param unit: if features are present more than once, only keep the features with the unit passed as input.
:type unit: None or str
:raises TypeError: if `fileNamePattern` is not a string
:raises TypeError: if `pdata` is not an integer
:raises TypeError: if `unit` is not 'None' or a string
:raises ValueError: if `unit` is not one of the unit in the input data
:return: None
"""
from ..utilities._readBrukerXML import importBrukerXML
from ..utilities.extractParams import buildFileList
if fileNamePattern is None:
fileNamePattern = self.Attributes['fileNamePattern']
# Check inputs
if not isinstance(fileNamePattern, str):
raise TypeError('\'fileNamePattern\' must be a string')
if not isinstance(pdata, int):
raise TypeError('\'pdata\' must be an integer')
if unit is not None:
if not isinstance(unit, str):
raise TypeError('\'unit\' must be a string')
## Build a list of xml files matching the pdata in the right folder
pattern = re.compile(fileNamePattern)
filelist = buildFileList(datapath, pattern)
pdataPattern = re.compile('.*?pdata.*?%i' % (pdata))
filelist = [x for x in filelist if pdataPattern.match(x)]
## Load intensity, sampleMetadata and featureMetadata. Files that cannot be opened raise warnings, and are filtered from the returned matrices.
(self.intensityData, self.sampleMetadata, self.featureMetadata) = importBrukerXML(filelist)
## Filter unit if required
avUnit = self.featureMetadata['Unit'].unique().tolist()
if unit is not None:
if unit not in self.featureMetadata['Unit'].unique().tolist():
raise ValueError('The unit \'' + str(unit) + '\' is not present in the input data, available units: ' + str(avUnit))
keepMask = (self.featureMetadata['Unit'] == unit).values
self.featureMetadata = self.featureMetadata.loc[keepMask, :]
self.featureMetadata.reset_index(drop=True, inplace=True)
self.intensityData = self.intensityData[:, keepMask]
## Check all features are unique; if not, only keep the first occurrence of each duplicate
u_ids, u_counts = numpy.unique(self.featureMetadata['Feature Name'], return_counts=True)
if not all(u_counts == 1):
dupFeat = u_ids[u_counts != 1].tolist()
warnings.warn('The following features are present more than once, only the first occurrence will be kept: ' + str(dupFeat) + '. For further filtering, available units are: ' + str(avUnit))
# only keep the first of duplicated features
keepMask = ~self.featureMetadata['Feature Name'].isin(dupFeat).values
keepFirstVal = [(self.featureMetadata['Feature Name'] == Feat).idxmax() for Feat in dupFeat]
keepMask[keepFirstVal] = True
self.featureMetadata = self.featureMetadata.loc[keepMask, :]
self.featureMetadata.reset_index(drop=True, inplace=True)
self.intensityData = self.intensityData[:, keepMask]
## Reformat featureMetadata
# quantificationType
self.featureMetadata['quantificationType'] = numpy.nan
self.featureMetadata.loc[self.featureMetadata['type'] == 'quantification', 'quantificationType'] = QuantificationType.QuantOther
self.featureMetadata.loc[self.featureMetadata['type'] != 'quantification', 'quantificationType'] = QuantificationType.Monitored
self.featureMetadata.drop('type', inplace=True, axis=1)
# calibrationMethod
self.featureMetadata['calibrationMethod'] = numpy.nan
self.featureMetadata.loc[self.featureMetadata['quantificationType'] == QuantificationType.QuantOther, 'calibrationMethod'] = CalibrationMethod.otherCalibration
self.featureMetadata.loc[self.featureMetadata['quantificationType'] == QuantificationType.Monitored, 'calibrationMethod'] = CalibrationMethod.noCalibration
# rename columns
self.featureMetadata.rename(columns={'loq': 'LLOQ', 'lod': 'LOD', 'Lower Reference Bound': 'Lower Reference Percentile', 'Upper Reference Bound': 'Upper Reference Percentile'}, inplace=True)
# replace '-' with nan
self.featureMetadata['LLOQ'].replace('-', numpy.nan, inplace=True)
self.featureMetadata['LLOQ'] = [float(x) for x in self.featureMetadata['LLOQ'].tolist()]
self.featureMetadata['LOD'].replace('-', numpy.nan, inplace=True)
self.featureMetadata['LOD'] = [float(x) for x in self.featureMetadata['LOD'].tolist()]
# ULOQ
self.featureMetadata['ULOQ'] = numpy.nan
## Initialise sampleMetadata
self.sampleMetadata['AssayRole'] = numpy.nan
self.sampleMetadata['SampleType'] = numpy.nan
self.sampleMetadata['Dilution'] = 100
self.sampleMetadata['Correction Batch'] = numpy.nan
self.sampleMetadata['Sample ID'] = numpy.nan
self.sampleMetadata['Exclusion Details'] = None
# add Run Order
self.sampleMetadata['Order'] = self.sampleMetadata.sort_values(by='Acquired Time').index
self.sampleMetadata['Run Order'] = self.sampleMetadata.sort_values(by='Order').index
self.sampleMetadata.drop('Order', axis=1, inplace=True)
# initialise the Batch to 1
self.sampleMetadata['Batch'] = [1] * self.sampleMetadata.shape[0]
self.sampleMetadata['Metadata Available'] = False
## Initialise expectedConcentration
self.expectedConcentration = pandas.DataFrame(None, index=list(self.sampleMetadata.index), columns=self.featureMetadata['Feature Name'].tolist())
## Initialise empty Calibration info
self.calibration = dict()
self.calibration['calibIntensityData'] = numpy.ndarray((0, self.featureMetadata.shape[0]))
self.calibration['calibSampleMetadata'] = pandas.DataFrame(None, columns=self.sampleMetadata.columns)
self.calibration['calibSampleMetadata']['Metadata Available'] = False
self.calibration['calibFeatureMetadata'] = pandas.DataFrame({'Feature Name': self.featureMetadata['Feature Name'].tolist()})
self.calibration['calibExpectedConcentration'] = pandas.DataFrame(None, columns=self.featureMetadata['Feature Name'].tolist())
## Summary
print('Targeted Method: ' + self.Attributes['methodName'])
print(str(self.sampleMetadata.shape[0]) + ' study samples')
print(str(self.featureMetadata.shape[0]) + ' features (' + str(sum(self.featureMetadata['quantificationType'] == QuantificationType.IS)) + ' IS, ' + str(sum(self.featureMetadata['quantificationType'] == QuantificationType.QuantOwnLabeledAnalogue)) + ' quantified and validated with own labeled analogue, ' + str(sum(self.featureMetadata['quantificationType'] == QuantificationType.QuantAltLabeledAnalogue)) + ' quantified and validated with alternative labeled analogue, ' + str(sum(self.featureMetadata['quantificationType'] == QuantificationType.QuantOther)) + ' other quantification, ' + str(sum(self.featureMetadata['quantificationType'] == QuantificationType.Monitored)) + ' monitored for relative information)')
print('-----')
## Apply limit of quantification?
self._applyLimitsOfQuantification(**kwargs)
## clear **kwargs that have been copied to Attributes
for i in list(kwargs.keys()):
try:
del self.Attributes[i]
except:
pass
for j in ['fileNamePattern', 'pdata', 'unit']:
try:
del self.Attributes[j]
except:
pass
def _applyLimitsOfQuantification(self, onlyLLOQ=False, **kwargs):
"""
For each feature, replace intensity values below the lower limit of quantification or above the upper limit of quantification by a fixed value.
Features missing the minimal required information are excluded from :py:attr:`featureMetadata`, :py:attr:`intensityData`, :py:attr:`expectedConcentration` and :py:attr:`calibration`. Features `'Monitored for relative information'` (and `'noCalibration'`) are not processed and are returned without alteration. Features with `'Other quantification'` are allowed `NaN` in the LLOQ or ULOQ (no replacement takes place).
Calibration data is not processed and is therefore returned without modification.
Units in :py:attr:`_intensityData`, :py:attr:`featureMetadata['LLOQ']` and :py:attr:`featureMetadata['ULOQ']` are expected to be identical for a given feature.
Note: In merged datasets, calibration is a list of dict, with features in each calibration dict potentially different from features in featureMetadata and _intensityData.
Therefore in merged dataset, features are not filtered in each individual calibration.
If features are excluded due to a lack of required featureMetadata information, the masks will be reinitialised.
:param onlyLLOQ: if True only correct <LLOQ, if False correct <LLOQ and >ULOQ
:type onlyLLOQ: bool
:return: None
:raises AttributeError: if :py:attr:`featureMetadata['LLOQ']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['ULOQ']` is missing and onlyLLOQ==False
"""
sampleMetadata = copy.deepcopy(self.sampleMetadata)
featureMetadata = copy.deepcopy(self.featureMetadata)
intensityData = copy.deepcopy(self._intensityData)
expectedConcentration = copy.deepcopy(self.expectedConcentration)
calibration = copy.deepcopy(self.calibration)
if ((not hasattr(self, 'sampleMetadataExcluded')) | (not hasattr(self, 'featureMetadataExcluded')) | (not hasattr(self, 'intensityDataExcluded')) | (not hasattr(self, 'expectedConcentrationExcluded')) | (not hasattr(self, 'excludedFlag'))):
sampleMetadataExcluded = []
featureMetadataExcluded = []
intensityDataExcluded = []
expectedConcentrationExcluded = []
excludedFlag = []
else:
sampleMetadataExcluded = copy.deepcopy(self.sampleMetadataExcluded)
featureMetadataExcluded = copy.deepcopy(self.featureMetadataExcluded)
intensityDataExcluded = copy.deepcopy(self.intensityDataExcluded)
expectedConcentrationExcluded = copy.deepcopy(self.expectedConcentrationExcluded)
excludedFlag = copy.deepcopy(self.excludedFlag)
## Check input columns
if 'LLOQ' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'LLOQ\'] column is absent')
if onlyLLOQ==False:
if 'ULOQ' not in featureMetadata.columns:
raise AttributeError('featureMetadata[\'ULOQ\'] column is absent')
## Features only Monitored are not processed and passed untouched (concatenated back at the end)
untouched = (featureMetadata['quantificationType'] == QuantificationType.Monitored).values
if sum(untouched) != 0:
print('The following features are only monitored and therefore not processed for LOQs: ' + str(featureMetadata.loc[untouched, 'Feature Name'].values.tolist()))
untouchedFeatureMetadata = featureMetadata.loc[untouched, :]
featureMetadata = featureMetadata.loc[~untouched, :]
untouchedIntensityData = intensityData[:, untouched]
intensityData = intensityData[:, ~untouched]
untouchedExpectedConcentration = expectedConcentration.loc[:, untouched]
expectedConcentration = expectedConcentration.loc[:, ~untouched]
# same reordering of the calibration
if isinstance(calibration, dict):
untouchedCalibFeatureMetadata = calibration['calibFeatureMetadata'].loc[untouched, :]
calibration['calibFeatureMetadata'] = calibration['calibFeatureMetadata'].loc[~untouched, :]
untouchedCalibIntensityData = calibration['calibIntensityData'][:, untouched]
calibration['calibIntensityData'] = calibration['calibIntensityData'][:, ~untouched]
untouchedCalibExpectedConcentration = calibration['calibExpectedConcentration'].loc[:, untouched]
calibration['calibExpectedConcentration'] = calibration['calibExpectedConcentration'].loc[:, ~untouched]
## Exclude features without required information
unusableFeat = featureMetadata['LLOQ'].isnull().values & (featureMetadata['quantificationType'] != QuantificationType.QuantOther).values
if not onlyLLOQ:
unusableFeat = unusableFeat | (featureMetadata['ULOQ'].isnull().values & (featureMetadata['quantificationType'] != QuantificationType.QuantOther).values)
if sum(unusableFeat) != 0:
print(str(sum(unusableFeat)) + ' features cannot be pre-processed:')
print('\t' + str(sum(unusableFeat)) + ' features lack the required information to apply limits of quantification')
# store
sampleMetadataExcluded.append(sampleMetadata)
featureMetadataExcluded.append(featureMetadata.loc[unusableFeat, :])
intensityDataExcluded.append(intensityData[:, unusableFeat])
expectedConcentrationExcluded.append(expectedConcentration.loc[:, unusableFeat])
excludedFlag.append('Features')
#remove
featureMetadata = featureMetadata.loc[~unusableFeat, :]
intensityData = intensityData[:, ~unusableFeat]
expectedConcentration = expectedConcentration.loc[:, ~unusableFeat]
if isinstance(calibration, dict):
calibration['calibFeatureMetadata'] = calibration['calibFeatureMetadata'].loc[~unusableFeat, :]
calibration['calibIntensityData'] = calibration['calibIntensityData'][:, ~unusableFeat]
calibration['calibExpectedConcentration'] = calibration['calibExpectedConcentration'].loc[:, ~unusableFeat]
## Values replacement (-inf / +inf)
# iterate over the features
for i in range(0, featureMetadata.shape[0]):
# LLOQ
if not numpy.isnan(featureMetadata['LLOQ'].values[i]):
toReplaceLLOQ = intensityData[:, i] < featureMetadata['LLOQ'].values[i]
intensityData[toReplaceLLOQ, i] = -numpy.inf
# ULOQ
if not onlyLLOQ:
if not numpy.isnan(featureMetadata['ULOQ'].values[i]):
toReplaceULOQ = intensityData[:, i] > featureMetadata['ULOQ'].values[i]
intensityData[toReplaceULOQ, i] = numpy.inf
## Add back the untouched monitored features
if sum(untouched) != 0:
featureMetadata = pandas.concat([featureMetadata, untouchedFeatureMetadata], axis=0, sort=False)
intensityData = numpy.concatenate((intensityData, untouchedIntensityData), axis=1)
expectedConcentration = pandas.concat([expectedConcentration, untouchedExpectedConcentration], axis=1, sort=False)
# reorder the calib
if isinstance(calibration, dict):
calibration['calibFeatureMetadata'] = pandas.concat([calibration['calibFeatureMetadata'], untouchedCalibFeatureMetadata], axis=0, sort=False)
calibration['calibIntensityData'] = numpy.concatenate((calibration['calibIntensityData'], untouchedCalibIntensityData), axis=1)
calibration['calibExpectedConcentration'] = pandas.concat([calibration['calibExpectedConcentration'], untouchedCalibExpectedConcentration], axis=1, sort=False)
# Remove excess info
featureMetadata.reset_index(drop=True, inplace=True)
expectedConcentration.reset_index(drop=True, inplace=True)
if isinstance(calibration, dict):
calibration['calibFeatureMetadata'].reset_index(drop=True, inplace=True)
calibration['calibExpectedConcentration'].reset_index(drop=True, inplace=True)
## return dataset with limits of quantification applied
self.featureMetadata = featureMetadata
self._intensityData = intensityData
self.expectedConcentration = expectedConcentration
self.calibration = calibration
self.sampleMetadataExcluded = sampleMetadataExcluded
self.featureMetadataExcluded = featureMetadataExcluded
self.intensityDataExcluded = intensityDataExcluded
self.expectedConcentrationExcluded = expectedConcentrationExcluded
self.excludedFlag = excludedFlag
if sum(unusableFeat) != 0:
# featureMask size will be wrong, requires a reinitialisation
self.initialiseMasks()
## Output and Log
print('Values <LLOQ replaced by -inf')
if not onlyLLOQ:
print('Values >ULOQ replaced by +inf')
if isinstance(calibration, dict):
print('\n')
# log the modifications
if onlyLLOQ:
logLimits = 'Limits of quantification applied to LLOQ'
else:
logLimits = 'Limits of quantification applied to LLOQ and ULOQ'
if sum(untouched) != 0:
logUntouchedFeatures = ' ' + str(sum(untouched)) + ' features only monitored and not processed: ' + str(untouchedFeatureMetadata.loc[:, 'Feature Name'].values.tolist()) + '.'
else:
logUntouchedFeatures = ''
self.Attributes['Log'].append([datetime.now(), '%s (%i samples, %i features). LLOQ are replaced by -inf.%s' % (logLimits, self.noSamples, self.noFeatures, logUntouchedFeatures)])
def _targetLynxApplyLimitsOfQuantificationNoiseFilled(self, onlyLLOQ=False, responseReference=None, **kwargs):
"""
For each feature, replace intensity values below the lower limit of quantification or above the upper limit of quantification. Values below the lower limit of quantification are replaced by the feature noise concentration.
Features missing the minimal required information are excluded from :py:attr:`featureMetadata`, :py:attr:`intensityData`, :py:attr:`expectedConcentration` and :py:attr:`calibration`. Features `'Monitored for relative information'` (and `'noCalibration'`) are not processed and are returned without alteration.
Calibration data is not processed and is therefore returned without modification.
Units in :py:attr:`_intensityData`, :py:attr:`featureMetadata['LLOQ']` and :py:attr:`featureMetadata['ULOQ']` are expected to be identical for a given feature.
.. Note:: To replace values <LLOQ by the concentration equivalent to the noise level, the noise area, as well as the :math:`a` and :math:`b` parameters of the calibration equation, must be known. For each feature, the ratio `(IS conc / IS Area)`, defined as the responseFactor, is determined in a representative calibration sample. The concentration equivalent to the noise area is then calculated and used to replace values <LLOQ.
:param onlyLLOQ: if True only correct <LLOQ, if False correct <LLOQ and >ULOQ
:type onlyLLOQ: bool
:param responseReference: 'Sample File Name' of reference sample to use in order to establish the response to use, or list of samples to use (one per feature). If None, the middle of the calibration will be employed.
:type responseReference: None or str or list
:return: None
:raises AttributeError: if :py:attr:`featureMetadata['LLOQ']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['ULOQ']` is missing and onlyLLOQ==False
:raises AttributeError: if :py:attr:`featureMetadata['calibrationEquation']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['unitCorrectionFactor']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['Noise (area)']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['a']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['b']` is missing
:raises AttributeError: if :py:attr:`calibration['calibPeakInfo']` is missing
:raises ValueError: if :py:attr:`calibration['calibPeakInfo']['peakArea']` number of features or samples do not match the rest of if :py:attr:`calibration`
:raises ValueError: if :py:attr:`calibration['calibPeakInfo']['peakResponse']` number of features or samples do not match the rest of if :py:attr:`calibration`
:raises ValueError: if the 'responseReference' sample name is not recognised or the list is of erroneous length.
:raises ValueError: if calculation using the calibrationEquation fails.
"""
sampleMetadata = copy.deepcopy(self.sampleMetadata)
featureMetadata = copy.deepcopy(self.featureMetadata)
intensityData = copy.deepcopy(self._intensityData)
expectedConcentration = copy.deepcopy(self.expectedConcentration)
calibration = copy.deepcopy(self.calibration)
if ((not hasattr(self, 'sampleMetadataExcluded')) | (not hasattr(self, 'featureMetadataExcluded')) | (not hasattr(self, 'intensityDataExcluded')) | (not hasattr(self, 'expectedConcentrationExcluded')) | (not hasattr(self, 'excludedFlag'))):
sampleMetadataExcluded = []
featureMetadataExcluded = []
intensityDataExcluded = []
expectedConcentrationExcluded = []
excludedFlag = []
else:
sampleMetadataExcluded = copy.deepcopy(self.sampleMetadataExcluded)
featureMetadataExcluded = copy.deepcopy(self.featureMetadataExcluded)
intensityDataExcluded = copy.deepcopy(self.intensityDataExcluded)
expectedConcentrationExcluded = copy.deepcopy(self.expectedConcentrationExcluded)
excludedFlag = copy.deepcopy(self.excludedFlag)
## Check input columns
if 'LLOQ' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'LLOQ\'] column is absent')
if onlyLLOQ==False:
if 'ULOQ' not in featureMetadata.columns:
raise AttributeError('featureMetadata[\'ULOQ\'] column is absent')
if 'calibrationEquation' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'calibrationEquation\'] column is absent')
if 'unitCorrectionFactor' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'unitCorrectionFactor\'] column is absent')
if 'Noise (area)' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'Noise (area)\'] column is absent')
if 'a' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'a\'] column is absent')
if 'b' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'b\'] column is absent')
if 'calibPeakInfo' not in calibration.keys():
raise AttributeError('the calibPeakInfo dict is absent from the calibration dict')
if (not numpy.array_equal(calibration['calibPeakInfo']['peakArea'].index.values, calibration['calibSampleMetadata'].index.values)) | (not numpy.array_equal(calibration['calibPeakInfo']['peakArea'].columns.values, calibration['calibFeatureMetadata']['Feature Name'].values)):
raise ValueError('calibration[\'calibPeakInfo\'][\'peakArea\'] number of features or samples do not match the rest of \'calibration\'')
if (not numpy.array_equal(calibration['calibPeakInfo']['peakResponse'].index.values, calibration['calibSampleMetadata'].index.values)) | (not numpy.array_equal(calibration['calibPeakInfo']['peakResponse'].columns.values, calibration['calibFeatureMetadata']['Feature Name'].values)):
raise ValueError('calibration[\'calibPeakInfo\'][\'peakResponse\'] number of features or samples do not match the rest of \'calibration\'')
## Features only Monitored are not processed and passed untouched (concatenated back at the end)
untouched = (featureMetadata['quantificationType'] == QuantificationType.Monitored).values
if sum(untouched) != 0:
print('The following features are only monitored and therefore not processed: ' + str(featureMetadata.loc[untouched, 'Feature Name'].values.tolist()))
untouchedFeatureMetadata = featureMetadata.loc[untouched, :]
featureMetadata = featureMetadata.loc[~untouched, :]
untouchedIntensityData = intensityData[:, untouched]
intensityData = intensityData[:, ~untouched]
untouchedExpectedConcentration = expectedConcentration.loc[:, untouched]
expectedConcentration = expectedConcentration.loc[:, ~untouched]
# same reordering of the calibration
untouchedCalibFeatureMetadata = calibration['calibFeatureMetadata'].loc[untouched, :]
calibration['calibFeatureMetadata'] = calibration['calibFeatureMetadata'].loc[~untouched, :]
untouchedCalibIntensityData = calibration['calibIntensityData'][:, untouched]
calibration['calibIntensityData'] = calibration['calibIntensityData'][:, ~untouched]
untouchedCalibExpectedConcentration = calibration['calibExpectedConcentration'].loc[:, untouched]
calibration['calibExpectedConcentration'] = calibration['calibExpectedConcentration'].loc[:, ~untouched]
untouchedCalibPeakArea = calibration['calibPeakInfo']['peakArea'].loc[:, untouched]
calibration['calibPeakInfo']['peakArea'] = calibration['calibPeakInfo']['peakArea'].loc[:, ~untouched]
untouchedCalibPeakResponse = calibration['calibPeakInfo']['peakResponse'].loc[:, untouched]
calibration['calibPeakInfo']['peakResponse'] = calibration['calibPeakInfo']['peakResponse'].loc[:, ~untouched]
untouchedCalibPeakConcentrationDeviation = calibration['calibPeakInfo']['peakConcentrationDeviation'].loc[:, untouched]
calibration['calibPeakInfo']['peakConcentrationDeviation'] = calibration['calibPeakInfo']['peakConcentrationDeviation'].loc[:, ~untouched]
untouchedCalibPeakIntegrationFlag = calibration['calibPeakInfo']['peakIntegrationFlag'].loc[:, untouched]
calibration['calibPeakInfo']['peakIntegrationFlag'] = calibration['calibPeakInfo']['peakIntegrationFlag'].loc[:, ~untouched]
untouchedCalibPeakRT = calibration['calibPeakInfo']['peakRT'].loc[:, untouched]
calibration['calibPeakInfo']['peakRT'] = calibration['calibPeakInfo']['peakRT'].loc[:, ~untouched]
## Exclude features without required information
unusableFeat = featureMetadata['LLOQ'].isnull().values | featureMetadata['Noise (area)'].isnull() | featureMetadata['a'].isnull() | featureMetadata['b'].isnull()
if not onlyLLOQ:
unusableFeat = unusableFeat | featureMetadata['ULOQ'].isnull().values
unusableFeat = unusableFeat.values
if sum(unusableFeat) != 0:
print(str(sum(unusableFeat)) + ' features cannot be pre-processed:')
print('\t' + str(sum(unusableFeat)) + ' features lack the required information to replace limits of quantification by noise level')
# store
sampleMetadataExcluded.append(sampleMetadata)
featureMetadataExcluded.append(featureMetadata.loc[unusableFeat, :])
intensityDataExcluded.append(intensityData[:, unusableFeat])
#return(expectedConcentration, unusableFeat)
expectedConcentrationExcluded.append(expectedConcentration.loc[:, unusableFeat])
excludedFlag.append('Features')
#remove
featureMetadata = featureMetadata.loc[~unusableFeat, :]
intensityData = intensityData[:, ~unusableFeat]
expectedConcentration = expectedConcentration.loc[:, ~unusableFeat]
calibration['calibFeatureMetadata'] = calibration['calibFeatureMetadata'].loc[~unusableFeat, :]
calibration['calibIntensityData'] = calibration['calibIntensityData'][:, ~unusableFeat]
calibration['calibExpectedConcentration'] = calibration['calibExpectedConcentration'].loc[:, ~unusableFeat]
calibration['calibPeakInfo']['peakResponse'] = calibration['calibPeakInfo']['peakResponse'].loc[:, ~unusableFeat]
calibration['calibPeakInfo']['peakArea'] = calibration['calibPeakInfo']['peakArea'].loc[:, ~unusableFeat]
calibration['calibPeakInfo']['peakConcentrationDeviation'] = calibration['calibPeakInfo']['peakConcentrationDeviation'].loc[:, ~unusableFeat]
calibration['calibPeakInfo']['peakIntegrationFlag'] = calibration['calibPeakInfo']['peakIntegrationFlag'].loc[:, ~unusableFeat]
calibration['calibPeakInfo']['peakRT'] = calibration['calibPeakInfo']['peakRT'].loc[:, ~unusableFeat]
## Calculate each feature's replacement noise concentration
##
## Approximate the response reference
## Needed for calibrationMethod='backcalculatedIS', for 'noIS' responseFactor=1
# responseReference: None (guessed middle of the curve), 'Sample File Name' to use, or list of 'Sample File Name' (one per feature)
#
# ! The calibration curve is plotted in TargetLynx as x-axis concentration, y-axis response
# The calibration equation obtained is written as: response = a * concentration + b (eq. 1)
# The response uses the area measured and IS: response = Area * (IS conc / IS Area) (eq. 2) [for 'noIS' response = Area]
# We can define the responseFactor = (IS conc / IS Area), a ratio that can change from sample to sample.
# For the noise concentration calculation, using eq. 2 and a reference sample, we can approximate responseFactor = response / area [works for both calibrationMethod]
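# Illustrative example (hypothetical numbers, not from the original source): if the
# reference calibration sample gives peakResponse = 0.5 and peakArea = 1000 for a
# feature, then responseFactor = 0.5 / 1000 = 5e-4, approximating (IS conc / IS Area).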
# make a list of responseReference (one per feature)
if isinstance(responseReference, str):
# Check existence of this sample
if sum(calibration['calibSampleMetadata']['Sample File Name'] == responseReference) == 0:
raise ValueError('responseReference \'Sample File Name\' unknown: ' + str(responseReference))
responseReference = [responseReference] * featureMetadata.shape[0]
elif isinstance(responseReference, list):
# Check length to match the number of features
if len(responseReference) != featureMetadata.shape[0]:
raise ValueError('The number of responseReference \'Sample File Name\' provided does not match the number of features to process:\n' + str(featureMetadata['Feature Name'].values))
for i in responseReference:
if sum(calibration['calibSampleMetadata']['Sample File Name'] == i) == 0:
raise ValueError('ResponseReference \'Sample File Name\' unknown: ' + str(i))
elif responseReference is None:
# Get a calibration sample in the middle of the calibration run; use at your own risk
responseReference = calibration['calibSampleMetadata'].sort_values(by='Run Order').iloc[int(numpy.ceil(calibration['calibSampleMetadata'].shape[0] / 2)) - 1]['Sample File Name'] # round to the highest value
warnings.warn('No responseReference provided, sample in the middle of the calibration run employed: ' + str(responseReference))
responseReference = [responseReference] * featureMetadata.shape[0]
else:
raise ValueError('The responseReference provided is not recognised. A \'Sample File Name\', a list of \'Sample File Name\' or None are expected')
# Get the right Area and Response for each feature
tmpArea = list()
tmpResponse = list()
# iterate over features, get value in responseReference spectra
for i in range(0, featureMetadata.shape[0]):
tmpArea.append(calibration['calibPeakInfo']['peakArea'][(calibration['calibSampleMetadata']['Sample File Name'] == responseReference[i]).values].values.flatten()[i])
tmpResponse.append(calibration['calibPeakInfo']['peakResponse'][(calibration['calibSampleMetadata']['Sample File Name'] == responseReference[i]).values].values.flatten()[i])
# responseFactor = response/Area
# Note: responseFactor will be ~equal for all compounds sharing the same IS (as IS conc / IS Area will be identical)
resFact = [resp / area for resp, area in zip(tmpResponse, tmpArea)]
featureMetadata = featureMetadata.assign(responseFactor=resFact)
## Calculate noise concentration equivalent for each feature
## Note for equation in .json:
# calibration curve in TargetLynx is defined/established as: response = a * concentration + b (eq. 1)
# response is defined as: response = Area * (IS conc / IS Area) (eq. 2) [for 'noIS' response = Area]
# using eq. 2, we can approximate the ratio IS Conc/IS Area in a representative sample as: responseFactor = response / area (eq. 3)
# Therefore: concentration = ((area*responseFactor) - b) / a (eq. 4)
#
# If in TargetLynx 'axis transformation' is set to log (but still using 'Polynomial Type'=linear and 'Fit Weighting'=None)
# eq. 1 becomes: log(response) = a * log(concentration) + b (eq. 5)
# and eq. 4 becomes: concentration = 10^( (log(area*responseFactor) - b) / a ) (eq. 6)
# The calibration equation provided is expected to use only the following variables:
# area
# responseFactor | responseFactor=(IS conc/IS Area)=response/Area, for noIS, responseFactor will be 1.
# a
# b
#
# Examples:
# '((area * responseFactor)-b)/a'
# '10**((numpy.log10(area * responseFactor)-b)/a)'
# 'area/a' | if b not needed, set to 0 in csv [use for linear noIS, area=response, responseFactor=1, and response = a * concentration ]
tmpNoiseConc = []
for i in range(0, featureMetadata.shape[0]):
# set the right values before applying the equation
calibrationEquation = featureMetadata['calibrationEquation'].values[i]
area = featureMetadata['Noise (area)'].values[i]
responseFactor = featureMetadata['responseFactor'].values[i]
a = featureMetadata['a'].values[i]
b = featureMetadata['b'].values[i]
# apply the calibration equation, and the unitCorrectionFactor, as the equations were established with the original area/response/concentrations
try:
tmpNoiseConc.append(eval(calibrationEquation) * featureMetadata['unitCorrectionFactor'].values[i])
except:
raise ValueError('Verify calibrationEquation: \"' + calibrationEquation + '\", only variables expected are \"area\", \"responseFactor\", \"a\" or \"b\"')
featureMetadata = featureMetadata.assign(noiseConcentration=tmpNoiseConc)
## Values replacement by noise concentration (<LOQ) and +inf for (>ULOQ)
# iterate over the features
for i in range(0, featureMetadata.shape[0]):
# LLOQ
toReplaceLLOQ = intensityData[:, i] < featureMetadata['LLOQ'].values[i]
intensityData[toReplaceLLOQ, i] = featureMetadata['noiseConcentration'].values[i]
# ULOQ
if not onlyLLOQ:
toReplaceULOQ = intensityData[:, i] > featureMetadata['ULOQ'].values[i]
intensityData[toReplaceULOQ, i] = numpy.inf
## Add back the untouched monitored features
if sum(untouched) != 0:
featureMetadata = pandas.concat([featureMetadata, untouchedFeatureMetadata], axis=0, sort=False)
intensityData = numpy.concatenate((intensityData, untouchedIntensityData), axis=1)
expectedConcentration = pandas.concat([expectedConcentration, untouchedExpectedConcentration], axis=1, sort=False)
# reorder the calib
calibration['calibFeatureMetadata'] = pandas.concat([calibration['calibFeatureMetadata'], untouchedCalibFeatureMetadata], axis=0, sort=False)
calibration['calibIntensityData'] = numpy.concatenate((calibration['calibIntensityData'], untouchedCalibIntensityData), axis=1)
calibration['calibExpectedConcentration'] = pandas.concat([calibration['calibExpectedConcentration'], untouchedCalibExpectedConcentration], axis=1, sort=False)
calibration['calibPeakInfo']['peakArea'] = pandas.concat([calibration['calibPeakInfo']['peakArea'], untouchedCalibPeakArea], axis=1, sort=False)
calibration['calibPeakInfo']['peakResponse'] = pandas.concat([calibration['calibPeakInfo']['peakResponse'], untouchedCalibPeakResponse], axis=1, sort=False)
calibration['calibPeakInfo']['peakConcentrationDeviation'] = pandas.concat([calibration['calibPeakInfo']['peakConcentrationDeviation'], untouchedCalibPeakConcentrationDeviation], axis=1, sort=False)
calibration['calibPeakInfo']['peakIntegrationFlag'] = | pandas.concat([calibration['calibPeakInfo']['peakIntegrationFlag'], untouchedCalibPeakIntegrationFlag], axis=1, sort=False) | pandas.concat |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Plots
import matplotlib.pyplot as plt
import seaborn as sns
# Preprocessing
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans
import gc
# LightGBM framework
import lightgbm as lgb
"""
From github:
A fast, distributed, high performance gradient boosting (GBDT, GBRT, GBM or MART)
framework based on decision tree algorithms,
used for ranking, classification and many other machine learning tasks.
LightGBM is a gradient boosting framework that uses tree based learning algorithms.
It is designed to be distributed and efficient with the following advantages:
Faster training speed and higher efficiency
Lower memory usage
Better accuracy
Parallel and GPU learning supported
Capable of handling large-scale data
"""
########
# Load the data
########
# load the main train and test data
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
#load supplemental taxi route data
train_fastest_1 = pd.read_csv('fastest_routes_train_part_1.csv')
train_fastest_2 = pd.read_csv('fastest_routes_train_part_2.csv')
test_fastest = pd.read_csv('fastest_routes_test.csv')
#load the hourly weather
weather = pd.read_csv('weather_data_nyc_centralpark_2016.csv')
weather_hour = pd.read_csv('Weather.csv')
#split the training data into a train and validation set
train, valid, _ , _ = train_test_split(train, train.trip_duration,
test_size=0.2, random_state=2017)
# Add set marker
train['eval_set'] = 0; valid['eval_set'] = 1; test['eval_set'] = 2
test['trip_duration'] = np.nan; test['dropoff_datetime'] = np.nan
#merge train, valid and test to a single table
frame = pd.concat([train, valid, test], axis=0)
frame_fastest = pd.concat([train_fastest_1, train_fastest_2, test_fastest], axis = 0)
### Memory optimization
# Main dataframe
frame.eval_set = frame.eval_set.astype(np.uint8)
frame.passenger_count = frame.passenger_count.astype(np.int8)
frame.store_and_fwd_flag = pd.get_dummies(frame['store_and_fwd_flag'],
prefix='store_and_fwd_flag', drop_first=True)
frame.vendor_id = frame.vendor_id.astype(np.int8)
# Weather dataframe
weather.replace('T', 0.001, inplace=True)
weather['date'] = pd.to_datetime(weather['date'], dayfirst=True).dt.date
weather['average temperature'] = weather['average temperature'].astype(np.int64)
weather['precipitation'] = weather['precipitation'].astype(np.float64)
weather['snow fall'] = weather['snow fall'].astype(np.float64)
weather['snow depth'] = weather['snow depth'].astype(np.float64)
# Weather hourly dataframe
weather_hour['Datetime'] = pd.to_datetime(weather_hour['pickup_datetime'], dayfirst=True)
weather_hour['date'] = weather_hour.Datetime.dt.date
weather_hour['hour'] = weather_hour.Datetime.dt.hour
weather_hour['hour'] = weather_hour.hour.astype(np.int8)
weather_hour['fog'] = weather_hour.fog.astype(np.int8)
weather_hour = weather_hour[['date', 'hour', 'tempm', 'dewptm', 'hum', 'wspdm',
'wdird', 'vism', 'pressurei', 'fog']]
# Define clusters
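# Summary of clusters(): fit MiniBatchKMeans with 100 clusters on a 500k-point
# random sample of all pickup/dropoff coordinates, then label each trip's pickup
# and dropoff location with its nearest cluster id.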
def clusters(df):
coords = np.vstack((df[['pickup_longitude', 'pickup_latitude']].values,
df[['dropoff_longitude', 'dropoff_latitude']].values))
sample_ind = np.random.permutation(len(coords))[:500000]
kmeans = MiniBatchKMeans(n_clusters=100, batch_size=10000).fit(coords[sample_ind])
cl_pickup = kmeans.predict(df[['pickup_longitude', 'pickup_latitude']])
cl_dropoff = kmeans.predict(df[['dropoff_longitude', 'dropoff_latitude']])
return cl_pickup, cl_dropoff
# Rotate the map
def rotate_coords(df, col1, col2, pic=False):
alpha = 0.610865 # angle = 35 degrees
#alpha = 0.506145 # angle = 29 degrees
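# Rotating by roughly 29-35 degrees presumably aligns Manhattan's street grid
# with the coordinate axes, so the axis-aligned distance features computed later
# better approximate street distances (assumption, not stated in the source).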
# Center of rotation
x_c = df[col1].mean()
y_c = df[col2].mean()
# Coordinates
C = df[[col1, col2]] - np.array([x_c, y_c])
# Rotation matrix
R = np.array([[np.cos(alpha), -np.sin(alpha)],
[np.sin(alpha), np.cos(alpha)]])
C_rot = np.matmul(R, C.transpose().values).transpose() + np.array([x_c, y_c])
return C_rot
# Manhattan distances
def my_manhattan_distances(x1, x2, y1, y2):
return np.abs(x1 - x2) + np.abs(y1 - y2)
# Euclidean distances
def my_euclidean_distances(x1, x2, y1, y2):
return np.square(x1 - x2) + np.square(y1 - y2)
my_manhattan_distances = np.vectorize(my_manhattan_distances)
my_euclidean_distances = np.vectorize(my_euclidean_distances)
# Adding features
def add_features(df, predict=False):
# If predict is True, this function prepares (adds new features to) the full
# train set and the test set; if predict is False, it prepares the train and
# validation datasets.
if predict:
train_inds = df[(df.eval_set != 2)].index
else:
df = df[(df.eval_set != 2)].copy()
train_inds = df[df.eval_set != 1].index
### Log trip
print('Log trip duration')
df['trip_duration'] = df.trip_duration.apply(np.log)
### PCA transformation
print('Add PCA geo-coordinates')
coords = np.vstack((df[['pickup_latitude', 'pickup_longitude']],
df[['dropoff_latitude', 'dropoff_longitude']]))
pca = PCA().fit(coords) # define 2 main axis
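# The PCA acts as a rotation of the coordinates onto their two principal axes;
# the assumed intent is to let axis-aligned tree splits follow the dominant
# travel directions.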
df['pickup_pca0'] = pca.transform(df[['pickup_longitude', 'pickup_latitude']])[:,0]
df['pickup_pca1'] = pca.transform(df[['pickup_longitude', 'pickup_latitude']])[:,1]
df['dropoff_pca0'] = pca.transform(df[['dropoff_longitude', 'dropoff_latitude']])[:,0]
df['dropoff_pca1'] = pca.transform(df[['dropoff_longitude', 'dropoff_latitude']])[:,1]
df['distance_pca0'] = np.abs(df.pickup_pca0-df.dropoff_pca0)
df['distance_pca1'] = np.abs(df.pickup_pca1-df.dropoff_pca1)
print('Rotate geo-coordinates')
C_rot_pickup = rotate_coords(df, 'pickup_longitude', 'pickup_latitude', not predict)
C_rot_dropoff = rotate_coords(df, 'dropoff_longitude', 'dropoff_latitude', not predict)
df['pickup_longitude_rot'] = C_rot_pickup[:, 0]
df['pickup_latitude_rot'] = C_rot_pickup[:, 1]
df['dropoff_longitude_rot'] = C_rot_dropoff[:, 0]
df['dropoff_latitude_rot'] = C_rot_dropoff[:, 1]
### Add clusters
print('Add clusters')
cl_pu, cl_do = clusters(df)#not predict,
df['pickup_clusters'] = cl_pu
df['dropoff_clusters'] = cl_do
### to DateTime
print('Convert to datetime format')
df['pickup_datetime'] = pd.to_datetime(df['pickup_datetime'])
# Add weather info
df['date'] = df['pickup_datetime'].dt.date # adding date column
df['hour'] = df['pickup_datetime'].dt.hour # adding hour column
df = pd.merge(left=df, right=weather, on='date', how='left')
# Add weather hourly
df = pd.merge(left=df, right=weather_hour.drop_duplicates(subset=['date', 'hour']),
on=['date', 'hour'], how='left')
df.drop(['date'], axis=1, inplace=True)
# Weather added
df['month'] = df['pickup_datetime'].dt.month
df['week_of_year'] = df['pickup_datetime'].dt.week
df['day'] = df['pickup_datetime'].dt.day
df['month_day'] = df['month'] + df['day']
df['day_of_year'] = df['pickup_datetime'].dt.dayofyear
#df['hour'] = df['pickup_datetime'].dt.hour
df['day_of_year_hour'] = 24*df['day_of_year'] + df['hour']
df['hour_minute'] = 60*df['hour'] + df['pickup_datetime'].dt.minute
df['day_week'] = df['pickup_datetime'].dt.weekday
df['month_day_hour'] = 31*24*df['month'] + 24*df['day'] + df['hour']
### Some useful averages ###
print('Add averages')
train_info = df.iloc[train_inds].copy() # get only train data
# Month average
month_avg = train_info.groupby('month').trip_duration.mean()
month_avg = month_avg.reset_index(); month_avg.columns = ['month', 'month_avg']
df = pd.merge(left=df, right=month_avg, on='month', how='left')
# Week of year average
week_year_avg = train_info.groupby('week_of_year').trip_duration.mean()
week_year_avg = week_year_avg.reset_index()
week_year_avg.columns = ['week_of_year', 'week_of_year_avg']
df = pd.merge(left=df, right=week_year_avg, on='week_of_year', how='left')
# Day of month average
day_month_avg = train_info.groupby('day').trip_duration.mean()
day_month_avg = day_month_avg.reset_index()
day_month_avg.columns = ['day', 'day_of_month_avg']
df = pd.merge(left=df, right=day_month_avg, on='day', how='left')
# Day of year average
day_year_avg = train_info.groupby('day_of_year').trip_duration.mean()
day_year_avg = day_year_avg.reset_index()
day_year_avg.columns = ['day_of_year', 'day_of_year_avg']
df = | pd.merge(left=df, right=day_year_avg, on='day_of_year', how='left') | pandas.merge |
#!/usr/bin/env python
''' ---------------- About the script ----------------
Assignment 3: Sentiment Analysis
This script calculates sentiment scores for over a million headlines taken from the Australian news source ABC (start date: 2003-02-19; end date: 2020-12-31) using the spaCyTextBlob approach, then creates and saves two plots of sentiment over time with 1-week and 1-month rolling averages. It also creates one plot with 1-day, 1-week, 1-month and 1-year rolling averages together for easier comparison.
Example:
$ python sentiment.py
'''
"""---------------- Importing libraries ----------------
"""
# importing libraries
import spacy
import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
sys.path.append(os.path.join(".."))
from spacytextblob.spacytextblob import SpacyTextBlob
# initialising spacy
nlp = spacy.load("en_core_web_sm")
# initialising spaCyTextBlob and adding it as a new component to spaCy nlp pipeline.
spacy_text_blob = SpacyTextBlob()
nlp.add_pipe(spacy_text_blob)
"""---------------- Main script ----------------
"""
def main():
"""------ Reading data and preparation ------
"""
# Defining path to the csv file
in_file = os.path.join("..", "data", "abcnews-date-text", "abcnews-date-text.csv")
# Reading the csv file and saving into a variable
abc_news = pd.read_csv(in_file)
# Presenting a sample of 10 headlines to the user
print(f"\n[INFO] This is an overview of the dataset:\n")
print(abc_news.sample(10))
# Getting a list of every publish date, which I will loop through later
dates = sorted(abc_news["publish_date"].unique())
# Creating an empty list for total sentiment score for each day
total_scores = []
# Create the output folder, if it doesn't exist already, for saving the plots and other output
if not os.path.exists("../output"):
os.makedirs("../output")
"""------ The loop calculating sentiment scores ------
"""
print(f"\n[INFO] Calculating sentiment scores\n")
# Looping through each day
for day in dates:
# Creating a variable to store a total sentiment score for all the headlines per day
polarity_score_total = 0
# Getting a list of every headline in the day that the code is looping through
headlines = abc_news[abc_news["publish_date"]==day]
# Looping through each headline in a day
for headline in nlp.pipe(headlines["headline_text"], batch_size=1000):
# Calculating sentiment score for that headline
headline_polarity = headline._.sentiment.polarity
# Adding the headline score calculated above to the running total. After the loop this gives the total sentiment score across all headlines for the day
polarity_score_total = headline_polarity + polarity_score_total
# Appending the total score to an empty list
total_scores.append(polarity_score_total)
"""------ Calculating and ploting rolling averages ------
"""
# Calculating 7 day rolling average
smoothed_sentiment_weeks = | pd.Series(total_scores) | pandas.Series |
import sys
from intopt_energy_mlp import intopt_energy
sys.path.insert(0,'../..')
sys.path.insert(0,"../../Interior")
sys.path.insert(0,"../../EnergyCost")
from intopt_energy_mlp import *
from KnapsackSolving import *
from get_energy import *
from ICON import *
import itertools
import scipy as sp
import numpy as np
import time,datetime
import pandas as pd
import logging
from scipy.stats import poisson
from get_energy import get_energy
from scipy.stats import expon
from scipy.stats import beta
if __name__ == '__main__':
formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(filename='ICONexp.log', level=logging.INFO,format=formatter)
(X_1gtrain, y_train, X_1gtest, y_test) = get_energy("prices2013.dat")
X_1gvalidation = X_1gtest[0:2880,:]
y_validation = y_test[0:2880]
y_test= y_test[2880:]
X_1gtest = X_1gtest[2880:,:]
weights = [[1 for i in range(48)]]
weights = np.array(weights)
X_1gtrain = X_1gtrain[:,1:]
X_1gvalidation = X_1gvalidation[:,1:]
X_1gtest = X_1gtest[:,1:]
file = "../../EnergyCost/load1/instance34.txt"
param = data_reading(file)
## twostage
# clf = twostage_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=1,
# optimizer= optim.SGD, lr=0.1,num_layers=1,epochs=3,validation_relax=False)
# clf.fit(X_1gtrain,y_train)
# test_rslt = clf.validation_result(X_1gtest,y_test)
#
# two_stage_rslt = {'model':'Two-stage','MSE-loss':test_rslt [1],'Regret':test_rslt[0]}
#
# # SPO
# clf = SPO_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=1,
# optimizer= optim.Adam, lr=0.7,num_layers=1,epochs=5,validation_relax=False)
# clf.fit(X_1gtrain,y_train)
# test_rslt = clf.validation_result(X_1gtest,y_test)
# spo_rslt = {'model':'SPO','MSE-loss':test_rslt [1],'Regret':test_rslt[0] }
## Intopt HSD
clf = intopt_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=1,
optimizer= optim.Adam, lr=0.7,num_layers=1,epochs=8,
damping= 1e-6,thr = 0.1,validation_relax=False)
clf.fit(X_1gtrain,y_train)
test_rslt = clf.validation_result(X_1gtest,y_test)
intopt_rslt = {'model':'IntOpt','MSE-loss':test_rslt [1],'Regret':test_rslt[0]}
# QPT
clf = qptl_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=1,num_layers=1,
optimizer= optim.Adam, lr=0.1,epochs= 6,tau=100000,validation_relax=False)
clf.fit(X_1gtrain,y_train,X_test= X_1gtest,y_test= y_test)
test_rslt = clf.validation_result(X_1gtest,y_test)
qpt_rslt = {'model':'QPTL','MSE-loss':test_rslt [1],'Regret':test_rslt[0] }
rslt= | pd.DataFrame([two_stage_rslt,spo_rslt, qpt_rslt,intopt_rslt ]) | pandas.DataFrame |
import io
import itertools
import pytest
from pandas.util.testing import (
assert_series_equal, assert_frame_equal, assert_index_equal)
from numpy.testing import assert_array_equal
import pandas as pd
import numpy as np
import matplotlib.figure
import matplotlib.pyplot as plt
from upsetplot import plot
from upsetplot import UpSet
from upsetplot import generate_counts, generate_samples
from upsetplot.plotting import _process_data
# TODO: warnings should raise errors
def is_ascending(seq):
# return np.all(np.diff(seq) >= 0)
return sorted(seq) == list(seq)
@pytest.mark.parametrize('x', [
generate_counts(),
generate_counts().iloc[1:-2],
])
@pytest.mark.parametrize('sort_by', ['cardinality', 'degree'])
@pytest.mark.parametrize('sort_categories_by', [None, 'cardinality'])
def test_process_data_series(x, sort_by, sort_categories_by):
assert x.name == 'value'
for subset_size in ['auto', 'legacy', 'sum', 'count']:
for sum_over in ['abc', False]:
with pytest.raises(ValueError, match='sum_over is not applicable'):
_process_data(x, sort_by=sort_by,
sort_categories_by=sort_categories_by,
subset_size=subset_size, sum_over=sum_over)
df, intersections, totals = _process_data(
x, subset_size='auto', sort_by=sort_by,
sort_categories_by=sort_categories_by, sum_over=None)
assert intersections.name == 'value'
x_reordered = (x
.reorder_levels(intersections.index.names)
.reindex(index=intersections.index))
assert len(x) == len(x_reordered)
assert x_reordered.index.is_unique
assert_series_equal(x_reordered, intersections,
check_dtype=False)
if sort_by == 'cardinality':
assert is_ascending(intersections.values[::-1])
else:
# check degree order
assert is_ascending(intersections.index.to_frame().sum(axis=1))
# TODO: within a same-degree group, the tuple of active names should
# be in sort-order
if sort_categories_by:
assert is_ascending(totals.values[::-1])
assert np.all(totals.index.values == intersections.index.names)
assert np.all(df.index.names == intersections.index.names)
assert set(df.columns) == {'_value', '_bin'}
assert_index_equal(df['_value'].reorder_levels(x.index.names).index,
x.index)
assert_array_equal(df['_value'], x)
assert_index_equal(intersections.iloc[df['_bin']].index,
df.index)
assert len(df) == len(x)
@pytest.mark.parametrize('x', [
generate_samples()['value'],
generate_counts(),
])
def test_subset_size_series(x):
kw = {'sort_by': 'cardinality',
'sort_categories_by': 'cardinality',
'sum_over': None}
df_sum, intersections_sum, totals_sum = _process_data(
x, subset_size='sum', **kw)
if x.index.is_unique:
expected_warning = None
else:
expected_warning = FutureWarning
with pytest.warns(expected_warning):
df, intersections, totals = _process_data(
x, subset_size='legacy', **kw)
assert_frame_equal(df, df_sum)
assert_series_equal(intersections, intersections_sum)
assert_series_equal(totals, totals_sum)
if x.index.is_unique:
df, intersections, totals = _process_data(
x, subset_size='auto', **kw)
assert_frame_equal(df, df_sum)
assert_series_equal(intersections, intersections_sum)
| assert_series_equal(totals, totals_sum) | pandas.util.testing.assert_series_equal |
import pandas as pd
def cleaner(extractor):
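# Summary: pull every table from the extractor, flatten them into a column list,
# then bucket the 'default' column into '0' (missing values), '1' (contains 'next')
# or '2' (anything else) and one-hot encode those buckets back onto the frame.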
tables = extractor.get_all_tables()
df = extractor.get_column_list(tables)
df['default'] = df['default'].fillna('0')
df['default'][df['default'].str.contains('next', na=False)] = '1'
df['default'][df['default'].isin(['0', '1']) == False] = '2'
df = pd.concat([df, | pd.get_dummies(df['default']) | pandas.get_dummies |
# coding=utf-8
"""This is the core implementation of the evaluation."""
import concurrent.futures
from threading import Lock, Thread
import numpy as np
import pandas as pd
import torch
from tensorboardX import SummaryWriter
from tqdm.autonotebook import tqdm
from ..utils import evaluation as eval_model
from ..utils.common_util import print_dict_as_table, save_to_csv, timeit
from ..utils.constants import (
DEFAULT_ITEM_COL,
DEFAULT_PREDICTION_COL,
DEFAULT_RATING_COL,
DEFAULT_USER_COL,
)
from ..utils.seq_evaluation import mrr, ndcg, precision, recall
lock_train_eval = Lock()
lock_test_eval = Lock()
def computeRePos(time_seq, time_span):
"""Compute position matrix for a user.
Args:
time_seq (numpy.ndarray): 1-D array of a user's interaction timestamps.
time_span (int): maximum time interval; larger gaps are clipped to this value.
Returns:
numpy.ndarray: square matrix of pairwise absolute time differences, clipped at time_span.
"""
size = time_seq.shape[0]
time_matrix = np.zeros([size, size], dtype=np.int32)
for i in range(size):
for j in range(size):
span = abs(time_seq[i] - time_seq[j])
if span > time_span:
time_matrix[i][j] = time_span
else:
time_matrix[i][j] = span
return time_matrix
def evaluate(data_df, predictions, metrics, k_li):
"""Evaluate the performance of a prediction by different metrics.
Args:
data_df (DataFrame): the dataset to be evaluated.
predictions (narray): 1-D array. The predicted scores for each user-item pair in the dataset.
metrics (list): metrics to be evaluated.
k_li (int or list): top k (s) to be evaluated.
Returns:
result_dic (dict): Performance result.
"""
user_ids = data_df[DEFAULT_USER_COL].to_numpy()
item_ids = data_df[DEFAULT_ITEM_COL].to_numpy()
pred_df = pd.DataFrame(
{
DEFAULT_USER_COL: user_ids,
DEFAULT_ITEM_COL: item_ids,
DEFAULT_PREDICTION_COL: predictions,
}
)
metric_mapping = {
"rmse": eval_model.rmse,
"mae": eval_model.mae,
"rsquared": eval_model.rsquared,
"ndcg": eval_model.ndcg_at_k,
"map": eval_model.map_at_k,
"precision": eval_model.precision_at_k,
"recall": eval_model.recall_at_k,
}
result_dic = {}
if type(k_li) != list:
k_li = [k_li]
for k in k_li:
for metric in metrics:
result = metric_mapping[metric](data_df, pred_df, k=k)
result_dic[f"{metric}@{k}"] = result
return result_dic
@timeit
def train_eval_worker(testEngine, valid_df, test_df, valid_pred, test_pred, epoch):
"""Start a worker for the evaluation during training.
Args:
testEngine: evaluation engine holding the metrics, k values and performance records.
valid_df: validation DataFrame.
test_df: testing DataFrame.
valid_pred: predicted scores for the validation set.
test_pred: predicted scores for the testing set.
epoch (int): current training epoch.
Returns:
(dict, dict): dictionaries with performances on the validation and testing sets.
"""
testEngine.n_worker += 1
valid_result = evaluate(
valid_df, valid_pred, testEngine.metrics, testEngine.valid_k
)
test_result = evaluate(test_df, test_pred, testEngine.metrics, testEngine.valid_k)
lock_train_eval.acquire()
testEngine.record_performance(valid_result, test_result, epoch)
if (
valid_result[f"{testEngine.valid_metric}@{testEngine.valid_k}"]
> testEngine.best_valid_performance
):
testEngine.n_no_update = 0
print(
"Current testEngine.best_valid_performance"
f" {testEngine.best_valid_performance}"
)
testEngine.best_valid_performance = valid_result[
f"{testEngine.valid_metric}@{testEngine.valid_k}"
]
print_dict_as_table(
valid_result,
tag=f"performance on validation at epoch {epoch}",
columns=["metrics", "values"],
)
print_dict_as_table(
test_result,
tag=f"performance on testing at epoch {epoch}",
columns=["metrics", "values"],
)
else:
testEngine.n_no_update += 1
print(f"number of epochs that have no update {testEngine.n_no_update}")
testEngine.n_worker -= 1
lock_train_eval.release()
# lock record and get best performance
return valid_result, test_result
@timeit
def test_eval_worker(testEngine, eval_data_df, prediction):
"""Start a worker for the evaluation during training.
Prediction and evaluation on the testing set.
"""
config = testEngine.config["system"]
result_para = {
"run_time": [testEngine.config["run_time"]],
}
testEngine.n_worker += 1
for cfg in ["model", "dataset"]:
for col in testEngine.config[cfg]["result_col"]:
result_para[col] = [testEngine.config[cfg][col]]
test_result_dic = evaluate(
eval_data_df, prediction, testEngine.metrics, testEngine.k
)
print_dict_as_table(
test_result_dic,
tag="performance on test",
columns=["metrics", "values"],
)
test_result_dic.update(result_para)
lock_test_eval.acquire() # need to be test
result_df = | pd.DataFrame(test_result_dic) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
# In[2]:
# In[3]:
def ptetaplot(ptbins,etabins,data,ax,title):
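# Build human-readable 'low-high' text labels for each pt and eta bin (with an
# 'overflow' label for the last bin), then wrap `data` in a labelled DataFrame,
# presumably to be drawn as a seaborn heatmap on the supplied axis.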
etabinstext=[]
ptbinstext=[]
for i in range(len(ptbins)):
if i==len(ptbins)-1:
ptbinstext.append('overflow')
continue
ptbinstext.append(str(ptbins[i])+'-'+str(ptbins[i+1]))
for i in range(len(etabins)):
if i==len(etabins)-1:
etabinstext.append('overflow')
continue
etabinstext.append(str(etabins[i])+'-'+str(etabins[i+1]))
import seaborn as sns
ptbinstext.reverse()
import pandas as pd
df = | pd.DataFrame(data=data, columns=etabinstext, index=ptbinstext) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 11:52:00 2020
@author: <NAME>
Ewp
Takes a little while to run.
This code is a pretty average (poorly named variables and reuse of dat, and different names for the moorings).
All of the values are calculated on the fly and printed. Not saved anywhere.
Quite inefficient, but provided as is.
Recommended to turn warnings off to save the data and put in text.
Could have turned this into a function. Have refractored where possible but this script is provided as is.
Requirements:
processed/combined_dataset/month_data_exports.nc
processed/flux/pco2grams.nc
Produces:
figs/Figure5a_ENSO_seasonality.png
"""
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from carbon_math import *
from matplotlib.dates import MonthLocator, DateFormatter
from matplotlib.ticker import FuncFormatter
xl0=0.0
yl0=0.18
xl1=0.0
yl1=0.18
xl2=-0.09
yl2=0
lanina=pd.read_csv('processed/indexes/la_nina_events.csv')
cp_nino=pd.read_csv('processed/indexes/cp_events.csv')
ep_nino= | pd.read_csv('processed/indexes/ep_events.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 15 11:21:51 2020
@author: jcampbellwhite001
"""
import time
from matplotlib import *
from matplotlib.pyplot import *
import numpy as np
import pandas as pd
import astropy
from astropy.time import Time
import astropy.units as u
from astropy.stats import sigma_clip
from astroquery.simbad import Simbad
from astropy.timeseries import LombScargle
import numpy.ma as ma
import os
from PyAstronomy import pyasl
from lmfit.models import GaussianModel, LinearModel
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from scipy.signal import savgol_filter
from scipy.signal import find_peaks_cwt
from scipy.signal import argrelextrema
from ESO_fits_get_spectra import *
from ESP_fits_get_spectra import *
from utils_data import *
from utils_spec import *
#variables shared between module files
target='USH'
instrument='USH'
radvel=0
vsini=0
#figure sizes used in module file
fig_size_s=(6,6)
fig_size_sn=(4.5,5)
fig_size_l=(9,6)
fig_size_n=(9,4)
#Read in line files
lines_file='Line_Resources/JCW_all_lines_2_20kA_100321.csv'
line_table = pd.read_csv(lines_file,delimiter='\t',index_col=False,comment='#',skip_blank_lines=True)
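# The energy columns (Ei, Ek) contain non-numeric annotation characters; the
# regex replacements below keep only digits and '.' before casting to float.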
line_table['Ei']=line_table['Ei'].str.replace('[^\d.]', '',regex=True).astype(float)
line_table['Ek']=line_table['Ek'].str.replace('[^\d.]', '',regex=True).astype(float)
line_table.drop(columns=('rm'),inplace=True)
line_table['obs_wl_air']=np.round(line_table['obs_wl_air'],2)
line_table.sort_values(['obs_wl_air','Aki'],ascending=[True,False],inplace=True)#keep the highest Aki for duplicated obs_wl_air entries?
line_table.drop_duplicates('obs_wl_air',inplace=True)
line_table.reset_index(drop=True,inplace=True)
xr_lines_file='Line_Resources/JCW_XR_lines_2_50A.csv'
xr_line_table = pd.read_csv(xr_lines_file,delimiter='\t',index_col=False,comment='#',skip_blank_lines=True)
xr_line_table['Ei']=xr_line_table['Ei'].str.replace('[^\d.]', '',regex=True).astype(float)
xr_line_table['Ek']=xr_line_table['Ek'].str.replace('[^\d.]', '',regex=True).astype(float)
xr_line_table['ritz_wl_vac']=xr_line_table['ritz_wl_vac'].str.replace('[^\d.]', '',regex=True).astype(float)
xr_line_table['ritz_wl_vac']=np.round(xr_line_table['ritz_wl_vac'],3)
xrs_lines_file='Line_Resources/JCW_Stelzer_XR_lines_2_50A.csv'
xrs_line_table = | pd.read_csv(xrs_lines_file,delimiter=',',index_col=False,comment='#',skip_blank_lines=True) | pandas.read_csv |
#!/usr/bin/python3
import pandas as pd
import subprocess
import os
import matplotlib.pyplot as plt
import numpy as np
import time
import glob
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Set up a bunch of settings to test, more than will be plotted to ensure that I can change things around to plot different values
dimensions = [(660, 120), (1000, 200), (2000, 400), (4000, 800), (8000, 1600)]
sim_times = [0.5, 0.2, 0.05, 0.01, 0.004]
omp_num_threads_tested = [1, 2, 3, 4, 5, 6]
sbatch_nodes_tested = [1, 2, 3, 4, 8]
# List of items to actually plot as different lines
omp_num_threads_plot = [1, 2, 4, 6]
sbatch_nodes_plot = [1, 2, 4, 8]
dimensions_plot = [(660, 120), (2000, 400), (8000, 1600)]
# Extract timing data from line
def get_time_from_timing_line(line):
string_time = line.split(" ")[3]
return float(string_time)
class CFDRunner:
"""
Class to handle running a configuration via slurm and process the output
"""
def __init__(self, id):
"""
Set the default parameters
Takes an id to keep config files separate
"""
self.x = 660
self.y = 120
self.t = 0.2
self.sbatch_nodes = 1
self.sbatch_tasks = 0.5
self.sbatch_time = "00:07:00"
self.omp_threads = 6
self.in_file = os.path.join("test", f"initial-{id}.bin")
self.out_file = os.path.join("test", f"completed-{id}.bin")
self.sbatch_file = os.path.join("test", f"submit-{id}.sbatch")
self.single_thread = False
def run(self):
"""
Run the slurm batch file and extract the slurm job id to read the file later
"""
process_output = subprocess.run(["sbatch", self.sbatch_file], stdout=subprocess.PIPE)
output_lines = process_output.stdout.decode().split("\n")
self.sbatch_id = output_lines[0].split(" ")[3]
def is_still_running(self):
"""
Check if the job still appears in the queue -> probably still running
"""
process_output = subprocess.run(["squeue"], stdout=subprocess.PIPE)
output_lines = process_output.stdout.decode().split("\n")
return any([self.sbatch_id in line for line in output_lines])
def parse_output(self):
"""
Parse the output into a dataframe of timing data
"""
with open(f"slurm-{self.sbatch_id}.out", "r") as fh:
lines = fh.readlines()
i = 0
# while i < len(lines) and "I am process" not in lines[i]:
# i += 1
# shape_output = lines[i]
timing_results = []
# Basically go line by line and extract the timing data
# If a timestep label is seen it knows a new set of measurements is starting
# Add the current of the data to the dataframe
# Note: Uses this weird method because it wasn't known which order the measurements would be output in
current_time = None
timestep_time_taken = None
compute_velocity_time_taken = None
rhs_time_taken = None
possion_time_taken = None
update_velocity_time_taken = None
boundary_time_taken = None
sync_time_taken = None
possion_p_loop_time_taken = None
possion_res_loop_time_taken = None
for line in lines[i:]:
try:
if "--- Timestep" in line:
if current_time is not None:
timing_results.append([
current_time,
timestep_time_taken,
compute_velocity_time_taken,
rhs_time_taken,
possion_time_taken,
update_velocity_time_taken,
boundary_time_taken,
sync_time_taken,
possion_p_loop_time_taken,
possion_res_loop_time_taken,
])
current_time = float(line.split(" ")[3])
elif "timestep_time_taken" in line:
timestep_time_taken = float(line.split(" ")[1])
elif "compute_velocity_time_taken" in line:
compute_velocity_time_taken = float(line.split(" ")[1])
elif "rhs_time_taken" in line:
rhs_time_taken = float(line.split(" ")[1])
elif "possion_time_taken" in line:
possion_time_taken = float(line.split(" ")[1])
elif "update_velocity_time_taken" in line:
update_velocity_time_taken = float(line.split(" ")[1])
elif "boundary_time_taken" in line:
boundary_time_taken = float(line.split(" ")[1])
elif "sync_time_taken" in line:
sync_time_taken = float(line.split(" ")[1])
elif "possion_p_loop_time_taken" in line:
possion_p_loop_time_taken = float(line.split(" ")[1])
elif "possion_res_loop_time_taken" in line:
possion_res_loop_time_taken = float(line.split(" ")[1])
except Exception as e:
print("Exception", e)
# Label the dataframe columns and return
df = pd.DataFrame(timing_results, columns=("Timestep", "timestep_time_taken", "compute_velocity_time_taken", "rhs_time_taken", "possion_time_taken", "update_velocity_time_taken", "boundary_time_taken", "sync_time_taken", "possion_p_loop_time_taken", "possion_res_loop_time_taken"))
return df
def save_sbatch(self):
"""
Export the configuration as a file to be run by sbatch
Bind to socket so OpenMP does not run across two sockets, avoiding extra memory latency
"""
# Default to using the parallel implementation
command = f"time mpirun -n {self.sbatch_nodes} -npernode 1 --bind-to socket ./karman-par -x {self.x} -y {self.y} --infile {self.in_file} -o {self.out_file} -t {self.t}\n"
omp_line = f"export OMP_NUM_THREADS={self.omp_threads}\n"
# If singlethread use the other executable
if self.single_thread:
command = f"time ./karman -x {self.x} -y {self.y} --infile {self.in_file} -o {self.out_file} -t {self.t}\n"
omp_line = "\n"
# Write out the file
with open(self.sbatch_file, "w") as fh:
fh.writelines([
"#!/bin/bash\n",
"#SBATCH --job-name=cfd-graphs\n",
"#SBATCH --partition=cs402\n",
"#SBATCH --nice=9000\n",
"#SBATCH --ntasks-per-socket=1\n", # avoid going from socket to socket with openmp
f"#SBATCH --nodes={self.sbatch_nodes}\n",
f"#SBATCH --ntasks-per-node=1\n",
f"#SBATCH --cpus-per-task=12\n" # required for 6x scaling running on slurm scaled correctly with openmp up to 6 threads but after that failed to improve. I think it is only allocating one socket.
f"#SBATCH --time={self.sbatch_time}\n",
". /etc/profile.d/modules.sh\n",
"module purge\n",
"module load cs402-mpi\n",
omp_line,
command,
"#gprof ./karman\n",
"./bin2ppm < karman.bin > karman.ppm\n",
"./diffbin karman.vanilla.bin karman.bin\n",
])
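# Hedged usage sketch (illustration only, not part of the original script): drive one
# job configuration end to end with the methods defined above. `job` is assumed to be
# an instance of the runner class in this module; `submit` is a hypothetical name for
# the submission method whose tail appears at the top of this excerpt, and
# `poll_interval_s` is a made-up parameter.
def run_and_collect(job, poll_interval_s=30):
    import time
    job.save_sbatch()              # write the sbatch script to disk
    job.submit()                   # hypothetical: runs sbatch and records job.sbatch_id
    while job.is_still_running():  # poll squeue until the job id disappears
        time.sleep(poll_interval_s)
    return job.parse_output()      # parse slurm-<id>.out into a timing DataFrame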
def collect_data():
"""
Run all configurations
"""
all_df = pd.DataFrame({
"x": pd.Series(dtype='int32'),
"y": pd.Series(dtype='int32'),
"sbatch_nodes": pd.Series(dtype='int32'),
"sbatch_tasks": pd.Series(dtype='int32'),
"omp_threads": | pd.Series(dtype='int32') | pandas.Series |
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101")
import json
import re
import pandas as pd
import os
import numpy as np
from tqdm import tqdm
def get_stop_words(json_file="./word2vec_af_wikipedia.json",
number_of_stop_words=None,
word_count_threshold=0.30,
remove_punctuation=True,
lowercase_words=True,
remove_proper_nouns=True):
with open(json_file, "r") as read_file:
data = json.load(read_file)
token_counts = data["token_counts"]
if remove_punctuation:
token_counts = [(re.sub(r"[^a-zA-Z]", "", x), y) for x, y in token_counts if re.sub(r"[^a-zA-Z]", "", x) != ""]
if lowercase_words:
token_counts = [(x.lower(), y) for x, y in token_counts]
if remove_proper_nouns:
proper_nouns = ["January", "january", "Januarie", "januarie",
"February", "february", "Februarie", "februarie",
"March", "march", "Maart", "maart",
"April", "april", "April", "april",
"May", "may", "Mei", "mei",
"June", "june", "Junie", "junie",
"July", "july", "Julie", "julie",
"August", "august", "Augustus", "augustus",
"September", "september", "September", "september",
"October", "october", "Oktober", "oktober",
"November", "november", "November", "november",
"December", "december", "Desember", "desember"]
token_counts = [(x, y) for x, y in token_counts if x not in proper_nouns]
token_dict = {}
for token, count in token_counts:
if token in token_dict:
token_dict[token] = token_dict[token] + count
else:
token_dict[token] = count
tokens_sorted = sorted(token_dict.items(), key=lambda x: x[1], reverse=True)
if not number_of_stop_words:
count_total = sum(token_dict.values())
# print("count_total :", count_total)
threshold_count = count_total * word_count_threshold
# print("threshold_count :", threshold_count)
running_total = 0
idx = 0
for _, count in tokens_sorted:  # accumulate over the sorted counts so idx matches return_tokens below
running_total += count
idx += 1
if running_total > threshold_count:
break
return_tokens = [x for x, y in tokens_sorted]
return_tokens = return_tokens[:idx]
else:
return_tokens = [x for x, y in tokens_sorted]
return_tokens = return_tokens[:number_of_stop_words]
return return_tokens
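# Hedged usage example (illustration only, not part of the original script): select
# the most frequent tokens that together cover ~30% of the corpus, using the module's
# default JSON file path; the threshold value is just an example.
def _example_get_stop_words():
    stop_words = get_stop_words(word_count_threshold=0.30, remove_proper_nouns=True)
    print(len(stop_words), stop_words[:10])
    return stop_words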
def convert_df_to_text(input_df, section_name):
len_input = len(input_df)
if len_input == 0:
return "", 0
else:
if len(section_name) > 0:
return_text = section_name
# return_text += "\n"
else:
return_text = ""
rows = input_df.values
for row in rows:
return_text += "\n"
return_text += " ".join(row.tolist())
return return_text, len(rows)
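# Hedged usage example (illustration only): convert_df_to_text() applied to a tiny
# DataFrame of string cells; the column names and section label are made up.
def _example_convert_df_to_text():
    df = pd.DataFrame({"afrikaans": ["die", "en"], "english": ["the", "and"]})
    text, n_rows = convert_df_to_text(df, section_name=": word pairs")
    # text == ": word pairs\ndie the\nen and", n_rows == 2
    return text, n_rows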
def create_question_words_file_from_excel(source_excel_file,
words_file_dir="./",
words_file_name="question-words-afrikaans.txt"):
excel_data = pd.ExcelFile(source_excel_file)
import sys
import pandas as pd
a = pd.read_csv("/home/risana/Downloads/Signal-Processing--master/5.Py_Scripts/kushagra_lie_R.csv")
b = pd.read_csv("/home/risana/Downloads/Signal-Processing--master/5.Py_Scripts/kushagra_truth_R.csv")
c=pd.read_csv("/home/risana/Downloads/Signal-Processing--master/5.Py_Scripts/komal_lie_R.csv")
d = pd.read_csv("/home/risana/Downloads/Signal-Processing--master/5.Py_Scripts/komal_truth_R.csv")
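# Hedged sketch (illustration only): one plausible way to combine the four frames read
# above into a single labelled dataset; the "subject" and "truthful" column names and
# label values are assumptions, not part of the original script.
frames = [(a, "kushagra", 0), (b, "kushagra", 1), (c, "komal", 0), (d, "komal", 1)]
labelled = pd.concat(
    [df.assign(subject=subject, truthful=truthful) for df, subject, truthful in frames],
    ignore_index=True,
)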
"""
Download, transform and simulate various datasets.
"""
# Author: <NAME> <<EMAIL>>
# License: MIT
from os.path import join
from urllib.parse import urljoin
from string import ascii_lowercase
from sqlite3 import connect
from rich.progress import track
import numpy as np
import pandas as pd
from .base import Datasets, FETCH_URLS
class ContinuousCategoricalDatasets(Datasets):
"""Class to download, transform and save datasets with both continuous
and categorical features."""
@staticmethod
def _modify_columns(data, categorical_features):
"""Rename and reorder columns of dataframe."""
X, y = data.drop(columns="target"), data.target
X.columns = range(len(X.columns))
return pd.concat([X, y], axis=1), categorical_features
def download(self):
"""Download the datasets."""
if self.names == "all":
func_names = [func_name for func_name in dir(self) if "fetch_" in func_name]
else:
func_names = [
f"fetch_{name}".lower().replace(" ", "_") for name in self.names
]
self.content_ = []
for func_name in track(func_names, description="Datasets"):
name = func_name.replace("fetch_", "").upper().replace("_", " ")
fetch_data = getattr(self, func_name)
data, categorical_features = self._modify_columns(*fetch_data())
self.content_.append((name, data, categorical_features))
return self
def save(self, path, db_name):
"""Save datasets."""
with connect(join(path, f"{db_name}.db")) as connection:
for name, data, _ in self.content_:
data.to_sql(name, connection, index=False, if_exists="replace")
def fetch_adult(self):
"""Download and transform the Adult Data Set.
https://archive.ics.uci.edu/ml/datasets/Adult
"""
data = pd.read_csv(FETCH_URLS["adult"], header=None, na_values=" ?").dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 3, 5, 6, 7, 8, 9, 13]
return data, categorical_features
def fetch_abalone(self):
"""Download and transform the Abalone Data Set.
https://archive.ics.uci.edu/ml/datasets/Abalone
"""
data = pd.read_csv(FETCH_URLS["abalone"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0]
return data, categorical_features
def fetch_acute(self):
"""Download and transform the Acute Inflammations Data Set.
https://archive.ics.uci.edu/ml/datasets/Acute+Inflammations
"""
data = pd.read_csv(
FETCH_URLS["acute"], header=None, sep="\t", decimal=",", encoding="UTF-16"
)
data["target"] = data[6].str[0] + data[7].str[0]
data.drop(columns=[6, 7], inplace=True)
categorical_features = list(range(1, 6))
return data, categorical_features
def fetch_annealing(self):
"""Download and transform the Annealing Data Set.
https://archive.ics.uci.edu/ml/datasets/Annealing
"""
data = pd.read_csv(FETCH_URLS["annealing"], header=None, na_values="?")
# some features are dropped; they have too many missing values
missing_feats = (data.isnull().sum(0) / data.shape[0]) < 0.1
data = data.iloc[:, missing_feats.values]
data[2].fillna(data[2].mode().squeeze(), inplace=True)
data = data.T.reset_index(drop=True).T
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0, 1, 5, 9]
return data, categorical_features
def fetch_census(self):
"""Download and transform the Census-Income (KDD) Data Set.
https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29
"""
data = pd.read_csv(FETCH_URLS["census"], header=None)
categorical_features = (
list(range(1, 5))
+ list(range(6, 16))
+ list(range(19, 29))
+ list(range(30, 38))
+ [39]
)
# some features are dropped; they have too many missing values
cols_ids = [1, 6, 9, 13, 14, 20, 21, 29, 31, 37]
categorical_features = np.argwhere(
np.delete(
data.rename(columns={k: f"nom_{k}" for k in categorical_features})
.columns.astype("str")
.str.startswith("nom_"),
cols_ids,
)
).squeeze()
data = data.drop(columns=cols_ids).T.reset_index(drop=True).T
# rows that contain the rare " Not in universe" value are dropped
data = data.iloc[
data.applymap(lambda x: x != " Not in universe").all(1).values, :
]
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
return data, categorical_features
def fetch_contraceptive(self):
"""Download and transform the Contraceptive Method Choice Data Set.
https://archive.ics.uci.edu/ml/datasets/Contraceptive+Method+Choice
"""
data = pd.read_csv(FETCH_URLS["contraceptive"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [4, 5, 6, 8]
return data, categorical_features
def fetch_covertype(self):
"""Download and transform the Covertype Data Set.
https://archive.ics.uci.edu/ml/datasets/Covertype
"""
data = pd.read_csv(FETCH_URLS["covertype"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
wilderness_area = pd.Series(
np.argmax(data.iloc[:, 10:14].values, axis=1), name=10
)
soil_type = pd.Series(np.argmax(data.iloc[:, 14:54].values, axis=1), name=11)
data = (
data.drop(columns=list(range(10, 54)))
.join(wilderness_area)
.join(soil_type)[list(range(0, 12)) + ["target"]]
)
categorical_features = [10, 11]
return data, categorical_features
def fetch_credit_approval(self):
"""Download and transform the Credit Approval Data Set.
https://archive.ics.uci.edu/ml/datasets/Credit+Approval
"""
data = pd.read_csv(
FETCH_URLS["credit_approval"], header=None, na_values="?"
).dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0, 3, 4, 5, 6, 8, 9, 11, 12]
return data, categorical_features
def fetch_dermatology(self):
"""Download and transform the Dermatology Data Set.
https://archive.ics.uci.edu/ml/datasets/Dermatology
"""
data = pd.read_csv(
FETCH_URLS["dermatology"], header=None, na_values="?"
).dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = list(range(data.shape[1] - 1))
categorical_features.remove(33)
return data, categorical_features
def fetch_echocardiogram(self):
"""Download and transform the Echocardiogram Data Set.
https://archive.ics.uci.edu/ml/datasets/Echocardiogram
"""
data = pd.read_csv(
FETCH_URLS["echocardiogram"],
header=None,
error_bad_lines=False,
warn_bad_lines=False,
na_values="?",
)
data.drop(columns=[10, 11], inplace=True)
data.dropna(inplace=True)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 3]
return data, categorical_features
def fetch_flags(self):
"""Download and transform the Flags Data Set.
https://archive.ics.uci.edu/ml/datasets/Flags
"""
data = pd.read_csv(FETCH_URLS["flags"], header=None)
target = data[6].rename("target")
data = data.drop(columns=[0, 6]).T.reset_index(drop=True).T.join(target)
categorical_features = [
0,
1,
4,
8,
9,
10,
11,
12,
13,
14,
15,
21,
22,
23,
24,
25,
26,
27,
]
return data, categorical_features
def fetch_heart_disease(self):
"""Download and transform the Heart Disease Data Set.
https://archive.ics.uci.edu/ml/datasets/Heart+Disease
"""
data = (
pd.concat(
[
pd.read_csv(url, header=None, na_values="?")
for url in FETCH_URLS["heart_disease"]
],
ignore_index=True,
)
.drop(columns=[10, 11, 12])
.dropna()
)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 2, 5, 6, 8]
return data, categorical_features
def fetch_hepatitis(self):
"""Download and transform the Hepatitis Data Set.
https://archive.ics.uci.edu/ml/datasets/Hepatitis
"""
data = (
pd.read_csv(FETCH_URLS["hepatitis"], header=None, na_values="?")
.drop(columns=[15, 18])
.dropna()
)
target = data[0].rename("target")
data = data.drop(columns=[0]).T.reset_index(drop=True).T.join(target)
categorical_features = list(range(1, 13)) + [16]
return data, categorical_features
def fetch_german_credit(self):
"""Download and transform the German Credit Data Set.
https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
"""
data = pd.read_csv(FETCH_URLS["german_credit"], header=None, sep=" ")
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = (
np.argwhere(data.iloc[0, :-1].apply(lambda x: str(x)[0] == "A").values)
.squeeze()
.tolist()
)
return data, categorical_features
def fetch_heart(self):
"""Download and transform the Heart Data Set.
http://archive.ics.uci.edu/ml/datasets/statlog+(heart)
"""
data = pd.read_csv(FETCH_URLS["heart"], header=None, delim_whitespace=True)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 2, 5, 6, 8, 10, 12]
return data, categorical_features
def fetch_thyroid(self):
"""Download and transform the Thyroid Disease Data Set.
Label 0 corresponds to no disease found.
Label 1 corresponds to one or multiple diseases found.
https://archive.ics.uci.edu/ml/datasets/Thyroid+Disease
"""
data = (
pd.read_csv(FETCH_URLS["thyroid"], header=None, na_values="?")
.drop(columns=27)
.dropna()
.T.reset_index(drop=True)
.T
)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
data["target"] = (
data["target"].apply(lambda x: x.split("[")[0]) != "-"
).astype(int)
categorical_features = [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
18,
20,
22,
24,
26,
27,
]
return data, categorical_features
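# Hedged usage sketch (illustration only, not part of the original module): download a
# couple of the datasets defined above and store them in a local SQLite file. It
# assumes the Datasets base class accepts the dataset names through its constructor,
# as the `self.names` check in download() suggests; the names and paths are examples.
def _example_download_and_save():
    datasets = ContinuousCategoricalDatasets(names=["adult", "abalone"])
    datasets.download()
    datasets.save(path=".", db_name="continuous_categorical")
    return datasets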
class MultiClassDatasets(Datasets):
"""Class to download, transform and save multi-class datasets."""
def fetch_first_order_theorem(self):
"""Download and transform the First Order Theorem Data Set.
https://www.openml.org/d/1475
"""
data = pd.read_csv(FETCH_URLS["first_order_theorem"])
data.rename(columns={"Class": "target"}, inplace=True)
return data
def fetch_gas_drift(self):
"""Download and transform the Gas Drift Data Set.
https://www.openml.org/d/1476
"""
data = pd.read_csv(FETCH_URLS["gas_drift"])
data.rename(columns={"Class": "target"}, inplace=True)
return data
def fetch_autouniv_au7(self):
"""Download and transform the AutoUniv au7 Data Set
https://www.openml.org/d/1552
"""
data = pd.read_csv(FETCH_URLS["autouniv_au7"])
data.rename(columns={"Class": "target"}, inplace=True)
data.target = data.target.apply(lambda x: x.replace("class", "")).astype(int)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_autouniv_au4(self):
"""Download and transform the AutoUniv au4 Data Set
https://www.openml.org/d/1548
"""
data = pd.read_csv(FETCH_URLS["autouniv_au4"])
data.rename(columns={"Class": "target"}, inplace=True)
data.target = data.target.apply(lambda x: x.replace("class", "")).astype(int)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_mice_protein(self):
"""Download and transform the Mice Protein Data Set
https://www.openml.org/d/40966
"""
data = pd.read_csv(FETCH_URLS["mice_protein"])
data.rename(columns={"class": "target"}, inplace=True)
data.drop(columns=["MouseID"], inplace=True)
data.replace("?", np.nan, inplace=True)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
mask2 = data.isna().sum() < 10
data = data.loc[:, mask & mask2].dropna().copy()
data.iloc[:, :-1] = data.iloc[:, :-1].astype(float)
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_steel_plates(self):
"""Download and transform the Steel Plates Fault Data Set.
https://www.openml.org/d/40982
"""
data = pd.read_csv(FETCH_URLS["steel_plates"])
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_cardiotocography(self):
"""Download and transform the Cardiotocography Data Set.
https://www.openml.org/d/1560
"""
data = pd.read_csv(FETCH_URLS["cardiotocography"])
data.rename(columns={"Class": "target"}, inplace=True)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_waveform(self):
"""Download and transform the Waveform Database Generator (version 2) Data Set.
https://www.openml.org/d/60
"""
data = pd.read_csv(FETCH_URLS["waveform"])
data.rename(columns={"class": "target"}, inplace=True)
return data
def fetch_volkert(self):
"""Download and transform the Volkert Data Set.
https://www.openml.org/d/41166
"""
data = pd.read_csv(FETCH_URLS["volkert"])
data.rename(columns={"class": "target"}, inplace=True)
mask = (data.iloc[:, 1:].nunique() > 100).tolist()
mask.insert(0, True)
data = data.loc[:, mask].copy()
return data
def fetch_vehicle(self):
"""Download and transform the Vehicle Silhouettes Data Set.
https://archive.ics.uci.edu/ml/datasets/Statlog+(Vehicle+Silhouettes)
"""
data = pd.DataFrame()
from pathlib import Path
from abc import ABC, abstractmethod
from typing import List, Dict
from pandas import DataFrame
import pickle
import os
import datetime
import pandas as pd
import numpy as np
SORTED_COLUMNS = ['global_cell_id', 'date', 'session_cell_id', 'mouse_line', 'sex', 'age', 'brain_region', 'cell_type', 'recording_type',
'internal_solution', 'stimulation_in_percent', 'pharmacology',
'stimulation_string', 'stimulation_frequency-Hz', 'stimulation_duration-ms', 'filepath_main_excel_sheet', 'filepath_detected_events']
#'Recordings and cell properties' for reasons of backwards compatibility
VOLTAGE_CLAMP_IDENTIFIERS = ['Voltage-clamp', 'voltage-clamp', 'Voltage_clamp', 'voltage_clamp', 'Recordings and cell properties']
CURRENT_CLAMP_IDENTIFIERS = ['Current-clamp', 'current-clamp', 'Current_clamp', 'current_clamp']
def listdir_nohidden(path: Path) -> List:
return [f for f in os.listdir(path.as_posix()) if f.startswith('.') == False]
class Database:
"""
The database is supposed to hold all information about all recorded cells that were added to it.
This information excludes the raw data itself (only filepaths to the raw data are stored),
but includes general metadata like cell type, stimulus type, brain region, and pharmacological treatment.
Potentially, some intermediate data could be added (like how the cell reacted upon stimulation, see dashboard).
Overall, the database should allow the selection of cells based on mix-and-match criteria
for further (statistical) analyses and plotting.
"""
def __init__(self, root_dir: Path):
self.root_dir = root_dir
self.subdirectories = Subdirectories(root_dir = root_dir)
self.global_cell_id = 0
self.cells_in_database = list()
def list_all_column_values(self, global_cell_id: str, recording_type: str, column_name: str) -> List:
if recording_type not in ['current_clamp', 'IPSPs', 'EPSPs']:
raise ValueError('The attribute "recording_type" has to be one of the following: "current_clamp", "IPSPs", or "EPSPs"')
if recording_type == 'current_clamp':
attribute = 'current_clamp_recordings'
elif recording_type == 'IPSPs':
attribute = 'inhibitory_voltage_clamp_recordings'
elif recording_type == 'EPSPs':
attribute = 'excitatory_voltage_clamp_recordings'
if hasattr(self, attribute):
df_to_use = getattr(self, attribute)
if global_cell_id in df_to_use['global_cell_id'].unique():
values = list(df_to_use.loc[df_to_use['global_cell_id'] == global_cell_id, column_name].values)
return values
else:
raise ValueError(f'{global_cell_id} is not in the database yet!')
else:
print('No entries in the database yet')
def add_new_cell_recording(self, cell_recordings_dir: Path, overwrite: bool):
increase_global_cell_id_count = True
if overwrite:
if cell_recordings_dir.name in self.cells_in_database:
date = cell_recordings_dir.name[:10]
session_cell_id = cell_recordings_dir.name[cell_recordings_dir.name.rfind('_')+1:]
global_cell_id = self.cell_recordings_metadata.loc[(self.cell_recordings_metadata['date'] == date) &
(self.cell_recordings_metadata['session_cell_id'] == session_cell_id),
'global_cell_id'].iloc[0]
increase_global_cell_id_count = False
self.cell_recordings_metadata.drop(self.cell_recordings_metadata.loc[self.cell_recordings_metadata['global_cell_id'] == global_cell_id].index, inplace = True)
self.cells_in_database.remove(cell_recordings_dir.name)
else:
global_cell_id = self.global_cell_id
else:
global_cell_id = self.global_cell_id
if cell_recordings_dir.name not in self.cells_in_database:
new_recording = CellRecording(cell_recordings_dir = cell_recordings_dir, global_cell_id = global_cell_id)
recording_overview = new_recording.create_recordings_overview()
if hasattr(self, 'cell_recordings_metadata') == False:
for column_name in list(recording_overview.columns):
if column_name not in SORTED_COLUMNS:
print(f'Warning: {column_name} not defined in SORTED_COLUMNS.')
self.cell_recordings_metadata = recording_overview[SORTED_COLUMNS]
else:
for column_name in list(recording_overview.columns):
if column_name not in list(self.cell_recordings_metadata.columns):
print(f'Warning: {column_name} represents a new column. Consider updating of all previously added cell recordings.')
self.cell_recordings_metadata = pd.concat([self.cell_recordings_metadata, recording_overview], ignore_index = True)
self.cell_recordings_metadata.sort_values(by=['global_cell_id'], inplace = True, ignore_index = True)
for recording_type_key, dataframe in new_recording.recording_type_specific_datasets.items():
if type(dataframe) == DataFrame:
if hasattr(self, recording_type_key) == False:
setattr(self, recording_type_key, dataframe)
else:
# check if all columns are matching?
current_df = getattr(self, recording_type_key)
updated_df = pd.concat([current_df, dataframe], ignore_index = True)
updated_df.sort_values(by = ['global_cell_id'], inplace = True, ignore_index = True)
setattr(self, recording_type_key, updated_df)
self.cells_in_database.append(cell_recordings_dir.name) # store the directory name as the cell id, matching the membership checks above
if increase_global_cell_id_count:
self.global_cell_id += 1
# Trigger update of mix-and-match categories?
else:
note_line1 = f'Note: The recordings in "{cell_recordings_dir.as_posix()}" were already added to the database.\n'
note_line2 = ' Consider passing "overwrite = True" when calling this function in order to update the information.'
print(note_line1 + note_line2)
def save_all(self):
self.save_all_attributes_as_pickle()
self.save_cell_recordings_metadata_as_csv()
def save_all_attributes_as_pickle(self):
project_summary = self.__dict__.copy()
filepath = f'{self.subdirectories.project_summary.as_posix()}/{datetime.datetime.now().strftime("%Y_%m_%d")}_dclpatch_project_summary.p'
with open(filepath, 'wb') as io:
pickle.dump(project_summary, io)
def save_cell_recordings_metadata_as_csv(self):
filepath = f'{self.subdirectories.project_summary.as_posix()}/{datetime.datetime.now().strftime("%Y_%m_%d")}_dclpatch_database_overview.csv'
self.cell_recordings_metadata.to_csv(filepath)
def load_all(self):
result_files = [fname for fname in listdir_nohidden(self.subdirectories.project_summary) if fname.endswith('dclpatch_project_summary.p')]
result_files.sort(reverse = True)
with open(f'{self.subdirectories.project_summary.as_posix()}/{result_files[0]}', 'rb') as io:
project_summary = pickle.load(io)
for key, value in project_summary.items():
if hasattr(self, key) == False:
setattr(self, key, value)
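# Hedged usage sketch (illustration only, not part of the original module): create a
# Database for a project folder and register one cell recording directory. The paths
# and folder name are placeholders; the directory is expected to contain the Excel
# sheet named after the folder, as CellRecording below assumes.
def _example_build_database():
    db = Database(root_dir=Path("/path/to/patch_project"))
    db.add_new_cell_recording(
        cell_recordings_dir=Path("/path/to/patch_project/2022_01_01_cell_01"),
        overwrite=False,
    )
    db.save_all()
    return db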
class CellRecording:
def __init__(self, cell_recordings_dir: Path, global_cell_id: int) -> None:
self.cell_recordings_dir = cell_recordings_dir
self.global_cell_id = str(global_cell_id).zfill(4)
self.recording_type_specific_datasets = {'excitatory_voltage_clamp_recordings': None,
'inhibitory_voltage_clamp_recordings': None,
'current_clamp_recordings': None}
self.extract_all_data_from_directory()
def extract_all_data_from_directory(self) -> None:
self.general_metadata_df = self.create_general_metadata_df()
recording_type_data_extractors = self.prepare_all_recording_type_data_extractors()
for data_extractor in recording_type_data_extractors:
extracted_df = data_extractor.create_recording_specific_dataframe(cell_recording_obj = self)
self.recording_type_specific_datasets[data_extractor.recording_type_key] = extracted_df
def create_general_metadata_df(self) -> DataFrame:
metadata_df = pd.read_excel(self.cell_recordings_dir.joinpath(f'{self.cell_recordings_dir.name}.xlsx'), sheet_name = 'General information')
general_metadata = {'date': self.cell_recordings_dir.name[:10],
'session_cell_id': self.cell_recordings_dir.name[self.cell_recordings_dir.name.rfind('_') + 1:],
'mouse_line': metadata_df['Animal line'][0],
'brain_region': metadata_df['Region'][0],
'cell_type': metadata_df['Type'][0],
'sex': metadata_df['Sex'][0],
'age': metadata_df['Age (days)'][0],
'stimulation_in_percent': metadata_df['Stimulation (%)'][0],
'internal_solution': metadata_df['Internal used'][0]}
return pd.DataFrame(general_metadata, index=[0])
def prepare_all_recording_type_data_extractors(self) -> List: #returns a list of DataExtractor objects
main_excel_sheet = pd.read_excel(self.cell_recordings_dir.joinpath(f'{self.cell_recordings_dir.name}.xlsx'), sheet_name = None)
recording_type_data_extractors = list()
for tab_name in main_excel_sheet.keys():
if tab_name in VOLTAGE_CLAMP_IDENTIFIERS:
recording_type_data_extractors.append(ExtractVoltageClampData(df = main_excel_sheet[tab_name]))
elif tab_name in CURRENT_CLAMP_IDENTIFIERS:
recording_type_data_extractors.append(ExtractCurrentClampData(df = main_excel_sheet[tab_name]))
elif tab_name == 'General information':
continue
else:
raise ValueError(f'"{tab_name}" is not a valid identifier of recording type for {self.cell_recordings_dir.as_posix()}')
return recording_type_data_extractors
def create_recordings_overview(self) -> DataFrame:
recording_specific_dataframes = []
for recording_type, value in self.recording_type_specific_datasets.items():
if type(value) == DataFrame:
relevant_columns1 = ['global_cell_id', 'recording_type','pharmacology', 'filepath_main_excel_sheet', 'filepath_detected_events']
relevant_columns2 = ['stimulation_string', 'stimulation_frequency-Hz', 'stimulation_duration-ms']
df_tmp = value[relevant_columns1 + relevant_columns2].copy()
recording_specific_dataframes.append(df_tmp)
overview_df = pd.concat(recording_specific_dataframes)
overview_df = overview_df.reset_index(drop = True)
shape_adapted_general_metadata_df = pd.concat([self.general_metadata_df]*overview_df.shape[0], ignore_index = True)
import os
import json
import click
import logging
import itertools
import pandas as pd
from tqdm import tqdm
from collections import OrderedDict
from more_itertools import ichunked
from twarc.expansions import flatten
log = logging.getLogger("twarc")
DEFAULT_TWEET_COLUMNS = """__twarc.retrieved_at
__twarc.url
__twarc.version
attachments.media
attachments.media_keys
attachments.poll.duration_minutes
attachments.poll.end_datetime
attachments.poll.id
attachments.poll.options
attachments.poll.voting_status
attachments.poll_ids
author.created_at
author.description
author.entities.description.cashtags
author.entities.description.hashtags
author.entities.description.mentions
author.entities.description.urls
author.entities.url.urls
author.id
author.location
author.name
author.pinned_tweet_id
author.profile_image_url
author.protected
author.public_metrics.followers_count
author.public_metrics.following_count
author.public_metrics.listed_count
author.public_metrics.tweet_count
author.url
author.username
author.verified
author.withheld.country_codes
author_id
context_annotations
conversation_id
created_at
entities.annotations
entities.cashtags
entities.hashtags
entities.mentions
entities.urls
geo.coordinates.coordinates
geo.coordinates.type
geo.country
geo.country_code
geo.full_name
geo.geo.bbox
geo.geo.type
geo.id
geo.name
geo.place_id
geo.place_type
id
in_reply_to_user.created_at
in_reply_to_user.description
in_reply_to_user.entities.description.cashtags
in_reply_to_user.entities.description.hashtags
in_reply_to_user.entities.description.mentions
in_reply_to_user.entities.description.urls
in_reply_to_user.entities.url.urls
in_reply_to_user.id
in_reply_to_user.location
in_reply_to_user.name
in_reply_to_user.pinned_tweet_id
in_reply_to_user.profile_image_url
in_reply_to_user.protected
in_reply_to_user.public_metrics.followers_count
in_reply_to_user.public_metrics.following_count
in_reply_to_user.public_metrics.listed_count
in_reply_to_user.public_metrics.tweet_count
in_reply_to_user.url
in_reply_to_user.username
in_reply_to_user.verified
in_reply_to_user.withheld.country_codes
in_reply_to_user_id
lang
possibly_sensitive
public_metrics.like_count
public_metrics.quote_count
public_metrics.reply_count
public_metrics.retweet_count
referenced_tweets
reply_settings
source
text
type
withheld.copyright
withheld.country_codes""".split(
"\n"
)
DEFAULT_USERS_COLUMNS = """__twarc.retrieved_at
__twarc.url
__twarc.version
created_at
description
entities.description.cashtags
entities.description.hashtags
entities.description.mentions
entities.description.urls
entities.url.urls
id
location
name
pinned_tweet_id
pinned_tweet
profile_image_url
protected
public_metrics.followers_count
public_metrics.following_count
public_metrics.listed_count
public_metrics.tweet_count
url
username
verified
withheld.country_codes""".split(
"\n"
)
class CSVConverter:
def __init__(
self,
infile,
outfile,
json_encode_all=False,
json_encode_lists=True,
json_encode_text=False,
inline_referenced_tweets=True,
inline_pinned_tweets=False,
allow_duplicates=False,
input_tweet_columns=True,
input_users_columns=False,
input_columns="",
output_columns="",
batch_size=5000,
):
self.infile = infile
self.outfile = outfile
self.json_encode_all = json_encode_all
self.json_encode_lists = json_encode_lists
self.json_encode_text = json_encode_text
self.inline_referenced_tweets = inline_referenced_tweets
self.inline_pinned_tweets = inline_pinned_tweets
self.allow_duplicates = allow_duplicates
self.batch_size = batch_size
self.dataset_ids = set()
self.std = infile.name == "<stdin>" or outfile.name == "<stdout>"
self.progress = tqdm(
unit="B",
unit_scale=True,
unit_divisor=1024,
total=os.stat(infile.name).st_size if not self.std else 1,
disable=self.std,
)
self.columns = list()
if input_tweet_columns:
self.columns.extend(DEFAULT_TWEET_COLUMNS)
if input_users_columns:
self.columns.extend(DEFAULT_USERS_COLUMNS)
if input_columns:
self.columns.extend(manual_columns.split(","))
self.output_columns = (
output_columns.split(",") if output_columns else self.columns
)
self.counts = {
"lines": 0,
"tweets": 0,
"referenced_tweets": 0,
"parse_errors": 0,
"duplicates": 0,
"rows": 0,
"input_columns": len(self.columns),
"output_columns": len(self.output_columns),
}
def _read_lines(self):
"""
Generator that reads the input file line by line. The progress bar is based on file size.
"""
line = self.infile.readline()
while line:
self.counts["lines"] = self.counts["lines"] + 1
if line.strip() != "":
try:
o = json.loads(line)
yield o
except Exception as ex:
self.counts["parse_errors"] = self.counts["parse_errors"] + 1
log.error(f"Error when trying to parse json: '{line}' {ex}")
if not self.std:
self.progress.update(self.infile.tell() - self.progress.n)
line = self.infile.readline()
def _handle_formats(self, batch):
"""
Handle different types of JSON formats, generating one tweet at a time.
A batch is a number of lines from a JSON file; these can be full pages of
API responses or individual tweets.
"""
for item in batch:
# if it has a "data" key ensure data it is flattened
if "data" in item:
# flatten a list of tweets
if isinstance(item["data"], list):
for i in flatten(item)["data"]:
yield i
# flatten a single tweet, eg, from stream
else:
yield flatten(item)["data"]
else:
# this assumes the data is flattened
yield item
def _inline_referenced_tweets(self, tweet):
"""
Insert referenced tweets into the main CSV
"""
if "referenced_tweets" in tweet and self.inline_referenced_tweets:
for referenced_tweet in tweet["referenced_tweets"]:
# extract the referenced tweet as a new row
self.counts["referenced_tweets"] = self.counts["referenced_tweets"] + 1
yield referenced_tweet
# leave behind the reference, but not the full tweet
tweet["referenced_tweets"] = [
{"type": r["type"], "id": r["id"]} for r in tweet["referenced_tweets"]
]
# Deal with pinned tweets for user datasets:
# Todo: This is not fully implemented!
if self.inline_pinned_tweets:
if "pinned_tweet" in tweet:
# extract the pinned tweet as a new row
tweet["pinned_tweet"]["type"] = "pinned_tweet"
self.counts["referenced_tweets"] = self.counts["referenced_tweets"] + 1
yield tweet["pinned_tweet"]
# pinned_tweet_id remains:
tweet.pop("pinned_tweet")
yield tweet
def _process_tweets(self, tweets):
"""
Process a single tweet before adding it to the dataframe.
ToDo: Drop columns and dedupe etc here.
"""
for tweet in tweets:
# Order the fields in the json, because JSON key order isn't guaranteed.
# Needed so that different batches won't produce different ordered columns
json_keys = sorted(tweet.keys())
selected_field_order = list()
# Opinion: always put in id,created_at,text first, and then the rest
if "id" in json_keys:
selected_field_order.append(json_keys.pop(json_keys.index("id")))
if "created_at" in json_keys:
selected_field_order.append(
json_keys.pop(json_keys.index("created_at"))
)
if "text" in json_keys:
selected_field_order.append(json_keys.pop(json_keys.index("text")))
selected_field_order.extend(json_keys)
tweet = OrderedDict((k, tweet[k]) for k in selected_field_order)
self.counts["tweets"] = self.counts["tweets"] + 1
if tweet["id"] in self.dataset_ids:
self.counts["duplicates"] = self.counts["duplicates"] + 1
if self.allow_duplicates:
yield tweet
else:
if tweet["id"] not in self.dataset_ids:
yield tweet
self.dataset_ids.add(tweet["id"])
def _process_dataframe(self, _df):
# (Optional) json encode all
if self.json_encode_all:
_df = _df.applymap(json.dumps, na_action="ignore")
else:
# (Optional) text escape for any text fields
if self.json_encode_text:
_df = _df.applymap(
lambda x: json.dumps(x) if type(x) is str else x,
na_action="ignore",
)
else:
# Mandatory newline escape to prevent breaking csv format:
_df = _df.applymap(
lambda x: x.replace("\r", "").replace("\n", r"\n")
if type(x) is str
else x,
na_action="ignore",
)
# (Optional) json for lists
if self.json_encode_lists:
_df = _df.applymap(
lambda x: json.dumps(x) if pd.api.types.is_list_like(x) else x,
na_action="ignore",
)
return _df
def _process_batch(self, batch):
# (Optional) append referenced tweets as new rows
tweet_batch = itertools.chain.from_iterable(
self._process_tweets(self._inline_referenced_tweets(tweet))
for tweet in self._handle_formats(batch)
)
_df = pd.json_normalize([tweet for tweet in tweet_batch], errors="ignore")
# Check for mismatched columns
if len(_df.columns) > len(self.columns):
diff = set(_df.columns) - set(self.columns)
click.echo(
click.style(
f"💔 ERROR: Unexpected Data: \n\"{','.join(diff)}\"\n to fix, add these with --input-columns. Skipping entire batch of {len(_df)} tweets!",
fg="red",
),
err=True,
)
return pd.DataFrame(columns=self.columns)
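# Hedged usage sketch (illustration only, not twarc's documented API): drive the
# converter defined above over a flattened JSONL file in batches, using only the
# methods shown in this excerpt (_read_lines and _process_batch); writing each batch
# to CSV here is an assumption about how the omitted driver code behaves.
def _example_convert(in_path="tweets.jsonl", out_path="tweets.csv"):
    with open(in_path, "r") as infile, open(out_path, "w") as outfile:
        converter = CSVConverter(infile=infile, outfile=outfile)
        write_header = True
        for batch in ichunked(converter._read_lines(), converter.batch_size):
            df = converter._process_batch(batch)
            df.to_csv(outfile, index=False, header=write_header)
            write_header = False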
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
    def test_loc_coercion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
        # GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
        # if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
        # a mixed int/string index is used
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
        # these are going to raise because the index is non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
        # dtype should properly raise KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
        # these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
        orig = tm.makeTimeDataFrame()
import argparse
import glob
import json
import logging
import os
import re
import msgpack
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torchvision.models as model_zoo
import torchvision.transforms as transforms
from PIL import Image
from pycocotools.mask import decode
from scipy import ndimage
from torch.utils.data import DataLoader, TensorDataset
from cameras import _2D_to_planar_world
def config():
parser = argparse.ArgumentParser()
parser.add_argument('segmentations_dir')
parser.add_argument('--output-dir', type=str, required=True)
parser.add_argument('--log', type=str, required=True)
parser.add_argument('--backbone', type=str, required=True)
parser.add_argument('--model', type=str, nargs='*', required=True)
parser.add_argument('--n-parameters', type=int, required=True)
parser.add_argument('--glob', type=str, default=r'\.msgpack$')
parser.add_argument(
'--categories',
type=json.loads,
default='{"obj0": 0, "obj1": 1, "obj2": 2, "obj3": 3, "obj4": 4, "obj5": 5, "obj6": 6, "obj7": 7}')
parser.add_argument('--fixed_categories', type=str, nargs='*', default=['obj0', 'obj4', 'obj6', 'obj8'])
parser.add_argument('--elevated_areas', type=str, nargs='*', default=['obj4'])
parser.add_argument('--elevated_categories', type=str, nargs='*', default=['obj2'])
parser.add_argument('--point_categories', type=str, nargs='*', default=['obj1', 'obj2'])
parser.add_argument('--mesh_categories', type=str, nargs='*', default=['obj3', 'obj5', 'obj7'])
parser.add_argument('--elevation_value', type=float, default=0.1)
return parser.parse_args()
class Model:
"""Contains either 1 general neural network or 1 neural network for each object categories"""
def __init__(self, model, backbone, n_parameters, categories):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if len(model) > 1:
self.model = {}
for weight_file in model:
category_idx = categories[os.path.splitext(os.path.basename(weight_file))[0]]
self.model[category_idx] = self.load_model(weight_file, backbone, n_parameters)
else:
self.model = self.load_model(model[0], backbone, n_parameters)
self.transforms = transforms.Compose(
[transforms.Resize((224, 224)),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
def load_model(self, weight_file, backbone, n_parameters):
model = getattr(model_zoo, backbone)(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, n_parameters)
model.load_state_dict(torch.load(weight_file, map_location=self.device))
model = model.to(self.device)
model.eval()
return model
def infer(self, data, cat):
"""Read data in batches and apply specific neural network"""
spatial_parameters = []
if isinstance(self.model, dict):
with torch.no_grad():
for batch in DataLoader(data, batch_size=128):
spatial_parameters.append(self.model[cat](batch.to(self.device)).cpu().numpy())
else:
with torch.no_grad():
for batch in DataLoader(data, batch_size=128):
spatial_parameters.append(self.model(batch.to(self.device)).cpu().numpy())
return np.vstack(spatial_parameters)
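# --- Hedged usage sketch (not part of the original script) ---
# Assuming the CLI is the entry point, Model and the Inference class defined
# below could be wired together roughly like this; the __main__ guard and the
# argument order simply mirror the constructors in this file and are an assumption.
#
#   if __name__ == '__main__':
#       args = config()
#       model = Model(args.model, args.backbone, args.n_parameters, args.categories)
#       runner = Inference(model, args.segmentations_dir, args.output_dir,
#                          args.categories, args.fixed_categories, args.elevated_areas,
#                          args.elevated_categories, args.point_categories,
#                          args.mesh_categories, args.elevation_value)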
class Inference:
def __init__(self, model, segmentations_input, output_dir, categories, fixed_cat, elevated_areas, elevated_cat,
point_cat, mesh_cat, elevation_value):
self.model = model
self.segmentations_input = segmentations_input
basename = os.path.basename(segmentations_input)
output_fname = os.path.join(output_dir, basename)
output_fname = os.path.splitext(output_fname)[0] + ".h5"
if os.path.exists(output_fname):
os.remove(output_fname)
self.store = pd.HDFStore(output_fname)
self.categories = categories
self.fixed_categories = [self.categories[cat] for cat in fixed_cat if self.categories.get(cat) is not None]
self.elevated_area_categories = [
self.categories[cat] for cat in elevated_areas if self.categories.get(cat) is not None
]
self.elevated_categories = [
self.categories[cat] for cat in elevated_cat if self.categories.get(cat) is not None
]
self.point_categories = [self.categories[cat] for cat in point_cat if self.categories.get(cat) is not None]
self.mesh_categories = [self.categories[cat] for cat in mesh_cat if self.categories.get(cat) is not None]
self.elevation_value = elevation_value
def _read_msgpack(self):
with open(self.segmentations_input, 'rb') as f:
yield from msgpack.Unpacker(f)
def create_df_chunks(self, maxlen=1024):
"""Chunks of frames generator
Join chunks of frames (represented as lines in msgpack file) in a pandas dataframe
As video files typically have >100'000 frames with 10-60 segmented objects each,
it is necessary to efficiently read the data by chunks.
"""
buffer = []
for count, frame in enumerate(self._read_msgpack(), start=1):
buffer.append(pd.DataFrame(frame))
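# NOTE: the file is truncated at this point. A completion of the chunked
# generator that is consistent with the docstring (an assumption, not the
# author's code) would flush the buffer once it reaches maxlen and once more
# at the end:
#
#       if len(buffer) >= maxlen:
#           yield pd.concat(buffer, ignore_index=True)
#           buffer = []
#   if buffer:
#       yield pd.concat(buffer, ignore_index=True)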
#!/usr/bin/env python
# coding: utf-8
# # Benchmark Results
# This notebook visualizes the output from the different models on different classification problems
# In[1]:
import collections
import glob
import json
import os
import numpy as np
import pandas as pd
from plotnine import *
from saged.utils import split_sample_names, create_dataset_stat_df, get_dataset_stats, parse_map_file
# ## Set Up Functions and Get Metadata
# In[3]:
def return_unlabeled():
# For use in a defaultdict
return 'unlabeled'
# In[4]:
data_dir = '../../data/'
map_file = os.path.join(data_dir, 'sample_classifications.pkl')
sample_to_label = parse_map_file(map_file)
sample_to_label = collections.defaultdict(return_unlabeled, sample_to_label)
# In[ ]:
metadata_path = os.path.join(data_dir, 'aggregated_metadata.json')
metadata = None
with open(metadata_path) as json_file:
metadata = json.load(json_file)
sample_metadata = metadata['samples']
# In[ ]:
experiments = metadata['experiments']
sample_to_study = {}
for study in experiments:
for accession in experiments[study]['sample_accession_codes']:
sample_to_study[accession] = study
# ## Sepsis classification
# In[8]:
in_files = glob.glob('../../results/single_label.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics
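# The path-parsing logic above recurs in most cells below; a small helper like
# this captures it (a refactoring sketch, not present in the original notebook):
def parse_model_info(path, prefix):
    """Return (unsupervised_model, supervised_model) parsed from a results path."""
    model_info = path.strip('.tsv').split(prefix)[-1].split('.')
    if len(model_info) == 4:
        return model_info[0], model_info[1]
    return 'untransformed', model_info[0]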
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# In[11]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=3)
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# ## All labels
# In[12]:
in_files = glob.glob('../../results/all_labels.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[13]:
metrics = None
for path in in_files:
if metrics is None:
metrics = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
metrics['unsupervised'] = unsupervised_model
metrics['supervised'] = supervised_model
else:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
metrics = pd.concat([metrics, new_df])
metrics
# In[14]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# In[15]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=2)
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# # Subsets of healthy labels
# In[16]:
in_files = glob.glob('../../results/subset_label.sepsis*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[17]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[18]:
print(sepsis_metrics[sepsis_metrics['healthy_used'] == 1])
# In[19]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[20]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[21]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Same analysis, but with tb instead of sepsis
# In[22]:
in_files = glob.glob('../../results/subset_label.tb*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[23]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[24]:
print(tuberculosis_metrics[tuberculosis_metrics['healthy_used'] == 1])
# In[25]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[26]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[27]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Supervised Results Only
# The results above show that unsupervised learning mostly hurts performance rather than helping.
# The visualizations below compare each model based only on its supervised results.
# In[28]:
supervised_sepsis = sepsis_metrics[sepsis_metrics['unsupervised'] == 'untransformed']
# In[29]:
plot = ggplot(supervised_sepsis, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[30]:
supervised_tb = tuberculosis_metrics[tuberculosis_metrics['unsupervised'] == 'untransformed']
# In[31]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[32]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Batch Effect Correction
# In[33]:
in_files = glob.glob('../../results/subset_label.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[34]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
print(model_info)
model_info = model_info.split('.')
print(model_info)
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[35]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[36]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## TB Batch effect corrected
# In[37]:
in_files = glob.glob('../../results/subset_label.tb*be_corrected.tsv')
print(in_files[:5])
# In[38]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[39]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[40]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Better Metrics, Same Label Distribution in Train and Val sets
# In[11]:
in_files = glob.glob('../../results/keep_ratios.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[12]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[13]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[14]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[15]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[16]:
sepsis_stat_df = create_dataset_stat_df(sepsis_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'sepsis')
sepsis_stat_df.tail(5)
# In[17]:
ggplot(sepsis_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[18]:
plot = ggplot(sepsis_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Effect of All Sepsis Data')
plot
# ## Same Distribution Tuberculosis
# In[19]:
in_files = glob.glob('../../results/keep_ratios.tb*be_corrected.tsv')
print(in_files[:5])
# In[20]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics = tb_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
tb_metrics['healthy_used'] = tb_metrics['healthy_used'].round(1)
tb_metrics
# In[21]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[22]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[23]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[24]:
tb_stat_df = create_dataset_stat_df(tb_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'tb')
tb_stat_df.tail(5)
# In[55]:
ggplot(tb_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[25]:
plot = ggplot(tb_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot
# ## Results from Small Datasets
# In[57]:
in_files = glob.glob('../../results/small_subsets.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[58]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[59]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[60]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[61]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## Small Training Set TB
# In[62]:
in_files = glob.glob('../../results/small_subsets.tb*be_corrected.tsv')
print(in_files[:5])
# In[63]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
"""
Author: <NAME>
Date: October 2016
File: supervised_labels.py
This Python code gives the top supervised labels for a given topic. The parameters needed are passed
through get_labels.py. It generates letter_trigram, pagerank, topic overlap and number-of-words
features, puts them into SVM classify format, and finally uses the already trained supervised model
to make ranking predictions and get the best label. You will need the SVM classify binary from SVM rank;
the URL is provided in the readme.
(adapted and refactored to Python 3 and our current data scheme.
January 2019, <NAME>)
"""
from os.path import join
import numpy as np
import re
import os
import argparse
import pandas as pd
from constants import DATA_BASE, DSETS
from rank_labels_train_svm import load_topics, load_labels, load_pageranks, generate_lt_feature, \
change_format, prepare_features, convert_dataset
parser = argparse.ArgumentParser()
parser.add_argument("num_top_labels") # number of top labels
parser.add_argument("ratings_version")
args = parser.parse_args()
# Global parameters for the model.
ratings_version = args.ratings_version
svm_path = join(DATA_BASE, 'ranker')
topics_path = join(svm_path, 'topics.csv')
labels_path = join(svm_path, f'ratings_{ratings_version}.csv')
svm_model = join(svm_path, f'svm_model_{ratings_version}')
tmp_file_path = join(svm_path, f"test_temp_{ratings_version}.dat")
output_path = join(svm_path, f"supervised_labels_{ratings_version}")
svm_classify_path = join(svm_path, 'svm_rank_classify')
pagerank_path = join(svm_path, 'pagerank-titles-sorted_de_categories_removed.txt')
tesets = ['dewac']
datasets = [DSETS.get(d, d) for d in tesets]
testsets = ('_' + '-'.join(tesets)) if tesets else ''
trsets = ['N']
trainsets = ('_'+'-'.join(trsets)) if trsets else ''
svm_model = join(svm_path, f'svm_model_{ratings_version}{trainsets}')
output_path = join(
svm_path, f"supervised_labels_{ratings_version}__testset{testsets}__trainset{trainsets}"
)
def chunks(l, n):
n = max(1, n)
return [l[i:i + n] for i in range(0, len(l), n)]
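# Quick illustration of chunks() (illustrative values, not from the source):
#   chunks([1, 2, 3, 4, 5], 2)  ->  [[1, 2], [3, 4], [5]]
#   chunks(list('abcd'), 0)     ->  [['a'], ['b'], ['c'], ['d']]   # n is clamped to at least 1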
def predict(test_set, pred_chunk_size, tmp_file, svm_classifier_file, svm_model_file, out_file):
""" calls SVM classify and gets predictions for each topic. """
with open(tmp_file, "w") as fp:
for item in test_set:
fp.write("%s\n" % item)
query = ' '.join([
svm_classifier_file,
tmp_file,
svm_model_file,
"predictionstemp"
])
print(query)
print()
os.system(query)
with open("predictionstemp") as h:
    pred_list = [line.strip() for line in h]
pred_chunks = chunks(pred_list, pred_chunk_size)
test_chunks = chunks(test_set, pred_chunk_size)
df_pred = pd.DataFrame.from_records(pred_chunks)
#%% Load Libs
import geopandas
from pathlib import Path
import pandas as pd
import datetime
from geopandas.tools import sjoin
#%% Setup paths
INPUT_PATH = Path.cwd() / "input"
# Paths to field boundaries
ce_boundary_path = INPUT_PATH / "20170206_CafRoughBoundaries" / "CafCookEastArea.shp"
cw_boundary_path = INPUT_PATH / "FromIanLeslie_CafGeospatial" / "CE_CW_WGS1984" / "CookWestBoundariesWGS1984" / "CookWestBoundariesWGS1984.shp"
# Paths to georeference points
ce_gp_path = INPUT_PATH / "CookEast_GeoReferencePoints_2016_IL" / "All_CookEast.shp"
cw_gp_path = INPUT_PATH / "FromIanLeslie_CafGeospatial" / "CE_CW_WGS1984" / "CookWestGeoRefPointsWGS1984" / "CookWestGeoRefPoints_WGS1984.shp"
# Paths to treatment boundaries
ce_treatment_path_1999To2016 = INPUT_PATH / "CookEastStrips" / "Field_Plan_Final.shp"
ce_treatment_path_2016 = INPUT_PATH / "CE_WGS1984_2016_OperationalFieldBoundaries" / "C01" / "C0117001.shp"
ce_treatment_path_2017 = INPUT_PATH / "20200408_CookEastFertZones" / "CE_SW_2zones2017rates" / "CE_SW_2zones2017rates.shp"
#%% Load and clean inputs
# CE Gridpoints
ce_gp_utm_11n = geopandas.read_file(ce_gp_path)
ce_gp_utm_11n.crs = {"init": "epsg:26911"}
ce_gp = (ce_gp_utm_11n
.to_crs({"init": "epsg:4326"})
.drop(
["FID_1", "COLUMN", "ROW", "ROW2", "COL_ROW", "COL_ROW2",
"EASTING", "NORTHING", "CROP", "AREA",
"PERIMETER", "AREA_AC", "TARGET"],
axis = 1))
#%%
# CE strips
ce_tx_1999To2016_utm_11n = geopandas.read_file(ce_treatment_path_1999To2016)
ce_tx_1999To2016_utm_11n.crs = {"init": "epsg:26911"}
ce_tx_1999To2016 = (ce_tx_1999To2016_utm_11n
.to_crs({"init": "epsg:4326"})
.drop(
["Crop", "Area", "Perimeter", "Area_ac", "Ind_Field"],
axis = 1))
#ce_tx_1999To2016.plot()
# %%
pointInPolys = sjoin(ce_gp, ce_tx_1999To2016, how="left")
# Check if original Strip and Field id's were assigned correctly
if((pointInPolys["STRIP"] == pointInPolys["Strip"]).any() != True):
raise Exception("Strips not equal")
if((pointInPolys["FIELD"] == pointInPolys["Field"]).any() != True):
raise Exception("Fields not equal")
# Clean up
ce_1999To2016 = (pointInPolys
.assign(TreatmentId = pointInPolys["Field"].astype(str) + pointInPolys["Strip"].astype(str))
.assign(StartYear = 1999)
.assign(EndYear = 2015)
.drop(["geometry", "STRIP", "FIELD", "index_right", "Strip", "Field"], axis = 1))
# Reassign TreatmentIds at Field C; some treatments in Field C were split between two strips due to smaller area of the strips relative to those in Field A and Field B
ce_1999To2016.loc[(ce_1999To2016["TreatmentId"] == "C8"), "TreatmentId"] = "C5"
ce_1999To2016.loc[(ce_1999To2016["TreatmentId"] == "C7"), "TreatmentId"] = "C6"
# %%
points_in_treatment = pd.DataFrame(ce_1999To2016).to_csv("foo.csv", index = False)
#%% Create data dictionaries
data_dictionary_columns = ["FieldName", "Units", "Description", "DataType"]
georef_treatment_data_dictionary = pd.DataFrame(
data = [
["ID2",
"unitless",
"Numeric value used to identify georeferenced points for long-term sample collection. Values are unique among both Cook fields (CE and CW). Use 'ID2' instead of 'Id' for historic reasons.",
"Int"],
["TreatmentId",
"unitless",
"String designation used to identify the treatment that the georeference point was located within for the given timespan between start and end year",
"String"],
["StartYear",
"unitless",
"The harvest year that the treatment designation was first assigned, inclusive",
"String"],
["EndYear",
"unitless",
"The harvest year that the treatment designation ended, inclusive",
"String"]
],
columns = data_dictionary_columns
)
#%% Output files
date = datetime.datetime.now().strftime("%Y%m%d")
OUT_PATH = Path.cwd() / "output"
OUT_PATH.mkdir(parents = True, exist_ok = True)
pd.DataFrame(ce_1999To2016)
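# The script is truncated here; presumably (an assumption, not in the source)
# the treatment table and the data dictionary above are written to OUT_PATH
# with the date suffix, e.g.:
#   pd.DataFrame(ce_1999To2016).to_csv(OUT_PATH / f"ce_georef_treatments_{date}.csv", index=False)
#   georef_treatment_data_dictionary.to_csv(OUT_PATH / f"ce_georef_treatments_dictionary_{date}.csv", index=False)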
import numpy as np
import pandas as pd
import akshare as ak
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rc("font",family='PingFang HK')
def get_fund_categories(open_fund=False):
fund_em_fund_name_df = ak.fund_em_fund_name()
if open_fund:
fund_em_open_fund_daily_df = ak.fund_em_open_fund_daily()
df = pd.merge(fund_em_open_fund_daily_df, fund_em_fund_name_df, on='基金代码')
fund_categories = np.unique(df['基金类型'].values)
else:
fund_categories = np.unique(fund_em_fund_name_df['基金类型'].values)
return fund_categories
def get_category_all_funds(category):
df = ak.fund_em_fund_name()
df = df[df['基金类型'] == category]
fund_code = df['基金代码'].values
return df, fund_code
def get_fund_net_worth(fund_code, start_date, end_date, fund_category):
"""
:param fund_code: string, input a fund code
:param start_date: string, input a date format 'yyyy-mm-dd'
:param end_date: string, input a date format 'yyyy-mm-dd'
:param fund_category: string, input either ['open', 'money', 'financial', 'etf']
:return: dataframe, sliced dataframe between start_date and end_date
"""
start_date = pd.to_datetime(start_date, format='%Y/%m/%d')
end_date = pd.to_datetime(end_date, format='%Y/%m/%d')
if fund_category == 'open':
df = ak.fund_em_open_fund_info(fund=fund_code)
elif fund_category == 'money':
df = ak.fund_em_money_fund_info(fund=fund_code)
df['净值日期'] = pd.to_datetime(df['净值日期'], format='%Y/%m/%d')
elif fund_category == 'financial':
df = ak.fund_em_financial_fund_info(fund=fund_code)
df['净值日期'] = pd.to_datetime(df['净值日期'], format='%Y/%m/%d')
elif fund_category == 'etf':
df = ak.fund_em_etf_fund_info(fund=fund_code)
df['净值日期'] = pd.to_datetime(df['净值日期'], format='%Y/%m/%d')
mask = (df['净值日期'] >= start_date) & (df['净值日期'] <= end_date)
df = df.loc[mask].reset_index().drop('index', axis=1)
return df
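# Hedged usage example (the fund code and dates are illustrative, not from the source):
#   df = get_fund_net_worth('110011', '2021-01-01', '2021-06-30', 'open')
#   print(df.head())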
def get_open_fund_rank(category, rank, order_by, ascending=False):
"""
:param category: string, input ['股票型','混合型','债券型','指数型','QDII','LOF','FOF']
:param rank: int, return how many rows of the dataframe
:param order_by: string, input ['近1周', '近1月', '近3月',
'近6月', '近1年', '近2年', '近3年']
:param ascending: bool, default False
:return: dataframe, with specific sorted dataframe
"""
if category == '股票型':
df = ak.fund_em_open_fund_rank(symbol="股票型").sort_values(by=[order_by], ascending=ascending)
elif category == '混合型':
df = ak.fund_em_open_fund_rank(symbol="混合型").sort_values(by=[order_by], ascending=ascending)
elif category == '债券型':
df = ak.fund_em_open_fund_rank(symbol="债券型").sort_values(by=[order_by], ascending=ascending)
elif category == "指数型":
df = ak.fund_em_open_fund_rank(symbol="指数型").sort_values(by=[order_by], ascending=ascending)
elif category == 'QDII':
df = ak.fund_em_open_fund_rank(symbol="QDII").sort_values(by=[order_by], ascending=ascending)
elif category == 'LOF':
df = ak.fund_em_open_fund_rank(symbol="LOF").sort_values(by=[order_by], ascending=ascending)
elif category == 'FOF':
df = ak.fund_em_open_fund_rank(symbol="FOF").sort_values(by=[order_by], ascending=ascending)
return df.head(rank)
def get_etf_rank(rank, order_by, ascending=False):
"""
:param rank: int, return how many rows of the dataframe
:param order_by: string, input ['近1周', '近1月', '近3月', '近6月',
'近1年', '近2年', '近3年', '今年来', '成立来']
:param ascending: bool, default False
:return: dataframe, with specific sorted dataframe
"""
df = ak.fund_em_exchange_rank()
df = df.sort_values(by=[order_by], ascending=ascending)
return df.head(rank)
def get_money_fund_rank(rank, order_by, ascending=False):
"""
:param rank: int, return how many rows of the dataframe
:param order_by: string, input ['万份收益', '年化收益率7日', '年化收益率14日', '年化收益率28日',
'近1月', '近3月', '近6月', '近1年', '近2年', '近3年', '近5年', '今年来', '成立来']
:param ascending: bool, default False
:return: dataframe, with specific sorted dataframe
"""
df = ak.fund_em_money_rank().sort_values(by=[order_by], ascending=ascending)
return df.head(rank)
def get_fund_cumulative_return(fund_code, start_date, end_date):
start_date = pd.to_datetime(start_date, format='%Y/%m/%d')
end_date = pd.to_datetime(end_date, format='%Y/%m/%d')
df = ak.fund_em_open_fund_info(fund=fund_code, indicator="累计收益率走势")
mask = (df['净值日期'] > start_date) & (df['净值日期'] <= end_date)
df = df.loc[mask].reset_index().drop('index', axis=1)
return df
def sw_industry_return_plot(start_date, end_date, year_start_date='2021-01-01', save_pic=True):
"""
:param start_date: string, start date of the week
:param end_date: string, end date of the week
:param year_start_date: string, start date of the year
:param save_pic: bool, save picture or not
:return: dataframe
"""
startDate = pd.to_datetime(start_date, format='%Y/%m/%d')
import os
import re
import zipfile
from io import BytesIO
from pathlib import Path
import numpy as np
import pandas as pd
import pandas_read_xml as pdx
from pandas_read_xml import flatten, fully_flatten
import requests
import typer
from eu_state_aids.utils import validate_year, validate_year_month
app = typer.Typer()
def normalize_cod_ce(x):
c = re.compile(r'SA[. ](\d+)')
m = re.match(c, x)
if m and len(m.groups()) == 1:
return f"SA.{m.group(1)}"
else:
return np.NAN
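# Illustrative behaviour of normalize_cod_ce (examples, not from the source):
#   normalize_cod_ce('SA.12345')   -> 'SA.12345'
#   normalize_cod_ce('SA 12345')   -> 'SA.12345'
#   normalize_cod_ce('12345')      -> np.NAN   (no 'SA' prefix; such rows are dropped later by the notnull filter)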
@app.command()
def generate_measures(
local_path: str = typer.Option(
"./data/it",
help="Local path to use for XML files. Always use forward slashes."
)
):
"""Fetch all Misure XML files locally, generate a DataFrame with the fields:
- COD_CE,
- DESC_FONDO
and store a CSV in local_path.
Create local_path if it does not exist. Forward-slash-based paths
are translated into proper paths using `pathlib`,
so, even on Windows, there's no need to use backward slashes.
"""
# create directory if not existing
local_path = Path(local_path)
if not os.path.exists(local_path):
os.makedirs(local_path)
# build remote url and filepath, out of the year,
typer.echo("Fetching all misure files")
df = pd.DataFrame()
for year in range(2014, 2022):
for month in range(1, 13):
if year == 2021 and month in (8, 10):
continue
if year == 2014 and month < 5:
continue
xml_url = f"http://eu-state-aids.s3-eu-west-1.amazonaws.com/it/rna_mirror/" \
f"OpenDataMisure/OpenData_Misura_{year}_{month:02}.xml.zip"
typer.echo(f"Processing {xml_url}")
# read remote excel file content
r = requests.get(xml_url)
z = zipfile.ZipFile(BytesIO(r.content))
z_filename = [f.filename for f in z.filelist][0]
with z.open(z_filename, "r") as zf:
ydf = pdx.read_xml(zf.read().decode(), ["ns0:LISTA_MISURE_TYPE"])
ydf = ydf.pipe(flatten).pipe(flatten)
if "MISURA|LISTA_COFINANZIAMENTI" not in ydf.columns:
continue
ydf = ydf[
ydf.notnull()["MISURA|LISTA_COFINANZIAMENTI"]
][['MISURA|COD_CE', 'MISURA|LISTA_COFINANZIAMENTI']]
ydf = ydf.pipe(flatten).pipe(flatten).pipe(flatten)
ydf.rename(columns={
'MISURA|COD_CE': 'cod_ce',
'MISURA|LISTA_COFINANZIAMENTI|COFINANZIAMENTO|DESCRIZIONE_FONDO': 'fondo_desc',
}, inplace=True)
ydf = ydf[ydf.notnull()["cod_ce"]]
if 'MISURA|LISTA_COFINANZIAMENTI|COFINANZIAMENTO|COD_FONDO' in ydf.columns:
del ydf['MISURA|LISTA_COFINANZIAMENTI|COFINANZIAMENTO|COD_FONDO']
del ydf['MISURA|LISTA_COFINANZIAMENTI|COFINANZIAMENTO|IMPORTO']
ydf['cod_ce'] = ydf.cod_ce.apply(normalize_cod_ce)
ydf = ydf[ydf.notnull()["cod_ce"]].drop_duplicates()
df = df.append(ydf)
df = df.drop_duplicates()
# emit csv
typer.echo(f"{len(df)} recordss found.")
csv_filepath = local_path / "misure.csv"
if len(df):
typer.echo(f"Writing results to {csv_filepath}")
df.to_csv(csv_filepath, na_rep='', index=False)
@app.command()
def fetch(
year_month: str,
local_path: str = typer.Option(
"./data/it",
help="Local path to use for XML files. Always use forward slashes."
)
) -> bool:
"""Fetch Aiuti XML file for the given year and month from the source and store it locally.
Create the directory if it does not exist. Default directory is ./data/it,
and it can be changed with local_path.
Forward-slash-based paths are translated into proper paths using `pathlib`,
so, even on Windows, there's no need to use backward slashes.
Returns True if the operation fetched the file successfully, False otherwise.
"""
# script parameters validations
assert(validate_year_month(year_month))
year, month = year_month.split("_")
# create directory if not existing
local_path = Path(local_path)
if not os.path.exists(local_path):
os.makedirs(local_path)
# build remote url and filepath, out of the year,
typer.echo(f"Fetching Aiuti data for year: {year}, month: {month}")
z_url = f"http://eu-state-aids.s3-eu-west-1.amazonaws.com/it/rna_mirror/" \
f"OpenDataAiuti/OpenData_Aiuti_{year}_{month}.xml.zip"
# read remote excel file content
r = requests.get(z_url)
if r.status_code == 200:
# store it locally
filepath = local_path / f"aiuti_{year}_{month}.xml.zip"
with open(filepath, 'wb') as f:
f.write(r.content)
typer.echo(f"File saved to {filepath}")
return True
else:
typer.echo("File not found")
return False
@app.command()
def export(
year_month: str,
local_path: str = typer.Option(
"./data/it",
help="Local path to use for XML files. "
),
delete_processed: bool = typer.Option(False, help="Delete zipped xml file after processing")
):
"""Read XML from local path, filter with misure from misure.csv, then
compute and emit data as CSV file.
Local path defaults to ./data/it, and can be changed with local_path.
"""
# script parameters validations
# for both use cases: single month (YYYY_MM) and full year (YYYY)
if validate_year_month(year_month):
year, month = year_month.split("_")
elif validate_year(year_month):
year, month = year_month, None
else:
typer.echo(f"Invalid year, month value: {year_month}. Use YYYY or YYYY_MM.")
return
# read misure csv from local_path
csv_filepath = Path(local_path) / "misure.csv"
misure_df = pd.read_csv(csv_filepath)
typer.echo(f"Misure dataframe read from {csv_filepath}.")
# define months range in both cases
if month is None:
months = range(1, 13)
else:
months = [int(month)]
df = pd.DataFrame()
from pandas import read_csv
from datetime import datetime
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
# load and process data
def parse(x):
return datetime.strptime(x, '%Y %m %d %H')
dataset = read_csv('data.csv', parse_dates = [['year', 'month', 'day', 'hour']], index_col=0, date_parser=parse)
dataset.drop('No', axis=1, inplace=True)
dataset.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
dataset.index.name = 'date'
dataset['pollution'].fillna(0, inplace=True)
dataset = dataset[24:]
print("||"*40)
print("** DATA PROCESSING COMPLETED **")
print(dataset.head(5))
print("||"*40)
dataset.to_csv('pollution.csv')
# generating dataset plot
from pandas import read_csv
from matplotlib import pyplot
dataset = read_csv('pollution.csv', header=0, index_col=0)
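# The snippet is truncated here. A plotting step consistent with the imports
# above (an assumption, not necessarily the author's exact code) would draw one
# subplot per variable:
#   values = dataset.values
#   groups = [0, 1, 2, 3, 5, 6, 7]          # skip the categorical wind direction column
#   for i, group in enumerate(groups, start=1):
#       pyplot.subplot(len(groups), 1, i)
#       pyplot.plot(values[:, group])
#       pyplot.title(dataset.columns[group], y=0.5, loc='right')
#   pyplot.show()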
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
import pandas as pd
import pytest
import pytz
from eeweather import (
ISDStation,
get_isd_station_metadata,
get_isd_filenames,
get_gsod_filenames,
get_isd_file_metadata,
fetch_isd_raw_temp_data,
fetch_isd_hourly_temp_data,
fetch_isd_daily_temp_data,
fetch_gsod_raw_temp_data,
fetch_gsod_daily_temp_data,
fetch_tmy3_hourly_temp_data,
fetch_cz2010_hourly_temp_data,
get_isd_hourly_temp_data_cache_key,
get_isd_daily_temp_data_cache_key,
get_gsod_daily_temp_data_cache_key,
get_tmy3_hourly_temp_data_cache_key,
get_cz2010_hourly_temp_data_cache_key,
cached_isd_hourly_temp_data_is_expired,
cached_isd_daily_temp_data_is_expired,
cached_gsod_daily_temp_data_is_expired,
validate_isd_hourly_temp_data_cache,
validate_isd_daily_temp_data_cache,
validate_gsod_daily_temp_data_cache,
validate_tmy3_hourly_temp_data_cache,
validate_cz2010_hourly_temp_data_cache,
serialize_isd_hourly_temp_data,
serialize_isd_daily_temp_data,
serialize_gsod_daily_temp_data,
serialize_tmy3_hourly_temp_data,
serialize_cz2010_hourly_temp_data,
deserialize_isd_hourly_temp_data,
deserialize_isd_daily_temp_data,
deserialize_gsod_daily_temp_data,
deserialize_tmy3_hourly_temp_data,
deserialize_cz2010_hourly_temp_data,
read_isd_hourly_temp_data_from_cache,
read_isd_daily_temp_data_from_cache,
read_gsod_daily_temp_data_from_cache,
read_tmy3_hourly_temp_data_from_cache,
read_cz2010_hourly_temp_data_from_cache,
write_isd_hourly_temp_data_to_cache,
write_isd_daily_temp_data_to_cache,
write_gsod_daily_temp_data_to_cache,
write_tmy3_hourly_temp_data_to_cache,
write_cz2010_hourly_temp_data_to_cache,
destroy_cached_isd_hourly_temp_data,
destroy_cached_isd_daily_temp_data,
destroy_cached_gsod_daily_temp_data,
destroy_cached_tmy3_hourly_temp_data,
destroy_cached_cz2010_hourly_temp_data,
load_isd_hourly_temp_data_cached_proxy,
load_isd_daily_temp_data_cached_proxy,
load_gsod_daily_temp_data_cached_proxy,
load_tmy3_hourly_temp_data_cached_proxy,
load_cz2010_hourly_temp_data_cached_proxy,
load_isd_hourly_temp_data,
load_isd_daily_temp_data,
load_gsod_daily_temp_data,
load_tmy3_hourly_temp_data,
load_cz2010_hourly_temp_data,
load_cached_isd_hourly_temp_data,
load_cached_isd_daily_temp_data,
load_cached_gsod_daily_temp_data,
load_cached_tmy3_hourly_temp_data,
load_cached_cz2010_hourly_temp_data,
)
from eeweather.exceptions import (
UnrecognizedUSAFIDError,
ISDDataNotAvailableError,
GSODDataNotAvailableError,
TMY3DataNotAvailableError,
CZ2010DataNotAvailableError,
NonUTCTimezoneInfoError,
)
from eeweather.testing import (
MockNOAAFTPConnectionProxy,
MockKeyValueStoreProxy,
mock_request_text_tmy3,
mock_request_text_cz2010,
)
@pytest.fixture
def monkeypatch_noaa_ftp(monkeypatch):
monkeypatch.setattr(
"eeweather.connections.noaa_ftp_connection_proxy", MockNOAAFTPConnectionProxy()
)
@pytest.fixture
def monkeypatch_tmy3_request(monkeypatch):
monkeypatch.setattr("eeweather.mockable.request_text", mock_request_text_tmy3)
@pytest.fixture
def monkeypatch_cz2010_request(monkeypatch):
monkeypatch.setattr("eeweather.mockable.request_text", mock_request_text_cz2010)
@pytest.fixture
def monkeypatch_key_value_store(monkeypatch):
key_value_store_proxy = MockKeyValueStoreProxy()
monkeypatch.setattr(
"eeweather.connections.key_value_store_proxy", key_value_store_proxy
)
return key_value_store_proxy.get_store()
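# The fixtures above swap eeweather's network and cache backends for in-memory
# mocks (MockNOAAFTPConnectionProxy, MockKeyValueStoreProxy and the mocked
# TMY3/CZ2010 request functions), so the tests below exercise fetch/cache logic
# without touching NOAA's FTP server, remote TMY3/CZ2010 files, or a real
# key-value store.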
def test_get_isd_station_metadata():
assert get_isd_station_metadata("722874") == {
"ba_climate_zone": "Hot-Dry",
"ca_climate_zone": "CA_08",
"elevation": "+0054.6",
"icao_code": "KCQT",
"iecc_climate_zone": "3",
"iecc_moisture_regime": "B",
"latitude": "+34.024",
"longitude": "-118.291",
"name": "DOWNTOWN L.A./USC CAMPUS",
"quality": "high",
"recent_wban_id": "93134",
"state": "CA",
"usaf_id": "722874",
"wban_ids": "93134",
}
def test_isd_station_no_load_metadata():
station = ISDStation("722880", load_metadata=False)
assert station.usaf_id == "722880"
assert station.iecc_climate_zone is None
assert station.iecc_moisture_regime is None
assert station.ba_climate_zone is None
assert station.ca_climate_zone is None
assert station.elevation is None
assert station.latitude is None
assert station.longitude is None
assert station.coords is None
assert station.name is None
assert station.quality is None
assert station.wban_ids is None
assert station.recent_wban_id is None
assert station.climate_zones == {}
assert str(station) == "722880"
assert repr(station) == "ISDStation('722880')"
def test_isd_station_no_load_metadata_invalid():
with pytest.raises(UnrecognizedUSAFIDError):
station = ISDStation("FAKE", load_metadata=False)
def test_isd_station_with_load_metadata():
station = ISDStation("722880", load_metadata=True)
assert station.usaf_id == "722880"
assert station.iecc_climate_zone == "3"
assert station.iecc_moisture_regime == "B"
assert station.ba_climate_zone == "Hot-Dry"
assert station.ca_climate_zone == "CA_09"
assert station.elevation == 236.2
assert station.icao_code == "KBUR"
assert station.latitude == 34.201
assert station.longitude == -118.358
assert station.coords == (34.201, -118.358)
assert station.name == "<NAME>"
assert station.quality == "high"
assert station.wban_ids == ["23152", "99999"]
assert station.recent_wban_id == "23152"
assert station.climate_zones == {
"ba_climate_zone": "Hot-Dry",
"ca_climate_zone": "CA_09",
"iecc_climate_zone": "3",
"iecc_moisture_regime": "B",
}
def test_isd_station_json():
station = ISDStation("722880", load_metadata=True)
assert station.json() == {
"elevation": 236.2,
"icao_code": "KBUR",
"latitude": 34.201,
"longitude": -118.358,
"name": "<NAME>",
"quality": "high",
"recent_wban_id": "23152",
"wban_ids": ["23152", "99999"],
"climate_zones": {
"ba_climate_zone": "Hot-Dry",
"ca_climate_zone": "CA_09",
"iecc_climate_zone": "3",
"iecc_moisture_regime": "B",
},
}
def test_isd_station_unrecognized_usaf_id():
with pytest.raises(UnrecognizedUSAFIDError):
station = ISDStation("FAKE", load_metadata=True)
def test_get_isd_filenames_bad_usaf_id():
with pytest.raises(UnrecognizedUSAFIDError) as excinfo:
get_isd_filenames("000000", 2007)
assert excinfo.value.value == "000000"
def test_get_isd_filenames_single_year(snapshot):
filenames = get_isd_filenames("722860", 2007)
snapshot.assert_match(filenames, "filenames")
def test_get_isd_filenames_multiple_year(snapshot):
filenames = get_isd_filenames("722860")
snapshot.assert_match(filenames, "filenames")
def test_get_isd_filenames_future_year():
filenames = get_isd_filenames("722860", 2050)
assert filenames == ["/pub/data/noaa/2050/722860-23119-2050.gz"]
def test_get_isd_filenames_with_host():
filenames = get_isd_filenames("722860", 2017, with_host=True)
assert filenames == [
"ftp://ftp.ncdc.noaa.gov/pub/data/noaa/2017/722860-23119-2017.gz"
]
def test_isd_station_get_isd_filenames(snapshot):
station = ISDStation("722860")
filenames = station.get_isd_filenames()
snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_isd_filenames_with_year(snapshot):
station = ISDStation("722860")
filenames = station.get_isd_filenames(2007)
snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_isd_filenames_with_host():
station = ISDStation("722860")
filenames = station.get_isd_filenames(2017, with_host=True)
assert filenames == [
"ftp://ftp.ncdc.noaa.gov/pub/data/noaa/2017/722860-23119-2017.gz"
]
def test_get_gsod_filenames_bad_usaf_id():
with pytest.raises(UnrecognizedUSAFIDError) as excinfo:
get_gsod_filenames("000000", 2007)
assert excinfo.value.value == "000000"
def test_get_gsod_filenames_single_year(snapshot):
filenames = get_gsod_filenames("722860", 2007)
snapshot.assert_match(filenames, "filenames")
def test_get_gsod_filenames_multiple_year(snapshot):
filenames = get_gsod_filenames("722860")
snapshot.assert_match(filenames, "filenames")
def test_get_gsod_filenames_future_year():
filenames = get_gsod_filenames("722860", 2050)
assert filenames == ["/pub/data/gsod/2050/722860-23119-2050.op.gz"]
def test_get_gsod_filenames_with_host():
filenames = get_gsod_filenames("722860", 2017, with_host=True)
assert filenames == [
"ftp://ftp.ncdc.noaa.gov/pub/data/gsod/2017/722860-23119-2017.op.gz"
]
def test_isd_station_get_gsod_filenames(snapshot):
station = ISDStation("722860")
filenames = station.get_gsod_filenames()
snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_gsod_filenames_with_year(snapshot):
station = ISDStation("722860")
filenames = station.get_gsod_filenames(2007)
snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_gsod_filenames_with_host():
station = ISDStation("722860")
filenames = station.get_gsod_filenames(2017, with_host=True)
assert filenames == [
"ftp://ftp.ncdc.noaa.gov/pub/data/gsod/2017/722860-23119-2017.op.gz"
]
def test_get_isd_file_metadata():
assert get_isd_file_metadata("722874") == [
{"usaf_id": "722874", "wban_id": "93134", "year": "2006"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2007"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2008"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2009"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2010"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2011"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2012"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2013"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2014"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2015"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2016"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2017"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2018"},
]
with pytest.raises(UnrecognizedUSAFIDError) as excinfo:
get_isd_file_metadata("000000")
assert excinfo.value.value == "000000"
def test_isd_station_get_isd_file_metadata():
station = ISDStation("722874")
assert station.get_isd_file_metadata() == [
{"usaf_id": "722874", "wban_id": "93134", "year": "2006"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2007"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2008"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2009"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2010"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2011"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2012"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2013"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2014"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2015"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2016"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2017"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2018"},
]
# fetch raw
def test_fetch_isd_raw_temp_data(monkeypatch_noaa_ftp):
data = fetch_isd_raw_temp_data("722874", 2007)
assert round(data.sum()) == 185945
assert data.shape == (11094,)
def test_fetch_gsod_raw_temp_data(monkeypatch_noaa_ftp):
data = fetch_gsod_raw_temp_data("722874", 2007)
assert data.sum() == 6509.5
assert data.shape == (365,)
# station fetch raw
def test_isd_station_fetch_isd_raw_temp_data(monkeypatch_noaa_ftp):
station = ISDStation("722874")
data = station.fetch_isd_raw_temp_data(2007)
assert round(data.sum()) == 185945
assert data.shape == (11094,)
def test_isd_station_fetch_gsod_raw_temp_data(monkeypatch_noaa_ftp):
station = ISDStation("722874")
data = station.fetch_gsod_raw_temp_data(2007)
assert data.sum() == 6509.5
assert data.shape == (365,)
# fetch raw invalid station
def test_fetch_isd_raw_temp_data_invalid_station():
with pytest.raises(UnrecognizedUSAFIDError):
fetch_isd_raw_temp_data("INVALID", 2007)
def test_fetch_gsod_raw_temp_data_invalid_station():
with pytest.raises(UnrecognizedUSAFIDError):
fetch_gsod_raw_temp_data("INVALID", 2007)
# fetch raw invalid year
def test_fetch_isd_raw_temp_data_invalid_year(monkeypatch_noaa_ftp):
with pytest.raises(ISDDataNotAvailableError):
fetch_isd_raw_temp_data("722874", 1800)
def test_fetch_gsod_raw_temp_data_invalid_year(monkeypatch_noaa_ftp):
with pytest.raises(GSODDataNotAvailableError):
fetch_gsod_raw_temp_data("722874", 1800)
# fetch file full of nans
def test_isd_station_fetch_isd_raw_temp_data_all_nan(monkeypatch_noaa_ftp):
station = ISDStation("994035")
data = station.fetch_isd_raw_temp_data(2013)
assert round(data.sum()) == 0
assert data.shape == (8611,)
# fetch
def test_fetch_isd_hourly_temp_data(monkeypatch_noaa_ftp):
data = fetch_isd_hourly_temp_data("722874", 2007)
assert data.sum() == 156160.0355
assert data.shape == (8760,)
def test_fetch_isd_daily_temp_data(monkeypatch_noaa_ftp):
data = fetch_isd_daily_temp_data("722874", 2007)
assert data.sum() == 6510.002260821784
assert data.shape == (365,)
def test_fetch_gsod_daily_temp_data(monkeypatch_noaa_ftp):
data = fetch_gsod_daily_temp_data("722874", 2007)
assert data.sum() == 6509.5
assert data.shape == (365,)
def test_fetch_tmy3_hourly_temp_data(monkeypatch_tmy3_request):
data = fetch_tmy3_hourly_temp_data("722880")
assert data.sum() == 156194.3
assert data.shape == (8760,)
def test_fetch_cz2010_hourly_temp_data(monkeypatch_cz2010_request):
data = fetch_cz2010_hourly_temp_data("722880")
assert data.sum() == 153430.90000000002
assert data.shape == (8760,)
# station fetch
def test_isd_station_fetch_isd_hourly_temp_data(monkeypatch_noaa_ftp):
station = ISDStation("722874")
data = station.fetch_isd_hourly_temp_data(2007)
assert data.sum() == 156160.0355
assert data.shape == (8760,)
def test_isd_station_fetch_isd_daily_temp_data(monkeypatch_noaa_ftp):
station = ISDStation("722874")
data = station.fetch_isd_daily_temp_data(2007)
assert data.sum() == 6510.002260821784
assert data.shape == (365,)
def test_isd_station_fetch_gsod_daily_temp_data(monkeypatch_noaa_ftp):
station = ISDStation("722874")
data = station.fetch_gsod_daily_temp_data(2007)
assert data.sum() == 6509.5
assert data.shape == (365,)
def test_tmy3_station_hourly_temp_data(monkeypatch_tmy3_request):
station = ISDStation("722880")
data = station.fetch_tmy3_hourly_temp_data()
assert data.sum() == 156194.3
assert data.shape == (8760,)
def test_cz2010_station_hourly_temp_data(monkeypatch_cz2010_request):
station = ISDStation("722880")
data = station.fetch_cz2010_hourly_temp_data()
assert data.sum() == 153430.90000000002
assert data.shape == (8760,)
# fetch invalid station
def test_fetch_isd_hourly_temp_data_invalid():
with pytest.raises(UnrecognizedUSAFIDError):
fetch_isd_hourly_temp_data("INVALID", 2007)
def test_fetch_isd_daily_temp_data_invalid():
with pytest.raises(UnrecognizedUSAFIDError):
fetch_isd_daily_temp_data("INVALID", 2007)
def test_fetch_gsod_daily_temp_data_invalid():
with pytest.raises(UnrecognizedUSAFIDError):
fetch_gsod_daily_temp_data("INVALID", 2007)
def test_fetch_tmy3_hourly_temp_data_invalid():
with pytest.raises(TMY3DataNotAvailableError):
fetch_tmy3_hourly_temp_data("INVALID")
def test_fetch_cz2010_hourly_temp_data_invalid():
with pytest.raises(CZ2010DataNotAvailableError):
fetch_cz2010_hourly_temp_data("INVALID")
def test_fetch_tmy3_hourly_temp_data_not_in_tmy3_list(monkeypatch_noaa_ftp):
data = fetch_isd_hourly_temp_data("722874", 2007)
assert data.sum() == 156160.0355
assert data.shape == (8760,)
with pytest.raises(TMY3DataNotAvailableError):
fetch_tmy3_hourly_temp_data("722874")
def test_fetch_cz2010_hourly_temp_data_not_in_cz2010_list(monkeypatch_cz2010_request):
data = fetch_cz2010_hourly_temp_data("722880")
assert data.sum() == 153430.90000000002
assert data.shape == (8760,)
with pytest.raises(CZ2010DataNotAvailableError):
fetch_cz2010_hourly_temp_data("725340")
# get cache key
def test_get_isd_hourly_temp_data_cache_key():
assert (
get_isd_hourly_temp_data_cache_key("722874", 2007) == "isd-hourly-722874-2007"
)
def test_get_isd_daily_temp_data_cache_key():
assert get_isd_daily_temp_data_cache_key("722874", 2007) == "isd-daily-722874-2007"
def test_get_gsod_daily_temp_data_cache_key():
assert (
get_gsod_daily_temp_data_cache_key("722874", 2007) == "gsod-daily-722874-2007"
)
def test_get_tmy3_hourly_temp_data_cache_key():
assert get_tmy3_hourly_temp_data_cache_key("722880") == "tmy3-hourly-722880"
def test_get_cz2010_hourly_temp_data_cache_key():
assert get_cz2010_hourly_temp_data_cache_key("722880") == "cz2010-hourly-722880"
# station get cache key
def test_isd_station_get_isd_hourly_temp_data_cache_key():
station = ISDStation("722874")
assert station.get_isd_hourly_temp_data_cache_key(2007) == "isd-hourly-722874-2007"
def test_isd_station_get_isd_daily_temp_data_cache_key():
station = ISDStation("722874")
assert station.get_isd_daily_temp_data_cache_key(2007) == "isd-daily-722874-2007"
def test_isd_station_get_gsod_daily_temp_data_cache_key():
station = ISDStation("722874")
assert station.get_gsod_daily_temp_data_cache_key(2007) == "gsod-daily-722874-2007"
def test_tmy3_station_get_isd_hourly_temp_data_cache_key():
station = ISDStation("722880")
assert station.get_tmy3_hourly_temp_data_cache_key() == "tmy3-hourly-722880"
def test_cz2010_station_get_isd_hourly_temp_data_cache_key():
station = ISDStation("722880")
assert station.get_cz2010_hourly_temp_data_cache_key() == "cz2010-hourly-722880"
# cache expired empty
def test_cached_isd_hourly_temp_data_is_expired_empty(monkeypatch_key_value_store):
assert cached_isd_hourly_temp_data_is_expired("722874", 2007) is True
def test_cached_isd_daily_temp_data_is_expired_empty(monkeypatch_key_value_store):
assert cached_isd_daily_temp_data_is_expired("722874", 2007) is True
def test_cached_gsod_daily_temp_data_is_expired_empty(monkeypatch_key_value_store):
assert cached_gsod_daily_temp_data_is_expired("722874", 2007) is True
# station cache expired empty
def test_isd_station_cached_isd_hourly_temp_data_is_expired_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.cached_isd_hourly_temp_data_is_expired(2007) is True
def test_isd_station_cached_isd_daily_temp_data_is_expired_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.cached_isd_daily_temp_data_is_expired(2007) is True
def test_isd_station_cached_gsod_daily_temp_data_is_expired_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.cached_gsod_daily_temp_data_is_expired(2007) is True
# cache expired false
def test_cached_isd_hourly_temp_data_is_expired_false(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_hourly_temp_data_cached_proxy("722874", 2007)
assert cached_isd_hourly_temp_data_is_expired("722874", 2007) is False
def test_cached_isd_daily_temp_data_is_expired_false(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_daily_temp_data_cached_proxy("722874", 2007)
assert cached_isd_daily_temp_data_is_expired("722874", 2007) is False
def test_cached_gsod_daily_temp_data_is_expired_false(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_gsod_daily_temp_data_cached_proxy("722874", 2007)
assert cached_gsod_daily_temp_data_is_expired("722874", 2007) is False
# cache expired true
def test_cached_isd_hourly_temp_data_is_expired_true(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_hourly_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_isd_hourly_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert cached_isd_hourly_temp_data_is_expired("722874", 2007) is True
def test_cached_isd_daily_temp_data_is_expired_true(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_daily_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_isd_daily_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert cached_isd_daily_temp_data_is_expired("722874", 2007) is True
def test_cached_gsod_daily_temp_data_is_expired_true(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_gsod_daily_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_gsod_daily_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert cached_gsod_daily_temp_data_is_expired("722874", 2007) is True
# validate cache empty
def test_validate_isd_hourly_temp_data_cache_empty(monkeypatch_key_value_store):
assert validate_isd_hourly_temp_data_cache("722874", 2007) is False
def test_validate_isd_daily_temp_data_cache_empty(monkeypatch_key_value_store):
assert validate_isd_daily_temp_data_cache("722874", 2007) is False
def test_validate_gsod_daily_temp_data_cache_empty(monkeypatch_key_value_store):
assert validate_gsod_daily_temp_data_cache("722874", 2007) is False
def test_validate_tmy3_hourly_temp_data_cache_empty(monkeypatch_key_value_store):
assert validate_tmy3_hourly_temp_data_cache("722880") is False
def test_validate_cz2010_hourly_temp_data_cache_empty(monkeypatch_key_value_store):
assert validate_cz2010_hourly_temp_data_cache("722880") is False
# station validate cache empty
def test_isd_station_validate_isd_hourly_temp_data_cache_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.validate_isd_hourly_temp_data_cache(2007) is False
def test_isd_station_validate_isd_daily_temp_data_cache_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.validate_isd_daily_temp_data_cache(2007) is False
def test_isd_station_validate_gsod_daily_temp_data_cache_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.validate_gsod_daily_temp_data_cache(2007) is False
def test_isd_station_validate_tmy3_hourly_temp_data_cache_empty(
monkeypatch_key_value_store
):
station = ISDStation("722880")
assert station.validate_tmy3_hourly_temp_data_cache() is False
def test_isd_station_validate_cz2010_hourly_temp_data_cache_empty(
monkeypatch_key_value_store
):
station = ISDStation("722880")
assert station.validate_cz2010_hourly_temp_data_cache() is False
# error on non-existent when relying on cache
def test_raise_on_missing_isd_hourly_temp_data_cache_data_no_web_fetch(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
with pytest.raises(ISDDataNotAvailableError):
load_isd_hourly_temp_data_cached_proxy("722874", 1907, fetch_from_web=False)
def test_raise_on_missing_isd_daily_temp_data_cache_data_no_web_fetch(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
with pytest.raises(ISDDataNotAvailableError):
load_isd_daily_temp_data_cached_proxy("722874", 1907, fetch_from_web=False)
def test_raise_on_missing_gsod_daily_temp_data_cache_data_no_web_fetch(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
with pytest.raises(GSODDataNotAvailableError):
load_gsod_daily_temp_data_cached_proxy("722874", 1907, fetch_from_web=False)
def test_raise_on_missing_tmy3_hourly_temp_data_cache_data_no_web_fetch(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
with pytest.raises(TMY3DataNotAvailableError):
load_tmy3_hourly_temp_data_cached_proxy("722874", fetch_from_web=False)
def test_raise_on_missing_cz2010_hourly_temp_data_cache_data_no_web_fetch(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
with pytest.raises(CZ2010DataNotAvailableError):
load_cz2010_hourly_temp_data_cached_proxy("722874", fetch_from_web=False)
# validate updated recently
def test_validate_isd_hourly_temp_data_cache_updated_recently(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_hourly_temp_data_cached_proxy("722874", 2007)
assert validate_isd_hourly_temp_data_cache("722874", 2007) is True
def test_validate_isd_daily_temp_data_cache_updated_recently(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_daily_temp_data_cached_proxy("722874", 2007)
assert validate_isd_daily_temp_data_cache("722874", 2007) is True
def test_validate_gsod_daily_temp_data_cache_updated_recently(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_gsod_daily_temp_data_cached_proxy("722874", 2007)
assert validate_gsod_daily_temp_data_cache("722874", 2007) is True
def test_validate_tmy3_hourly_temp_data_cache_updated_recently(
monkeypatch_tmy3_request, monkeypatch_key_value_store
):
load_tmy3_hourly_temp_data_cached_proxy("722880")
assert validate_tmy3_hourly_temp_data_cache("722880") is True
def test_validate_cz2010_hourly_temp_data_cache_updated_recently(
monkeypatch_cz2010_request, monkeypatch_key_value_store
):
load_cz2010_hourly_temp_data_cached_proxy("722880")
assert validate_cz2010_hourly_temp_data_cache("722880") is True
# validate expired
def test_validate_isd_hourly_temp_data_cache_expired(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_hourly_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_isd_hourly_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert validate_isd_hourly_temp_data_cache("722874", 2007) is False
def test_validate_isd_daily_temp_data_cache_expired(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_daily_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_isd_daily_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert validate_isd_daily_temp_data_cache("722874", 2007) is False
def test_validate_gsod_daily_temp_data_cache_expired(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_gsod_daily_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_gsod_daily_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert validate_gsod_daily_temp_data_cache("722874", 2007) is False
# serialize
def test_serialize_isd_hourly_temp_data():
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert serialize_isd_hourly_temp_data(ts) == [["2017010100", 1]]
def test_serialize_isd_daily_temp_data():
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert serialize_isd_daily_temp_data(ts) == [["20170101", 1]]
def test_serialize_gsod_daily_temp_data():
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert serialize_gsod_daily_temp_data(ts) == [["20170101", 1]]
def test_serialize_tmy3_hourly_temp_data():
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert serialize_tmy3_hourly_temp_data(ts) == [["2017010100", 1]]
def test_serialize_cz2010_hourly_temp_data():
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert serialize_cz2010_hourly_temp_data(ts) == [["2017010100", 1]]
# station serialize
def test_isd_station_serialize_isd_hourly_temp_data():
station = ISDStation("722874")
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert station.serialize_isd_hourly_temp_data(ts) == [["2017010100", 1]]
def test_isd_station_serialize_isd_daily_temp_data():
station = ISDStation("722874")
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert station.serialize_isd_daily_temp_data(ts) == [["20170101", 1]]
def test_isd_station_serialize_gsod_daily_temp_data():
station = ISDStation("722874")
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert station.serialize_gsod_daily_temp_data(ts) == [["20170101", 1]]
def test_isd_station_serialize_tmy3_hourly_temp_data():
station = ISDStation("722880")
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert station.serialize_tmy3_hourly_temp_data(ts) == [["2017010100", 1]]
def test_isd_station_serialize_cz2010_hourly_temp_data():
station = ISDStation("722880")
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert station.serialize_cz2010_hourly_temp_data(ts) == [["2017010100", 1]]
# deserialize
def test_deserialize_isd_hourly_temp_data():
ts = deserialize_isd_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
def test_deserialize_isd_daily_temp_data():
ts = deserialize_isd_daily_temp_data([["20170101", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "D"
def test_deserialize_gsod_daily_temp_data():
ts = deserialize_gsod_daily_temp_data([["20170101", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "D"
def test_deserialize_tmy3_hourly_temp_data():
ts = deserialize_tmy3_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
def test_deserialize_cz2010_hourly_temp_data():
ts = deserialize_cz2010_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
# station deserialize
def test_isd_station_deserialize_isd_hourly_temp_data():
station = ISDStation("722874")
ts = station.deserialize_isd_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
def test_isd_station_deserialize_isd_daily_temp_data():
station = ISDStation("722874")
ts = station.deserialize_isd_daily_temp_data([["20170101", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "D"
def test_isd_station_deserialize_gsod_daily_temp_data():
station = ISDStation("722874")
ts = station.deserialize_gsod_daily_temp_data([["20170101", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "D"
def test_isd_station_deserialize_tmy3_hourly_temp_data():
station = ISDStation("722880")
ts = station.deserialize_tmy3_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
def test_isd_station_deserialize_cz2010_hourly_temp_data():
station = ISDStation("722880")
ts = station.deserialize_cz2010_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
# write read destroy
def test_write_read_destroy_isd_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
store = monkeypatch_key_value_store
key = get_isd_hourly_temp_data_cache_key("123456", 1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
write_isd_hourly_temp_data_to_cache("123456", 1990, ts1)
assert store.key_exists(key) is True
ts2 = read_isd_hourly_temp_data_from_cache("123456", 1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
destroy_cached_isd_hourly_temp_data("123456", 1990)
assert store.key_exists(key) is False
def test_write_read_destroy_isd_daily_temp_data_to_from_cache(
monkeypatch_key_value_store
):
store = monkeypatch_key_value_store
key = get_isd_daily_temp_data_cache_key("123456", 1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
write_isd_daily_temp_data_to_cache("123456", 1990, ts1)
assert store.key_exists(key) is True
ts2 = read_isd_daily_temp_data_from_cache("123456", 1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
destroy_cached_isd_daily_temp_data("123456", 1990)
assert store.key_exists(key) is False
def test_write_read_destroy_gsod_daily_temp_data_to_from_cache(
monkeypatch_key_value_store
):
store = monkeypatch_key_value_store
key = get_gsod_daily_temp_data_cache_key("123456", 1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
write_gsod_daily_temp_data_to_cache("123456", 1990, ts1)
assert store.key_exists(key) is True
ts2 = read_gsod_daily_temp_data_from_cache("123456", 1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
destroy_cached_gsod_daily_temp_data("123456", 1990)
assert store.key_exists(key) is False
def test_write_read_destroy_tmy3_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
store = monkeypatch_key_value_store
key = get_tmy3_hourly_temp_data_cache_key("123456")
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
write_tmy3_hourly_temp_data_to_cache("123456", ts1)
assert store.key_exists(key) is True
ts2 = read_tmy3_hourly_temp_data_from_cache("123456")
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
destroy_cached_tmy3_hourly_temp_data("123456")
assert store.key_exists(key) is False
def test_write_read_destroy_cz2010_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
store = monkeypatch_key_value_store
key = get_cz2010_hourly_temp_data_cache_key("123456")
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
write_cz2010_hourly_temp_data_to_cache("123456", ts1)
assert store.key_exists(key) is True
ts2 = read_cz2010_hourly_temp_data_from_cache("123456")
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
destroy_cached_cz2010_hourly_temp_data("123456")
assert store.key_exists(key) is False
# station write read destroy
def test_isd_station_write_read_destroy_isd_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
station = ISDStation("722874")
store = monkeypatch_key_value_store
key = station.get_isd_hourly_temp_data_cache_key(1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
station.write_isd_hourly_temp_data_to_cache(1990, ts1)
assert store.key_exists(key) is True
ts2 = station.read_isd_hourly_temp_data_from_cache(1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
station.destroy_cached_isd_hourly_temp_data(1990)
assert store.key_exists(key) is False
def test_isd_station_write_read_destroy_isd_daily_temp_data_to_from_cache(
monkeypatch_key_value_store
):
station = ISDStation("722874")
store = monkeypatch_key_value_store
key = station.get_isd_daily_temp_data_cache_key(1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
station.write_isd_daily_temp_data_to_cache(1990, ts1)
assert store.key_exists(key) is True
ts2 = station.read_isd_daily_temp_data_from_cache(1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
station.destroy_cached_isd_daily_temp_data(1990)
assert store.key_exists(key) is False
def test_isd_station_write_read_destroy_gsod_daily_temp_data_to_from_cache(
monkeypatch_key_value_store
):
station = ISDStation("722874")
store = monkeypatch_key_value_store
key = station.get_gsod_daily_temp_data_cache_key(1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
station.write_gsod_daily_temp_data_to_cache(1990, ts1)
assert store.key_exists(key) is True
ts2 = station.read_gsod_daily_temp_data_from_cache(1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
station.destroy_cached_gsod_daily_temp_data(1990)
assert store.key_exists(key) is False
def test_isd_station_write_read_destroy_tmy3_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
station = ISDStation("722880")
store = monkeypatch_key_value_store
key = station.get_tmy3_hourly_temp_data_cache_key()
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
station.write_tmy3_hourly_temp_data_to_cache(ts1)
assert store.key_exists(key) is True
ts2 = station.read_tmy3_hourly_temp_data_from_cache()
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
station.destroy_cached_tmy3_hourly_temp_data()
assert store.key_exists(key) is False
def test_isd_station_write_read_destroy_cz2010_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
station = ISDStation("722880")
store = monkeypatch_key_value_store
key = station.get_cz2010_hourly_temp_data_cache_key()
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
station.write_cz2010_hourly_temp_data_to_cache(ts1)
assert store.key_exists(key) is True
ts2 = station.read_cz2010_hourly_temp_data_from_cache()
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
station.destroy_cached_cz2010_hourly_temp_data()
assert store.key_exists(key) is False
# load cached proxy
def test_load_isd_hourly_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = load_isd_hourly_temp_data_cached_proxy("722874", 2007)
ts2 = load_isd_hourly_temp_data_cached_proxy("722874", 2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_load_isd_daily_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = load_isd_daily_temp_data_cached_proxy("722874", 2007)
ts2 = load_isd_daily_temp_data_cached_proxy("722874", 2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_load_gsod_daily_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = load_gsod_daily_temp_data_cached_proxy("722874", 2007)
ts2 = load_gsod_daily_temp_data_cached_proxy("722874", 2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_load_tmy3_hourly_temp_data_cached_proxy(
monkeypatch_tmy3_request, monkeypatch_key_value_store
):
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = load_tmy3_hourly_temp_data_cached_proxy("722880", 2007)
ts2 = load_tmy3_hourly_temp_data_cached_proxy("722880", 2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_load_cz2010_hourly_temp_data_cached_proxy(
monkeypatch_cz2010_request, monkeypatch_key_value_store
):
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = load_cz2010_hourly_temp_data_cached_proxy("722880", 2007)
ts2 = load_cz2010_hourly_temp_data_cached_proxy("722880", 2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
# station load cached proxy
def test_isd_station_load_isd_hourly_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
station = ISDStation("722874")
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = station.load_isd_hourly_temp_data_cached_proxy(2007)
ts2 = station.load_isd_hourly_temp_data_cached_proxy(2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_isd_station_load_isd_daily_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
station = ISDStation("722874")
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = station.load_isd_daily_temp_data_cached_proxy(2007)
ts2 = station.load_isd_daily_temp_data_cached_proxy(2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_isd_station_load_gsod_daily_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
station = ISDStation("722874")
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = station.load_gsod_daily_temp_data_cached_proxy(2007)
ts2 = station.load_gsod_daily_temp_data_cached_proxy(2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_isd_station_load_tmy3_hourly_temp_data_cached_proxy(
monkeypatch_tmy3_request, monkeypatch_key_value_store
):
station = ISDStation("722880")
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = station.load_tmy3_hourly_temp_data_cached_proxy()
ts2 = station.load_tmy3_hourly_temp_data_cached_proxy()
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_isd_station_load_cz2010_hourly_temp_data_cached_proxy(
monkeypatch_cz2010_request, monkeypatch_key_value_store
):
station = ISDStation("722880")
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = station.load_cz2010_hourly_temp_data_cached_proxy()
ts2 = station.load_cz2010_hourly_temp_data_cached_proxy()
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
# load data between dates
def test_load_isd_hourly_temp_data(monkeypatch_noaa_ftp, monkeypatch_key_value_store):
start = datetime(2006, 1, 3, tzinfo=pytz.UTC)
end = datetime(2007, 4, 3, tzinfo=pytz.UTC)
ts, warnings = load_isd_hourly_temp_data("722874", start, end)
assert ts.index[0] == start
assert pd.isnull(ts[0])
# coding: utf-8
# # Comparison between Chamber of Deputies CEAP datasets 1.0 and 2.0
#
# This notebook compares the Chamber's old CEAP dataset (the huge XML files) with the new one (CSV by year). The main objective of this comparison is to show that we didn't lose any data in the migration from version 1.0 to the much more efficient version 2.0. This validates changes to serenata-toolbox so we can ditch the 1.0 datasets for good and be prepared for their extinction by the Chamber's Open Data team.
#
# Let's begin by loading both old and new datasets
#
# In[1]:
import pandas as pd
pd.set_option('max_columns', 500)
# In[2]:
from serenata_toolbox.datasets import Datasets
datasets = Datasets('../data')
datasets.downloader.download('2017-05-21-reimbursements.old.xz')
datasets.downloader.download('2017-05-21-reimbursements.new.xz')
# In[3]:
old_dataset = pd.read_csv('../data/2017-05-21-reimbursements.old.xz',
compression='xz',
low_memory=False)
# In[4]:
new_dataset = pd.read_csv('../data/2017-05-21-reimbursements.new.xz',
compression='xz',
low_memory=False)
# First we need to check whether both datasets have the same columns, in the same order:
# In[5]:
old_keys = old_dataset.keys()
new_keys = new_dataset.keys()
print(old_keys==new_keys)
# We can also make sure they have the same types for all columns
# In[6]:
new_dataset.dtypes == old_dataset.dtypes
# Now we can slice the datasets by year and compare their sizes. We also remove the current year, because this ongoing registry seems to be updated at a different pace in each version, so it makes no sense to compare it:
# In[7]:
old_dataset = old_dataset[old_dataset['year'] != 2017]
new_dataset = new_dataset[new_dataset['year'] != 2017]
for year in pd.unique(old_dataset['year']):
    # compare per-year record counts between the two dataset versions
    print(year,
          old_dataset[old_dataset['year'] == year].shape,
          new_dataset[new_dataset['year'] == year].shape)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from numpy import mean, var, median
from scipy import stats
from collections import Counter
def find_gene_index(gene_list,gene):
j = [i for i,x in enumerate(gene_list) if x == gene]
return j
def find_patients_index(patients, p):
j = [i for i,x in enumerate(patients) if x == p]
return j[0]
filename = "log_modified_LAML_TPM.csv"
filename2 = "patients.txt"
filename3 = "FAB.txt"
filename4 = "sex.txt"
filename5 = "age.txt"
filename6 = "BM_blasts.txt"
filename7 = "WBC.txt"
#filename = "modified_raw_counts.csv"
data = pd.read_csv(filename)
patients = pd.read_csv(filename2)
FAB = pd.read_csv(filename3)
sex = pd.read_csv(filename4)
age = pd.read_csv(filename5)
blasts = pd.read_csv(filename6)
from __future__ import division
from unittest import TestCase
from nose_parameterized import parameterized
from pandas import (
Series,
DataFrame,
date_range,
datetime,
Panel
)
from pandas.util.testing import (assert_frame_equal,
assert_series_equal)
from pyfolio.capacity import (days_to_liquidate_positions,
get_max_days_to_liquidate_by_ticker,
get_low_liquidity_transactions,
daily_txns_with_bar_data,
apply_slippage_penalty)
class CapacityTestCase(TestCase):
dates = date_range(start='2015-01-01', freq='D', periods=3)
positions = DataFrame([[1.0, 3.0, 0.0],
[0.0, 1.0, 1.0],
[3.0, 0.0, 1.0]],
columns=['A', 'B', 'cash'], index=dates)
transactions = DataFrame(data=[[1, 100000, 10, 'A']] * len(dates),
columns=['sid', 'amount', 'price', 'symbol'],
index=dates)
volume = DataFrame([[1.0, 3.0],
[2.0, 2.0],
[3.0, 1.0]],
columns=['A', 'B'], index=dates)
volume = volume * 1000000
price = DataFrame([[1.0, 1.0]] * len(dates),
columns=['A', 'B'], index=dates)
market_data = Panel({'volume': volume, 'price': price})
def test_days_to_liquidate_positions(self):
dtlp = days_to_liquidate_positions(self.positions,
self.market_data,
max_bar_consumption=1,
capital_base=1e6,
mean_volume_window=1)
expected = DataFrame([[0.0, .5/3],
[0.75/2, 0.0]],
columns=['A', 'B'],
index=self.dates[1:])
assert_frame_equal(dtlp, expected)
def test_get_max_days_to_liquidate_by_ticker(self):
mdtl = get_max_days_to_liquidate_by_ticker(self.positions,
self.market_data,
max_bar_consumption=1,
capital_base=1e6,
mean_volume_window=1)
expected = DataFrame([[datetime(2015, 1, 3), .75/2, 75.],
[datetime(2015, 1, 2), .5/3, 50.]],
columns=[
'date', 'days_to_liquidate', 'pos_alloc_pct'],
index=['A', 'B'])
expected.index.name = 'symbol'
assert_frame_equal(mdtl, expected)
@parameterized.expand([(DataFrame([[datetime(2015, 1, 1), 100.],
[datetime(2015, 1, 2), 100]],
columns=['date', 'max_pct_bar_consumed'],
index=['A', 'B']), None),
(DataFrame([[datetime(2015, 1, 3), (1/3)*100.]],
columns=['date', 'max_pct_bar_consumed'],
index=['A']), 1)])
def test_get_low_liquidity_transactions(self, expected, last_n_days):
txn_daily = DataFrame(data=[[1, 1000000, 1, 'A'],
[2, 2000000, 1, 'B'],
[1, 1000000, 1, 'A']],
columns=['sid', 'amount', 'price', 'symbol'],
index=self.dates)
llt = get_low_liquidity_transactions(txn_daily, self.market_data,
last_n_days=last_n_days)
expected.index.name = 'symbol'
assert_frame_equal(llt, expected)
def test_daily_txns_with_bar_data(self):
daily_txn = daily_txns_with_bar_data(
self.transactions, self.market_data)
expected = DataFrame(data=[['A', 100000, 1.0, 1000000.],
['A', 100000, 1.0, 2000000.],
['A', 100000, 1.0, 3000000.]],
columns=['symbol', 'amount', 'price', 'volume'],
index=self.dates)
assert_frame_equal(daily_txn, expected, check_less_precise=True)
@parameterized.expand([(1000000, 1, [0.9995, 0.9999375, 0.99998611]),
(10000000, 1, [0.95, 0.99375, 0.998611]),
(100000, 1, [0.999995, 0.999999375, 0.9999998611]),
(1000000, .1, [0.99995, 0.99999375, 0.999998611])])
def test_apply_slippage_penalty(self, starting_base, impact,
expected_adj_returns):
returns = Series([1., 1., 1.], index=self.dates)
daily_txn = daily_txns_with_bar_data(
self.transactions, self.market_data)
adj_returns = apply_slippage_penalty(
returns, daily_txn, starting_base, 1000000, impact=impact)
expected_adj_returns = Series(expected_adj_returns, index=self.dates)
assert_series_equal(adj_returns, expected_adj_returns)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from config import test_snr_dB
import pandas as pd
from scipy.stats import ttest_1samp
def plot_paper_results(folder_envtfs, folder_stft):
sns.set(style="whitegrid")
df_env = pd.read_csv('models\\' + folder_envtfs + '\\results.csv', sep=';')
df_stft = pd.read_csv('models\\' + folder_stft + '\\results.csv', sep=';')
df_orig = df_env.copy()
df_orig = df_orig.drop(['eSTOI pred.'],axis=1)
df_orig = df_orig.drop(['PESQ pred.'],axis=1)
df_orig = df_orig.rename(columns={'eSTOI orig.':'eSTOI pred.'})
df_orig = df_orig.rename(columns={'PESQ orig.':'PESQ pred.'})
df_orig[' '] = 'Original'
df_env[' '] = 'ENV-TFS'
df_stft[' '] = 'STFT'
df = pd.concat([df_orig, df_stft, df_env])
sns.set(style="ticks",font='STIXGeneral')
fig = plt.figure(figsize=(11, 4.5))
size=16
plt.subplot(121)
ax = sns.boxplot(x='SNR', y='eSTOI pred.', hue=' ', data=df, fliersize=1)
plt.xlabel('SNR (dB)', {'size': size})
plt.ylabel('eSTOI', {'size': size})
ax.legend_.remove()
# ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.8)
ax.tick_params(labelsize=size)
lines, labels = ax.get_legend_handles_labels()
# fig.legend(lines, labels, loc='upper center')
fig.legend(lines, labels, loc='upper center', bbox_to_anchor=(0.53, 0.10), shadow = False, ncol = 3, prop={'size': size-3})
plt.tight_layout()
# plt.savefig('fig4.1_estoi_total.pdf',dpi=2000)
# plt.show()
# plt.figure(figsize=(11, 4.5))
plt.subplot(122)
ax = sns.boxplot(x='SNR', y='PESQ pred.', hue=' ', data=df, fliersize=1)
ax.legend_.remove()
ax.tick_params(labelsize=size)
# ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.11), ncol = 3)
plt.xlabel('SNR (dB)',{'size': size})
plt.ylabel('PESQ', {'size': size})
# ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.8)
plt.tight_layout()
plt.savefig('fig4_estoi_pesq_total.pdf',dpi=2000)
plt.show()
# multi plot
sns.set(style="ticks",font='STIXGeneral',font_scale=1.3)
g = sns.relplot(x="SNR", y="eSTOI pred.", hue = " ", col = "Noise", data = df, kind = "line",
col_wrap=5, height=2.5, aspect=0.8, legend='full')
# plt.tight_layout()
g.fig.subplots_adjust(wspace=0.10)
g.set_ylabels('eSTOI')
g.set_xlabels('SNR (dB)')
g.set(xticks=[-6, 0, 6])
g.set(xlim=(min(test_snr_dB), max(test_snr_dB)))
g.set(ylim=(0, 1))
g.set_titles("{col_name}",)
# for a in g.axes:
# a.axhline(a.get_yticks()[1], alpha=0.5, color='grey')
leg = g._legend
leg.set_bbox_to_anchor([0.84, 0.86]) # coordinates of lower left of bounding box
leg._loc = 1
plt.savefig('fig5_estoi_per_noise.pdf',bbox_inches='tight',dpi=2000)
plt.show()
# eSTOI increase histogram
plt.figure()
ax = sns.distplot(df_env['eSTOI pred.'] - df_env['eSTOI orig.'], kde_kws={"shade": True}, norm_hist=True, label='ENV-TFS')
sns.distplot(df_stft['eSTOI pred.'] - df_stft['eSTOI orig.'], kde_kws={"shade": True}, norm_hist=True, label='STFT')
plt.legend()
vals = ax.get_xticks()
ax.set_xticklabels(['{:,.0%}'.format(x) for x in vals])
plt.xlabel('eSTOI increase')
plt.ylabel('density')
plt.tight_layout()
plt.show()
# PESQ increase per snr histogram
# ax = sns.kdeplot(df_env['SNR'], df_env['PESQ pred.'] - df_env['PESQ orig.'], cmap="Reds", shade=True,shade_lowest=False, label='ENV')
# sns.kdeplot(df_stft['SNR'], df_stft['PESQ pred.'] - df_stft['PESQ orig.'], cmap="Blues", shade=True,shade_lowest=False, label='STFT')
ax = sns.distplot(df_env['PESQ pred.'] - df_env['PESQ orig.'], kde_kws={"shade": True}, norm_hist=True,
label='ENV-TFS')
sns.distplot(df_stft['PESQ pred.'] - df_stft['PESQ orig.'], kde_kws={"shade": True}, norm_hist=True, label='STFT')
plt.legend()
vals = ax.get_xticks()
plt.xlabel('PESQ increase')
plt.ylabel('density')
plt.tight_layout()
plt.show()
return
def plot_matlab_results(folder_envtfs, folder_stft):
df_env1 = pd.read_excel('models\\' + folder_envtfs + '\\HA_1.xls')
df_env2 = pd.read_excel('models\\' + folder_envtfs + '\\HA_2.xls')
df_env3 = pd.read_excel('models\\' + folder_envtfs + '\\HA_3.xls')
df_env4 = pd.read_excel('models\\' + folder_envtfs + '\\HA_4.xls')
df_env5 = pd.read_excel('models\\' + folder_envtfs + '\\HA_5.xls')
df_env6 = pd.read_excel('models\\' + folder_envtfs + '\\HA_6.xls')
df_stft1 = pd.read_excel('models\\' + folder_stft + '\\HA_1.xls')
df_stft2 = pd.read_excel('models\\' + folder_stft + '\\HA_2.xls')
df_stft3 = pd.read_excel('models\\' + folder_stft + '\\HA_3.xls')
df_stft4 = pd.read_excel('models\\' + folder_stft + '\\HA_4.xls')
df_stft5 = pd.read_excel('models\\' + folder_stft + '\\HA_5.xls')
df_stft6 = pd.read_excel('models\\' + folder_stft + '\\HA_6.xls')
df_env1['Profile'] = 'HL1'
df_env2['Profile'] = 'HL2'
df_env3['Profile'] = 'HL3'
df_env4['Profile'] = 'HL4'
df_env5['Profile'] = 'HL5'
df_env6['Profile'] = 'HL6'
df_stft1['Profile'] = 'HL1'
df_stft2['Profile'] = 'HL2'
df_stft3['Profile'] = 'HL3'
df_stft4['Profile'] = 'HL4'
df_stft5['Profile'] = 'HL5'
df_stft6['Profile'] = 'HL6'
df_env = pd.concat([df_env1, df_env2, df_env3, df_env4, df_env5, df_env6])
df_stft = pd.concat([df_stft1, df_stft2, df_stft3, df_stft4, df_stft5, df_stft6])
df_envtemp = [df_env1, df_env2, df_env3, df_env4, df_env5, df_env6]
df_stftemp = [df_stft1, df_stft2, df_stft3, df_stft4, df_stft5, df_stft6]
for i in range(6):
df = df_envtemp[i]
dfstft = df_stftemp[i]
print('HASPI', i+1)
print("Origin: %.1f ± %.1f" % (100* df.mean()['HASPI_orig'], 100*df.std()['HASPI_orig']))
print("STFT: %.1f ± %.1f" %(100* dfstft.mean()['HASPI_predi'], 100*dfstft.std()['HASPI_predi']))
print("ENVTFS: %.1f ± %.1f" %(100* df.mean()['HASPI_predi'], 100*df.std()['HASPI_predi']))
for i in range(6):
df = df_envtemp[i]
dfstft = df_stftemp[i]
print('HASQI', i + 1)
print("Origin: %.1f ± %.1f" % (100 * df.mean()['HASQI_orig'], 100 * df.std()['HASQI_orig']))
print("STFT: %.1f ± %.1f" %(100* dfstft.mean()['HASqI_predi'], 100*dfstft.std()['HASqI_predi']))
print("ENVTFS: %.1f ± %.1f" % (100 * df.mean()['HASqI_predi'], 100 * df.std()['HASqI_predi']))
df_orig = df_env.copy()
df_orig = df_orig.drop(['HASPI_predi'], axis=1)
df_orig = df_orig.rename(columns={'HASPI_orig': 'HASPI_predi'})
df_orig[' '] = 'Original'
df_env[' '] = 'ENV-TFS'
df_stft[' '] = 'STFT'
df = pd.concat([df_orig, df_stft, df_env])
sns.set(style="ticks", font='STIXGeneral', font_scale=1.3)
g = sns.relplot(x="snrs", y="HASPI_predi", hue=' ', col="Profile", data=df, kind="line",
col_wrap=3, height=2.5, aspect=0.8, legend='full')
# plt.tight_layout()
g.fig.subplots_adjust(wspace=0.10)
g.set_ylabels('HASPI')
g.set_xlabels('SNR (dB)')
g.set(xticks=[-6, 0, 6])
g.set(xlim=(min(test_snr_dB), max(test_snr_dB)))
g.set(ylim=(0, 1))
g.set_titles("{col_name}", )
# for a in g.axes:
# a.axhline(a.get_yticks()[1], alpha=0.5, color='grey')
leg = g._legend
leg.set_bbox_to_anchor([0.89, 0.84]) # coordinates of lower left of bounding box
leg._loc = 1
from matplotlib.transforms import Bbox
plt.savefig('fig6_haspi_per_audiogram.pdf', bbox_inches=Bbox([[0., 0.], [6.8, 5.]]),dpi=2000)
plt.show()
def print_matlab_results(folder_envtfs, folder_stft):
df_env1 = pd.read_excel('models\\' + folder_envtfs + '\\HA_1.xls')
df_env2 = pd.read_excel('models\\' + folder_envtfs + '\\HA_2.xls')
df_env3 = pd.read_excel('models\\' + folder_envtfs + '\\HA_3.xls')
df_env4 = pd.read_excel('models\\' + folder_envtfs + '\\HA_4.xls')
df_env5 = pd.read_excel('models\\' + folder_envtfs + '\\HA_5.xls')
df_env6 = pd.read_excel('models\\' + folder_envtfs + '\\HA_6.xls')
df_stft1 = pd.read_excel('models\\' + folder_stft + '\\HA_1.xls')
df_stft2 = pd.read_excel('models\\' + folder_stft + '\\HA_2.xls')
df_stft3 = pd.read_excel('models\\' + folder_stft + '\\HA_3.xls')
df_stft4 = pd.read_excel('models\\' + folder_stft + '\\HA_4.xls')
df_stft5 = pd.read_excel('models\\' + folder_stft + '\\HA_5.xls')
# -*- coding: utf-8 -*-
import os
from itertools import chain
from typing import Any, List, Optional, Union, Iterator, Iterable
import numpy as np
import pandas as pd
import swifter
from joblib import Parallel, delayed
from pandas.io.parsers import TextFileReader as PandasTextFileReader
from sklearn.utils import shuffle
from scipy.stats import median_absolute_deviation as MAD
from tqdm.auto import tqdm
from .fingerprint import Fingerprint, MorganFingerprint
from .subsim_search import FPSubSim2
def equalize_cell_size_in_row(row, cols=None, fill_mode='internal', fill_value: object = ''):
"""Equalize the number of values in each list-containing cell of a pandas dataframe.
Slightly adapted from user nphaibk (https://stackoverflow.com/questions/45846765/efficient-way-to-unnest-explode-multiple-list-columns-in-a-pandas-dataframe)
:param row: pandas row the function should be applied to
:param cols: columns for which equalization must be performed
:param fill_mode: 'internal' to repeat the only/last value of a cell as much as needed
'external' to repeat fill_value as much as needed
'trim' to remove unaligned values
:param fill_value: value to repeat as much as needed to equalize cells
:return: the row with each cell having the same number of values
"""
if not cols:
cols = row.index
jcols = [j for j, v in enumerate(row.index) if v in cols]
if len(jcols) < 1:
jcols = range(len(row.index))
Ls = [len(x) for x in row.values]
if not Ls[:-1] == Ls[1:]:
vals = [v if isinstance(v, list) else [v] for v in row.values]
if fill_mode == 'external':
vals = [[e] + [fill_value] * (max(Ls) - 1) if (not j in jcols) and (isinstance(row.values[j], list))
else e + [fill_value] * (max(Ls) - len(e))
for j, e in enumerate(vals)]
elif fill_mode == 'internal':
vals = [[e] + [e] * (max(Ls) - 1) if (not j in jcols) and (isinstance(row.values[j], list))
else e + [e[-1]] * (max(Ls) - len(e))
for j, e in enumerate(vals)]
elif fill_mode == 'trim':
vals = [e[0:min(Ls)] for e in vals]
else:
raise ValueError("fill_mode must be one of ['internal', 'external', 'trim']")
row = pd.Series(vals, index=row.index.tolist())
return row
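# Illustrative sketch of equalize_cell_size_in_row on a toy frame; the column
# names below ('source', 'pchembl_value') are assumed purely for this example.
# It shows how list-cells of unequal length get padded so they can be exploded
# together.
def _demo_equalize_cell_size_in_row() -> pd.DataFrame:
    toy = pd.DataFrame({'source': [['ChEMBL', 'ExCAPE-DB']],
                        'pchembl_value': [[6.5]]})
    # With fill_mode='internal', the single pchembl_value is repeated so both
    # cells hold two items before unnesting.
    equalized = toy.apply(equalize_cell_size_in_row, axis=1, fill_mode='internal')
    return equalized.apply(pd.Series.explode)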
def keep_quality(data: Union[pd.DataFrame, PandasTextFileReader, Iterator], min_quality: str = 'high') -> Union[
pd.DataFrame, Iterator]:
"""Keep only the data with the minimum defined quality
:param data: the dataframe, chunked or not into a pandas TextFileReader, containing data to be filtered
or an Iterator of data chunks
:param min_quality: minimal quality {'high', 'medium', 'low'} to be kept
e.g. if 'medium', data of 'medium' and 'high' quality are kept
:return: the data with minimal required quality.
If input is a TextFileReader or an Iterator, the return type is an Iterator
"""
qualities = ["low", "medium", "high"]
if min_quality.lower() not in qualities:
raise ValueError(f'Quality not supported, must be one of {qualities}')
index = qualities.index(min_quality.lower())
if isinstance(data, pd.DataFrame):
filtered = data[data['Quality'].str.lower().isin(qualities[index:])]
return filtered
if isinstance(data, (PandasTextFileReader, Iterator)):
return _chunked_keep_quality(data, min_quality)
raise ValueError('data can only be a pandas DataFrame, TextFileReader or an Iterator')
def _chunked_keep_quality(chunks: Union[PandasTextFileReader, Iterator], min_quality: str = 'high'):
for chunk in chunks:
filtered_chunk = keep_quality(chunk, min_quality)
yield filtered_chunk
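# Illustrative sketch of keep_quality, assuming a Papyrus-style activity table
# with a 'Quality' column stored at the (hypothetical) path below. The filter
# accepts either a fully loaded DataFrame or a chunked TextFileReader, in which
# case it yields filtered chunks lazily.
def _demo_keep_quality(activity_file: str = 'papyrus_activities.tsv.xz'):
    # Full load: returns the filtered DataFrame directly
    full = pd.read_csv(activity_file, sep='\t', compression='xz', low_memory=False)
    high_only = keep_quality(full, min_quality='high')
    # Chunked load: returns a generator of filtered chunks
    reader = pd.read_csv(activity_file, sep='\t', compression='xz',
                         low_memory=False, chunksize=100000)
    medium_and_up = pd.concat(keep_quality(reader, min_quality='medium'))
    return high_only, medium_and_up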
def process_group(group):
"""Aggregate data from one group accordingly"""
if (group.values[0] == group.values).all(): # If all values are equal, return first record
group['pchembl_value_Mean'] = group['pchembl_value']
group['pchembl_value_StdDev'] = np.NaN
group['pchembl_value_SEM'] = np.NaN
group['pchembl_value_N'] = 1
group['pchembl_value_Median'] = group['pchembl_value']
group['pchembl_value_MAD'] = np.NaN
return group.iloc[:1, :]
listvals = lambda x: ';'.join(set(str(y) for y in x)) if (x.values[0] == x.values).all() else ';'.join(
str(y) for y in x)
listallvals = lambda x: ';'.join(str(y) for y in x)
mappings = {'source': 'first', 'CID': listvals, 'AID': listvals,
'type_IC50': listallvals, 'type_EC50': listallvals, 'type_KD': listallvals,
'type_Ki': listallvals, 'type_other': listallvals, 'relation': listvals,
'pchembl_value': listallvals}
return pd.concat([group.groupby('Activity_ID').aggregate(mappings).reset_index(),
group.groupby('Activity_ID')['pchembl_value'].aggregate(pchembl_value_Mean='mean',
pchembl_value_StdDev='std',
pchembl_value_SEM='sem',
pchembl_value_N='count',
pchembl_value_Median='median',
pchembl_value_MAD=MAD
).reset_index(drop=True)], axis=1)
def process_groups(groups):
"""Aggregate data from multiple groups"""
return pd.concat([process_group(group) for group in groups])
def keep_source(data: Union[pd.DataFrame, PandasTextFileReader, Iterator], source: Union[List[str], str] = 'all', njobs: int = 1,
verbose: bool = False) -> pd.DataFrame:
"""Keep only the data from the defined source(s).
:param data: the dataframe containing data to be filtered
:param source: source(s) to be kept; 'all' or 'any' to keep all data
:param njobs: number of cores on which multiple processes are spawned to speed up filtering
:param verbose: whether to show progress bars
:return: the data from the specified source(s) only;
aggregated values (mean, median, SEM, ...) are re-calculated to match only
the specified source(s)
"""
# Deal with chunked data
if isinstance(data, (PandasTextFileReader, Iterator)):
return _chunked_keep_source(data, source, njobs)
# Raise error if not correct type
elif not isinstance(data, pd.DataFrame):
raise ValueError('data can only be a pandas DataFrame, TextFileReader or an Iterator')
# Get sources of dataset
sources_ = set(chain.from_iterable(map(lambda x: x.split(';'), data['source'].unique())))
sources = set(map(str.lower, sources_))
# Change type of source if str
if isinstance(source, str):
source = [source]
source = list(map(str.lower, source))
# Keep all data if source is a list containing 'any', 'all' or all accepted values
if 'any' in source or 'all' in source or len(set(source).intersection(sources)) == len(sources):
return data
# Source not defined
elif set(source).difference(sources):
raise ValueError(f'Source not supported, must be one of {sources}')
# Sources are defined
else:
# Columns with optional multiple values
cols2split = ['source', 'CID', 'AID', 'type_IC50', 'type_EC50', 'type_KD', 'type_Ki', 'type_other', 'relation',
'pchembl_value']
# Keep trace of order of columns
ordered_columns = data.columns.tolist()
# Keep binary data associated to source
preserved_binary = data[~data['Activity_class'].isna() & data['source'].str.lower().isin(source)]
# Separate data with multiple sources
binary_data = data[
~data['Activity_class'].isna() & data['source'].str.contains(';') & data['source'].str.contains(
'|'.join(source), case=False)]
data = data[data['Activity_class'].isna()]
if not binary_data.empty:
# Keep columns and index
binary_included = binary_data[[x for x in binary_data.columns if x in cols2split + ['Activity_ID']]]
binary_excluded = binary_data[
[x for x in binary_data.columns if x not in cols2split and not x.startswith('pchembl_value_')]]
del binary_data
binary_included = (
binary_included.set_index('Activity_ID')  # Allows unnesting data without messing with Activity_ID
.swifter.progress_bar(verbose) # Uses swifter without progress bar for apply
.apply(lambda x: x.str.split(';'))  # Split multiple values into lists
.swifter.progress_bar(verbose)
.apply(equalize_cell_size_in_row, axis=1) # Set same length of lists in each row
.swifter.progress_bar(verbose)
.apply(pd.Series.explode) # Unnest the data
.reset_index()) # Recover Activity_ID
# Filter by sources
binary_included = binary_included[binary_included['source'].str.lower().isin(source)]
# Join back with remove columns
binary_data = binary_included.merge(binary_excluded, how='inner', on='Activity_ID')[ordered_columns]
del binary_included, binary_excluded
# Separate records not needing any processing
preserved = data[data['source'].str.lower().isin(source)]
# Remove records with non-matching non-unique source
data = data[
~data['source'].str.lower().isin(source) & data['source'].str.contains(';') & data['source'].str.contains(
'|'.join(source), case=False)]
if not data.empty:
# Keep columns and index
included = data[[x for x in data.columns if x in cols2split + ['Activity_ID']]]
excluded = data[[x for x in data.columns if x not in cols2split and not x.startswith('pchembl_value_')]]
del data
included = (included.set_index('Activity_ID')  # Allows unnesting data without messing with Activity_ID
.swifter.progress_bar(verbose) # Uses swifter without progress bar for apply
.apply(lambda x: x.str.split(';'))  # Split multiple values into lists
.swifter.progress_bar(verbose)
.apply(equalize_cell_size_in_row, axis=1) # Set same length of lists in each row
.swifter.progress_bar(verbose)
.apply(pd.Series.explode) # Unnest the data
.reset_index()) # Recover Activity_ID
# Filter by sources
included = included[included['source'].str.lower().isin(source)]
# Aggregate data on Activity_ID
_, grouped = list(zip(*included.swifter.progress_bar(verbose).apply(pd.to_numeric, errors='ignore').groupby(
'Activity_ID')))
del included
# Use joblib to speed up the aggregation process
filtered = pd.concat(Parallel(n_jobs=njobs, backend='loky', verbose=int(verbose))(
delayed(process_groups)(grouped[i:i + 1000]) for i in range(0, len(grouped), 1000))).reset_index(
drop=True)
del grouped
# Join back with remove columns
data = filtered.fillna(0).merge(excluded, how='inner', on='Activity_ID')[ordered_columns]
del excluded, filtered
# Add back binary data (might be empty)
data = pd.concat([preserved, data, preserved_binary, binary_data])
del preserved, preserved_binary, binary_data
return data
def _chunked_keep_source(data: Union[PandasTextFileReader, Iterator], source: Union[List[str], str], njobs) -> pd.DataFrame:
for chunk in data:
yield keep_source(chunk, source, njobs)
def is_activity_type(row, activity_types: List[str]):
"""Check if the row matches one of the activity types
:param row: pandas row the function should be applied to
:param activity_types: activity types the row should partially match
"""
return np.any([str(row[activity_type]) == '1' for activity_type in activity_types]) and np.all(
[';' not in str(row[activity_type]) for activity_type in activity_types])
def is_multiple_types(row, activity_types: List[str]):
"""Check if the row matches one of the activity types and if they contain multiple values
:param row: pandas row the function should be applied to
:param activity_types: activity types with multiple values the row should partially match
"""
return np.any([';' in str(row[activity_type]) for activity_type in activity_types])
def keep_type(data: Union[pd.DataFrame, PandasTextFileReader, Iterator], activity_types: Union[List[str], str] = 'ic50', njobs: int = 1,
verbose: bool = False):
"""Keep only the data matching desired activity types
:param data: the dataframe containing data to be filtered
:param activity_types: type of activity to keep: {'IC50', 'EC50', 'KD', 'Ki', 'all'}
:param njobs: number of cores on which multiple processes are spawned to speed up filtering
:param verbose: whether to show progress bars
:return: the data with desired activity type(s)
"""
# Deal with chunked data
if isinstance(data, (PandasTextFileReader, Iterator)):
return _chunked_keep_type(data, activity_types, njobs)
# Raise error if not correct type
elif not isinstance(data, pd.DataFrame):
raise ValueError('data can only be a pandas DataFrame, TextFileReader or an Iterator')
# Define accepted data types
types = ['IC50', 'EC50', 'KD', 'Ki', 'other']
types_ = [x.lower() for x in types]
if isinstance(activity_types, str):
activity_types = [activity_types]
activity_types = set([x.lower() for x in activity_types])
# Keep all data if type is a list containing 'any', 'all' or all accepted values
if 'any' in activity_types or 'all' in activity_types or len(activity_types.intersection(types_)) == len(types_):
return data
# Type not defined
elif activity_types.difference(types_):
raise ValueError(f'Type not supported, must be one of {types}')
else:
# Transform activity_types to column names
activity_types = [f"type_{types[i]}" for i in range(len(types)) if types_[i] in activity_types]
# Columns with optional multiple values
cols2split = ['source', 'CID', 'AID', 'type_IC50', 'type_EC50', 'type_KD', 'type_Ki', 'type_other', 'relation',
'pchembl_value']
# Keep trace of order of columns
ordered_columns = data.columns.tolist()
# Keep binary data associated to type
preserved_binary = data[
~data['Activity_class'].isna() & data.apply(is_activity_type, activity_types=activity_types, axis=1)]
# Separate data with multiple types
binary_data = data[
~data['Activity_class'].isna() & data.apply(is_multiple_types, activity_types=activity_types, axis=1)]
data = data[data['Activity_class'].isna()]
if not binary_data.empty:
# Keep columns and index
binary_included = binary_data[[x for x in binary_data.columns if x in cols2split + ['Activity_ID']]]
binary_excluded = binary_data[
[x for x in binary_data.columns if x not in cols2split and not x.startswith('pchembl_value_')]]
del binary_data
binary_included = (
binary_included.set_index('Activity_ID') # Allows unnesting data without messing with Activity_ID
.astype(str) # Required for following split
.swifter.progress_bar(verbose) # Uses swifter without progress bar for apply
.apply(lambda x: x.str.split(';')) # Split multiple values into lists
.swifter.progress_bar(verbose)
.apply(equalize_cell_size_in_row, axis=1) # Set same length of lists in each row
.swifter.progress_bar(verbose)
.apply(pd.Series.explode) # Unnest the data
.reset_index()) # Recover Activity_ID
# Filter by type
binary_included = binary_included[
binary_included.swifter.progress_bar(verbose).apply(is_activity_type, activity_types=activity_types,
axis=1)]
# Join back with remove columns
binary_data = binary_included.merge(binary_excluded, how='inner', on='Activity_ID')[ordered_columns]
del binary_included, binary_excluded
# Separate records not needing any processing
preserved = data[data.apply(is_activity_type, activity_types=activity_types, axis=1)]
# Remove records with non-matching non-unique type
data = data[data.apply(is_multiple_types, activity_types=activity_types, axis=1)]
if not data.empty:
# Keep columns and index
included = data[[x for x in data.columns if x in cols2split + ['Activity_ID']]]
excluded = data[[x for x in data.columns if x not in cols2split and not x.startswith('pchembl_value_')]]
del data
included = (included.set_index('Activity_ID')  # Allows unnesting data without messing with Activity_ID
.astype(str) # Required for following split
.swifter.progress_bar(verbose) # Uses swifter without progress bar for apply
.apply(lambda x: x.str.split(';')) # Split multiple values into lists
.swifter.progress_bar(verbose)
.apply(equalize_cell_size_in_row, axis=1) # Set same length of lists in each row
.swifter.progress_bar(verbose)
.apply(pd.Series.explode) # Unnest the data
.reset_index()) # Recover Activity_ID
# Filter by types
included = included[included.apply(is_activity_type, activity_types=activity_types, axis=1)]
# Aggregate data on Activity_ID
_, grouped = list(zip(*included.swifter.progress_bar(verbose).apply(pd.to_numeric, errors='ignore').groupby(
'Activity_ID')))
del included
# Use joblib to speed up the aggregation process
filtered = pd.concat(Parallel(n_jobs=njobs, backend='loky', verbose=int(verbose))(
delayed(process_groups)(grouped[i:i + 1000]) for i in range(0, len(grouped), 1000))).reset_index(
drop=True)
del grouped
# Join back with remove columns
data = filtered.fillna(0).merge(excluded, how='inner', on='Activity_ID')[ordered_columns]
del excluded, filtered
# Add back binary data (might be empty)
data = pd.concat([preserved, data, preserved_binary, binary_data])
del preserved, preserved_binary, binary_data
return data
def _chunked_keep_type(data: Union[PandasTextFileReader, Iterator], activity_types: Union[List[str], str], njobs: int):
for chunk in data:
yield keep_type(chunk, activity_types, njobs)
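# Illustrative sketch combining the two aggregation-aware filters above; the
# input is assumed to be a Papyrus-style activity DataFrame. Both filters
# recompute the aggregated pchembl statistics (mean, median, SEM, ...) for
# records that mix several sources or activity types.
def _demo_keep_source_and_type(activities: pd.DataFrame) -> pd.DataFrame:
    # 'chembl' is an assumed source label: pass whichever values appear in the
    # data's 'source' column. Four worker processes handle the re-aggregation.
    chembl_only = keep_source(activities, source='chembl', njobs=4)
    return keep_type(chembl_only, activity_types=['Ki', 'KD'], njobs=4)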
def keep_accession(data: Union[pd.DataFrame, PandasTextFileReader, Iterator], accession: Union[List[str], str] = 'all'):
"""Keep only the data matching desired accession.
:param data: the dataframe containing data to be filtered
:param accession: accession to keep (e.g. 'P30542'); mutation can be specified (e.g. '')
:return: the data with desired accession(s)
"""
# Deal with chunked data
if isinstance(data, (PandasTextFileReader, Iterator)):
return _chunked_keep_accession(data, accession)
# Raise error if not correct type
elif not isinstance(data, pd.DataFrame):
raise ValueError('data can only be a pandas DataFrame, TextFileReader or an Iterator')
if isinstance(accession, str):
accession = [accession]
return data[data['target_id'].str.lower().str.contains('|'.join(accession).lower())]
def _chunked_keep_accession(data: Union[PandasTextFileReader, Iterator], accession: Union[List[str], str]):
for chunk in data:
filtered_chunk = keep_accession(chunk, accession)
yield filtered_chunk
def equalize_cell_size_in_column(col, fill_mode='internal', fill_value: object = ''):
"""Equalize the number of values in each list-containing cell of a pandas dataframe.
Adapted from user nphaibk (https://stackoverflow.com/questions/45846765/efficient-way-to-unnest-explode-multiple-list-columns-in-a-pandas-dataframe)
:param col: pandas Series the function should be applied to
:param fill_mode: 'internal' to repeat the only/last value of a cell as much as needed
'external' to repeat fill_value as much as needed
'trim' to remove unaligned values
:param fill_value: value to repeat as much as needed to equalize cells
:return: the column with each cell having the same number of values
"""
Ls = [len(x) for x in col.values]
if not Ls[:-1] == Ls[1:]:
vals = [v if isinstance(v, list) else [v] for v in col.values]
if fill_mode == 'external':
vals = [e + [fill_value] * (max(Ls) - len(e)) for j, e in enumerate(vals)]
elif fill_mode == 'internal':
vals = [e + [e[-1]] * (max(Ls) - len(e)) for j, e in enumerate(vals)]
elif fill_mode == 'trim':
vals = [e[0:min(Ls)] for e in vals]
else:
raise ValueError("fill_mode must be one of ['internal', 'external', 'trim']")
col = pd.Series(vals, index=col.index.tolist())
return col
def keep_protein_class(data: Union[pd.DataFrame, PandasTextFileReader, Iterator], protein_data: pd.DataFrame,
classes: Optional[Union[dict, List[dict]]] = [{'l2': 'Kinase'}, {'l5': 'Adenosine receptor'}],
generic_regex: bool = False):
"""Keep only the data matching desired protein classifications.
:param data: the dataframe containing data to be filtered
:param protein_data: the dataframe of Papyrus protein targets
:param classes: protein classes to keep (case insensitive).
- {'l2': 'Kinase'} matches all proteins with classification 'Enzyme->Kinase'
- {'l5': 'Adenosine receptor'} matches 'Membrane receptor->Family A G protein-coupled receptor->Small molecule receptor (family A GPCR)->Nucleotide-like receptor (family A GPCR)-> Adenosine receptor'
- All levels in the same dict are enforced, e.g. {'l1': ''Epigenetic regulator', 'l3': 'HDAC class IIb'} does not match records without the specified l1 AND l3
- If given a list of dicts, results in a union of the dicts, e.g. [{'l2': 'Kinase'}, {'l1': 'Membrane receptor'}] matches records with classification either 'Enzyme->Kinase' or 'Membrane receptor'
- Level-independent patterns can be specified with the 'l?' key, e.g. {'l?': 'SLC'} matches any classification level containing the 'SLC' keyword
Only one 'l?' per dict is supported.
Mixed usage of 'l?' and level-specific patterns (e.g. 'l1') is not supported
:param generic_regex: whether to consider generic patterns 'l?' as regex, allowing for partial match.
:return: the data with desired protein classes
"""
# Deal with chunked data
if isinstance(data, (PandasTextFileReader, Iterator)):
return _chunked_keep_protein_class(data, protein_data, classes, generic_regex)
# Raise error if not correct type
elif not isinstance(data, pd.DataFrame):
raise ValueError('data can only be a pandas DataFrame, TextFileReader or an Iterator')
# If no filter return entire dataset
if classes is None:
return data
if isinstance(classes, dict):
classes = [classes]
# Verify classification keys
keys = set(key for keys in classes for key in keys.keys())
allowed_keys = ['l?', 'l1', 'l2', 'l3', 'l4', 'l5', 'l6', 'l7', 'l8']
if keys.difference(allowed_keys):
raise ValueError(f'levels of protein classes must be of {allowed_keys}')
lvl_dependent, lvl_independent = False, False
for key in classes:
if 'l?' in key.keys():
lvl_independent = True
if len(key.keys()) > 1:
raise ValueError(f'only one pattern per "l?" is accepted')
else:
lvl_dependent = True
# Split classifications
## 1) Handle multiple classifications
split_classes = protein_data['Classification'].str.split(';')
split_classes = equalize_cell_size_in_column(split_classes, 'external', '')
split_classes = pd.DataFrame(split_classes.tolist())
## 2) Split into classification levels
multiplicity = len(split_classes.columns) # Number of max classifications
for j in range(multiplicity):
split_classes.iloc[:, j] = split_classes.iloc[:, j].str.split('->')
split_classes.iloc[:, j] = equalize_cell_size_in_column(split_classes.iloc[:, j], 'external', '')
# Ensure 8 levels of classification
for _ in range(8 - len(split_classes.iloc[0, j])):
split_classes.iloc[0, j].append('')
split_classes.iloc[:, j] = equalize_cell_size_in_column(split_classes.iloc[:, j])
## 3) Create DataFrame with all annotations
split_classes = pd.concat(
[pd.DataFrame(split_classes.iloc[:, j].tolist(), columns=[f'l{x + 1}_{j + 1}' for x in range(8)]) for j in
range(multiplicity)], axis=1)
# Ensure case insensitivity
split_classes = split_classes.apply(lambda s: s.str.lower())
# Filter classes
## 1) Deal with specific protein classes (i.e. l1 to l8)
if lvl_dependent:
query_dpd = ') or ('.join([') or ('.join([' and '.join([f'`{subkey.lower()}_{k + 1}` == "{subval.lower()}"'
for subkey, subval in key.items()
])
for k in range(multiplicity)
])
for key in classes if 'l?' not in key.keys()
])
## 2) Deal with 'l?'
regex_indices = []
if lvl_independent:
query_idpd = ""
if generic_regex: # Use regex
regex_indices = split_classes[
eval('|'.join([f'split_classes["{subkey.lower()}"].str.lower().str.contains("{subval.lower()}", regex=True)'
for key in classes for subkey in split_classes.columns for subval in key.values() if
'l?' in key.keys()])
)].index.tolist()
else: # Complete match
query_idpd = ') or ('.join([') or ('.join([' and '.join([f'`{subkey.lower()}` == "{subval.lower()}"'
for subval in key.values()
])
for subkey in split_classes.columns
])
for key in classes if 'l?' in key.keys()
])
query = (f"{('(' + query_dpd + ')') if lvl_dependent else ''}"
f"{' or ' if lvl_dependent and lvl_independent and not generic_regex else ''}"
f"{('(' + query_idpd + ')') if lvl_independent and not generic_regex else ''}")
## 3) Execute filter
if len(query):
indices = split_classes.query(query).index.tolist()
else:
indices = []
if generic_regex:
indices = sorted(set(indices + regex_indices))
# Obtain targets from filtered indices
targets = protein_data.loc[indices, 'target_id']
# Map back to activity data
return data[data['target_id'].isin(targets)].merge(protein_data.loc[indices, ('target_id', 'Classification')], on='target_id')
def _chunked_keep_protein_class(data: Union[PandasTextFileReader, Iterator], protein_data: pd.DataFrame,
classes: Optional[Union[dict, List[dict]]],
generic_regex: bool):
for chunk in data:
filtered_chunk = keep_protein_class(chunk, protein_data, classes, generic_regex)
yield filtered_chunk
def consume_chunks(generator: Union[PandasTextFileReader, Iterator], progress: bool = True, total: int = None):
"""Transform the result of chained filters into a pandas DataFrame
:param generator: iterator to be transformed into a dataframe
:param progress: whether to show progress
:param total: total number of chunks the input is divided in
"""
data = []
if progress:
pbar = tqdm(generator, total=total)
else:
pbar = generator
for item in pbar:
if not isinstance(item, pd.DataFrame):
consumed = _consume_deeper_chunks(item)
data.extend(consumed)
else:
data.append(item)
if not len(data):
return pd.DataFrame()
return pd.concat(data, axis=0)
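# Example (illustrative sketch): chain the chunk-aware filter with consume_chunks,
# assuming `chunked_reader` is a pandas TextFileReader over the Papyrus activity file
# and `protein_df` holds the protein targets:
#   filtered = keep_protein_class(chunked_reader, protein_df, classes={'l2': 'Kinase'})
#   kinase_df = consume_chunks(filtered, progress=True, total=60)  # `total` only drives the progress bar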
def _consume_deeper_chunks(generator: Union[PandasTextFileReader, Iterator]):
"""Transform the result of chained filters into a pandas DataFrame.
Internal function. One must use consume_chunks instead.
:param generator: iterator to be transformed into a dataframe
"""
data = []
for item in generator:
if not isinstance(item, pd.DataFrame):
consumed = consume_chunks(item)
data.extend(consumed)
else:
data.append(item)
if not len(data):
return pd.DataFrame()
    return pd.concat(data, axis=0)
#%% [markdown]
# # Credit Card Fraud Detection
#
# ## Group 12
#%%
#Libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec # to do the grid of plots
#%% [markdown]
# ### Loading Data
#%%
# reading data from csv file
df = pd.read_csv('creditcard.csv')
#%%
# printing first 5 rows with headings
data_top = df.head()
data_top.to_csv("data_head.csv")
df.head(10)
#There are no null values in the dataset.
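#%%
# Quick check supporting the claim above (illustrative): count missing values per column.
df.isnull().sum()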
#%% [markdown]
# ## Salient Features of data:
# Total 31 attributes (including class)
# Time is a Discrete-valued numeric attribute.
# V1 to V28 are Principal Components of the original dataset, which is not available to us.
# They are a result of Principal Component Analysis.
# They are continuous valued numeric attributes. We cannot say whether they are ratio-scaled or interval-scaled
# Amount is a continuous-valued numeric attribute.
# Class is a discrete-valued Binary attribute that takes value 0 for non-fraudulent transaction and 1 for fraud transaction.
# V1 to V28 are distributed around 0 and are scaled.
# From V1 to V28, the variance of attributes decreases from left to right, as expected from a PCA output.
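#%%
# Illustrative check of the statements above (assumes the PCA columns are named V1..V28):
# means should be close to 0 and variances should shrink from V1 to V28.
pca_cols = ['V' + str(i) for i in range(1, 29)]
df[pca_cols].agg(['mean', 'var']).T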
#%%
# printing 5-number summary and basic info about the data
data_summary = df.describe()
data_summary.to_csv("data_summary.csv")
df.describe()
#%% [markdown]
# ### Checking on Amount and Time Data
#%%
df[['Time', 'Amount']].describe()
# Time and Amount are not scaled.
#%% [markdown]
# # Visualizing Data Distribution
#%%
# Time and Amount Distribution
print('Non Fraudulent: ', round(df['Class'].value_counts()[0]/len(df) * 100,3), '% of the dataset')
print('Fraudulent: ', round(df['Class'].value_counts()[1]/len(df) * 100,3), '% of the dataset')
# colors = ["#0101DF", "#DF0101"]
# sns.countplot('Class', data=df, palette=colors)
# plt.title('Class Distributions \n (0: No Fraud || 1: Fraud)', fontsize=14)
# plt.show()
fig, ax = plt.subplots(1, 2, figsize=(18,4))
amount_val = df['Amount'].values
time_val = df['Time'].values
sns.distplot(amount_val, ax=ax[0], color='r')
ax[0].set_title('Distribution of Transaction Amount', fontsize=14)
ax[0].set_xlim([min(amount_val), max(amount_val)])
sns.distplot(time_val, ax=ax[1], color='b')
ax[1].set_title('Distribution of Transaction Time', fontsize=14)
ax[1].set_xlim([min(time_val), max(time_val)])
# for i =1:30:
plt.show()
#%% [markdown]
# ### distribution of amount with Class:
#%%
counts = df.Class.value_counts()
normal = counts[0]
fraudulent = counts[1]
plt.figure(figsize=(8,6))
sns.barplot(x=counts.index, y=counts)
plt.title('Count of Fraudulent vs. Non-Fraudulent Transactions')
plt.ylabel('Count')
plt.xlabel('Class (0:Non-Fraudulent, 1:Fraudulent)')
#%%
# Class - Amount Plot
plt.subplot(121)
ax = sns.boxplot(x ="Class",y="Amount",
data=df)
ax.set_title("Class x Amount", fontsize=20)
ax.set_xlabel("Is Fraud?", fontsize=16)
ax.set_ylabel("Amount", fontsize = 16)
# Total Data Objects with Class 0: 284,315 (99.83%) - non-fraud transactions
# Total Data Objects with Class 1: 492 (0.17%) - fraud transactions
#Therefore, the dataset has a strong imbalanced nature, where the problem is two-class classification.
#%% [markdown]
# There are __only 7__ points out of roughly 284,000 having Amount > 10,000.
# Therefore these values should be excluded from the dataset.
#%%
df[df.Amount > 10000]
#%%
df = df[df.Amount < 10000]
df.describe()
#%%
#New distribution of amount with Class:
plt.subplot(121)
ax = sns.boxplot(x ="Class",y="Amount",
data=df)
ax.set_title("Class x Amount", fontsize=20)
ax.set_xlabel("Is Fraud?", fontsize=16)
ax.set_ylabel("Amount", fontsize = 16)
#%% [markdown]
# ### Creating new columns for ease in visualization
#%%
data_new = df
timedelta = pd.to_timedelta(data_new['Time'], unit='s')
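# Plausible continuation (assumption): derive readable time-of-day columns
# from the timedelta above for use in later visualizations.
data_new['Time_hour'] = timedelta.dt.components.hours
data_new['Time_min'] = timedelta.dt.components.minutes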
#TODO: regression without constant
import pandas as pd
import numpy as np
import scipy as sp
from statsmodels.formula.api import ols, logit
from statsmodels.tools.tools import add_constant
from statsmodels.api import OLS, Logit
from statsmodels.stats.outliers_influence import OLSInfluence
from IPython.display import display
from pandas.api.types import is_numeric_dtype
from .utils import get_categories
class LinearRegression:
"""
Class for OLS regression models based on the excellent statsmodels package.
Parameters
----------
method : 'enter' or 'backward'
Method for predictors selection
include_constant : bool
        (CURRENTLY UNAVAILABLE) Whether to include constant in the model
    sig_level_entry : float
        (CURRENTLY UNAVAILABLE) Max significance level to include predictor in the model
sig_level_removal : float
Min significance level to exclude predictor from the model
Attributes
----------
variables_excluded : list
Variables excluded because of zero variance
variables_included : list
Variables included in a model
predictions : pd.Series
Predicted values
N : int
Number of observations included in a model
r2 : float
R-squared (coefficient of determination)
r2_adjusted : float
Adjusted r-squared
F : float
F-statistic
F_pvalue : float
P-value for F-statistic
ess : float
Explained sum of squares
rss : float
Residual sum of squares
tss : float
Total sum of squares
coefficients : pd.Series
Regression coefficients
coefficients_sterrors : pd.Series
Standard errors of regression coefficients
coefficients_tvalues : pd.Series
T-statistics of regression coefficients
coefficients_pvalues : pd.Series
P-values of regression coefficients
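    Examples
    --------
    A minimal sketch (illustrative; assumes `df` is a pandas DataFrame with columns 'y', 'x1' and 'x2'):
        >>> model = LinearRegression(method='backward')
        >>> model.fit(df, 'y ~ x1 + x2', show_results=False)
        >>> predictions = model.predict(add_to_data=True)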
"""
def __init__(
self,
method='enter',
include_constant=True,
sig_level_entry=0.05,
sig_level_removal=0.05
):
self.method = method.lower().strip()
self.include_constant = include_constant
self.sig_level_entry = sig_level_entry
self.sig_level_removal = sig_level_removal
def fit(
self,
data,
formula,
categorical_variables=None,
show_results=True,
confidence_intervals=True,
collinearity_statistics=False,
use_patsy_notation=False,
n_decimals=3
):
"""
Fit model to the given data using formula.
Parameters
----------
data : pd.DataFrame
Data to fit a model
formula : str
Formula of a model specification, e.g. 'y ~ x1 + x2';
should be passed either in Patsy (statsmodels) notation
or using the following rules:
'*' for interaction of the variables,
':' for interaction & main effects,
            i.e., 'y ~ x:z' is equivalent to 'y ~ x + z + x*z' (unlike the Patsy notation).
If you use Patsy notation, please specify the parameter use_patsy_notation=True.
categorical_variables : list
List of names of the variables that should be considered categorical.
These variables would be automatically converted into sets of dummy variables.
If you want to use this option, please make sure that you don't have nested names of variables
(e.g. 'imdb' and 'imdb_rate' at the same time), otherwise this option results in an incorrect procedure.
show_results : bool
Whether to show results of analysis
confidence_intervals : bool
Whether to include coefficients' confidence intervals in the summary table
collinearity_statistics : bool
whether to include coefficients' tolerance and VIF in the summary table
use_patsy_notation : bool
turn this on if you use strictly Patsy's rules to define a formula.
See more: https://patsy.readthedocs.io/en/latest/quickstart.html
n_decimals : int
Number of digits to round results when showing them
Returns
-------
self
The current instance of the LinearRegression class
"""
self._data = data.copy()
self.categorical_variables = categorical_variables
self._show_ci = confidence_intervals
self._show_col = collinearity_statistics
if '=' in formula:
formula = formula.replace('=', '~')
if not use_patsy_notation:
formula = formula.replace('*', '^').replace(':', '*').replace('^', ':')
self.formula = formula
#won't work correctly if some variables have similar names (e.g. kinopoisk_rate and kinopoisk_rate_count)
if categorical_variables is not None:
if not isinstance(categorical_variables, list):
raise ValueError(f"""Categorical variables should be passed as list.
Type {type(categorical_variables)} was passed instead.""")
else:
for variable in categorical_variables:
formula = formula.replace(variable, f'C({variable})')
self._model = ols(formula=formula, data=data).fit()
self._observations_idx = list(self._model.fittedvalues.index)
self.dependent_variable = self._model.model.endog_names
self.variables_excluded = self._identify_variables_without_variation()
if len(self.variables_excluded) > 0:
y = pd.Series(self._model.model.endog.copy(),
index=self._observations_idx,
name=self.dependent_variable)
X = self._remove_variables_without_variation()
self._model = OLS(y, X, missing = 'drop').fit()
self.variables_excluded = [LinearRegression._translate_from_patsy_notation(x) for x in self.variables_excluded]
if self.method == 'backward':
self._fit_backward()
self._get_statistics_from_model()
self.predictions = self.predict()
if show_results:
self.show_results(n_decimals)
if len(self.variables_excluded) > 0:
print('------------------\n')
print(f"Following variables were excluded due to zero variance: {'; '.join(self.variables_excluded)}")
return self
def predict(
self,
data=None,
add_to_data=False,
):
"""
Predict values of a dependent variable for the given data using the fitted model.
Parameters
----------
data : pd.DataFrame
Data for predictions,
may be not specified if you want to predict values for the same data that were used to fit a model
add_to_data : bool
Whether to merge predictions with the given data.
Currently, this option returns data with a sorted index
Returns
-------
pd.DataFrame
Predictions
"""
name = f'{self.dependent_variable} (predicted)'
if data is None:
data_init = self._data.copy()
result = self._model.fittedvalues
data_init[name] = result
if add_to_data:
return data_init
else:
return data_init[name].copy()
else:
aux_model = ols(self.formula, data).fit()
aux_data_idx = aux_model.fittedvalues.index
aux_data_cols = aux_model.model.exog_names
aux_data_cols = [LinearRegression._translate_from_patsy_notation(x)\
for x in aux_data_cols]
aux_data = pd.DataFrame(aux_model.model.exog,
index=aux_data_idx,
columns=aux_data_cols)
aux_X = add_constant(aux_data[self.variables_included].copy())
aux_y = aux_model.model.endog.copy()
aux_model = OLS(aux_y, aux_X, missing='drop').fit()
result = aux_model.fittedvalues
result.name = name
if add_to_data:
result = pd.concat([data, result], axis=1, sort=False)
return result
def _get_statistics_from_model(self):
self.N = self._model.nobs
self.r2 = self._model.rsquared
self.r2_adjusted = self._model.rsquared_adj
self.F = self._model.fvalue
self.F_pvalue = self._model.f_pvalue
self.ess = self._model.ess
self.rss = self._model.ssr
if self.include_constant:
self.tss = self._model.centered_tss
else:
self.tss = self._model.uncentered_tss
self.ms_model = self._model.mse_model
self.ms_resid = self._model.mse_resid
self.ms_total = self._model.mse_total
self.dof_model = self._model.df_model
self.dof_resid = self._model.df_resid
self.dof_total = self.dof_model + self.dof_resid
self.coefficients = self._model.params.copy()
self.coefficients_sterrors = self._model.bse.copy()
self.coefficients_tvalues = self._model.tvalues.copy()
self.coefficients_pvalues = self._model.pvalues.copy()
variables_included = [x for x in list(self.coefficients.index) if x!='Intercept']
self._variables_included_patsy = variables_included.copy()
variables_included = [LinearRegression._translate_from_patsy_notation(x) for x in variables_included]
self.variables_included = variables_included
#self._independent_variables =
if self.include_constant:
self._params_idx = ['Constant'] + variables_included
else:
self._params_idx = variables_included.copy()
for stats in [self.coefficients,
self.coefficients_pvalues,
self.coefficients_sterrors,
self.coefficients_tvalues]:
stats.index = self._params_idx
return
@property
def coefficients_beta(self):
b = np.array(self._model.params)[1:]
std_y = self._model.model.endog.std(axis=0)
std_x = self._model.model.exog.std(axis=0)[1:]
beta = list(b * (std_x / std_y))
if self.include_constant:
beta = [np.nan] + beta
result = pd.Series(beta, index=self._params_idx)
return result
@property
def coefficients_confidence_interval(self):
ci = self._model.conf_int()
ci.index = self._params_idx
ci.columns = [f'LB CI (95%)',
f'UB CI (95%)']
return ci
@property
def coefficients_VIF(self):
#eps = 1e-20
x = self._model.model.exog[:, 1:].copy()
        inv_corr = np.linalg.inv(np.corrcoef(x, rowvar=False))  # use numpy directly; scipy no longer re-exports corrcoef
diag = list(inv_corr.diagonal())
if self.include_constant:
diag = [np.nan] + diag
return pd.Series(diag, index=self._params_idx)
@property
def coefficients_tolerance(self):
return 1 / self.coefficients_VIF
@staticmethod
def _translate_from_patsy_notation(effect):
effect = effect\
.replace(':', ' * ')\
.replace('C(', '')\
.replace('T.', '')\
.replace('[', ' = "')\
.replace(']', '"')\
.replace(')', '')
return effect
def show_results(self, n_decimals):
"""
Show results of the analysis in a readable form.
Parameters
----------
n_decimals : int
Number of digits to round results when showing them
"""
phrase = 'method {}'
print('\nLINEAR REGRESSION SUMMARY')
print('------------------\n')
print('Model summary')
display(self.summary_r2().style\
.set_caption(phrase.format('.summary_r2()'))\
.set_precision(n_decimals))
print('------------------\n')
print('ANOVA')
display(self.summary_F().style\
.format(None, na_rep="")\
.set_caption(phrase.format('.summary_F()'))\
.set_precision(n_decimals))
print('------------------\n')
print('Coefficients')
display(self.summary().style\
.format(None, na_rep="")\
.set_caption(phrase.format('.summary()'))\
.set_precision(n_decimals))
def summary(self):
"""
Summary table with requested information related to regression coefficients.
Returns
-------
pd.DataFrame
A summary table
"""
statistics = [
self.coefficients,
self.coefficients_sterrors,
self.coefficients_beta,
self.coefficients_tvalues,
self.coefficients_pvalues
]
columns = [
'B',
'Std. Error',
'Beta',
't',
'p-value'
]
if self._show_ci:
statistics.append(self.coefficients_confidence_interval)
columns.extend(list(self.coefficients_confidence_interval.columns))
if self._show_col:
statistics.append(self.coefficients_tolerance)
statistics.append(self.coefficients_VIF)
columns.extend(['Tolerance', 'VIF'])
statistics = pd.concat(statistics, axis=1)
statistics.columns = columns
statistics.index = self._params_idx
return statistics
def summary_r2(self):
"""
Summary table with information related to coefficient of determination.
Returns
-------
pd.DataFrame
A summary table
"""
r = self.r2 ** 0.5
r2 = self.r2
r2_adj = self.r2_adjusted
statistics = [[r, r2, r2_adj]]
columns = [
'R',
'R Squared',
'Adj. R Squared'
]
statistics = pd.DataFrame(
statistics,
columns=columns,
index = ['']
)
return statistics
def summary_F(self):
"""
Summary table with information related to F-statistic.
Returns
-------
pd.DataFrame
A summary table
"""
results = [[self.ess, self.dof_model, self.ms_model, self.F, self.F_pvalue],
[self.rss, self.dof_resid, self.ms_resid, np.nan, np.nan],
[self.tss, self.dof_total, np.nan, np.nan, np.nan]]
results = pd.DataFrame(results,
columns = ['Sum of Squares', 'df', 'Mean Square', 'F', 'p-value'],
index = ['Regression', 'Residual', 'Total'])
return results
def _fit_backward(self):
y_train = pd.Series(self._model.model.endog.copy(),
name=self.dependent_variable,
index=self._observations_idx)
X_train = pd.DataFrame(self._model.model.exog, columns=self._model.model.exog_names,
index=self._observations_idx)
model = OLS(y_train, X_train, missing = 'drop')
results = model.fit()
max_pvalue = results.pvalues.drop('Intercept').max()
while max_pvalue > self.sig_level_removal:
x_to_drop = results.pvalues.drop('Intercept').idxmax()
X_train = X_train.drop(x_to_drop, axis = 1)
model = OLS(y_train, X_train, missing = 'drop')
results = model.fit()
max_pvalue = results.pvalues.drop('Intercept').max()
self._model = results
return
def _identify_variables_without_variation(self):
if self.include_constant:
mask = self._model.model.exog.var(axis=0)[1:] == 0
else:
mask = self._model.model.exog.var(axis=0) == 0
variables_included = [x for x in list(self._model.params.index) if x!='Intercept']
return list(np.array(variables_included)[mask])
def _remove_variables_without_variation(self):
X = pd.DataFrame(self._model.model.exog,
columns=self._model.model.exog_names,
index=self._observations_idx)
X = X.drop(self.variables_excluded, axis = 1)
return X
def save_independent_variables(
self,
data=None,
add_to_data=False
):
"""
Produce values of independent variable remained in a fitted model.
This option is useful if you don't create dummy variables or interaction effects manually
but want to use them in a further analysis. Only variables remained in a model are returned
(those that are shown in a summary table).
Parameters
----------
data : pd.DataFrame
Data for which independent variables are requested;
may be not specified if you want to save values for the same data that were used to fit a model
add_to_data : bool
Whether to merge new values with the given data.
Currently, this option returns data with a sorted index
Returns
-------
pd.DataFrame
Values of independent variables
"""
if data is None:
data = self._data.copy()
if self.include_constant:
result = self._model.model.exog[:, 1:].copy()
else:
result = self._model.model.exog.copy()
columns = [x for x in self.variables_included if x!='Constant']
result = pd.DataFrame(
result,
columns=columns,
index=self._observations_idx)
else:
aux_model = ols(self.formula, data).fit()
aux_data_idx = aux_model.fittedvalues.index
aux_data_cols = aux_model.model.exog_names
aux_data_cols = [LinearRegression._translate_from_patsy_notation(x)\
for x in aux_data_cols]
aux_data = pd.DataFrame(aux_model.model.exog,
index=aux_data_idx,
columns=aux_data_cols)
result = aux_data[self.variables_included]
if add_to_data:
result = pd.concat([data, result], axis=1, sort=False)
return result
def save_residuals(self,
unstandardized=True,
standardized=False,
studentized=False,
deleted=False,
studentized_deleted=False,
add_to_data=False):
"""
Produce values of various residuals.
Residuals are returned only for data used to fit a model.
Parameters
----------
unstandardized : bool
Whether to save unstandardized (raw) residuals
standardized : bool
Whether to save standardized (z-scores) residuals
studentized : bool
Whether to save studentized residuals
deleted : bool
Whether to save deleted residuals
studentized_deleted : bool
Whether to save studentized deleted residuals
add_to_data : bool
Whether to merge new values with data.
Currently, this option returns data with a sorted index
Returns
-------
pd.DataFrame
Requested residuals
"""
columns_to_show = [f'{k.capitalize().replace("ized", ".").replace("eted", ".").replace("_", " ")} res.' \
for k, v in vars().items() if v==True and k!='add_to_data']
infl = OLSInfluence(self._model)
result = []
res_unstand = infl.resid
res_unstand.name = 'Unstandard. res.'
res_stand = (res_unstand - res_unstand.mean()) / res_unstand.std()
res_stand.name = 'Standard. res.'
res_stud = infl.resid_studentized_internal
res_stud.name = 'Student. res.'
result.extend([
res_unstand,
res_stand,
res_stud])
if deleted:
res_del = infl.resid_press
res_del.name = 'Del. res.'
result.append(res_del)
if studentized_deleted:
res_stud_del = infl.resid_studentized_external
res_stud_del.name = 'Student. del. res.'
result.append(res_stud_del)
result = pd.concat(result, axis=1)
result = result[columns_to_show].copy()
if add_to_data:
result = pd.concat([self._data, result], axis=1)
return result
#following two methods are still in progress
@staticmethod
def _turn_all_rows_to_fancy(summary):
return summary.apply(lambda x: LinearRegression._turn_one_row_to_fancy(x), axis=1)
@staticmethod
def _turn_one_row_to_fancy(row):
coef = round(row['B'].item(), 3)
sterr = round(row['Std. Error'].item(), 3)
pval = row['p-value'].item()
if pval <= 0.01:
mark = '***'
elif pval <= 0.05:
mark = '**'
elif pval <= 0.1:
mark = '*'
else:
mark = ''
result = f'{coef}{mark} \n ({sterr})'
return result
class BinaryLogisticRegression:
"""
Class for binary logistic regression models based on the excellent statsmodels package.
Parameters
----------
method : 'enter' or 'backward'
Method for predictors selection
include_constant : bool
        (CURRENTLY UNAVAILABLE) Whether to include constant in the model
    classification_cutoff : float
        Minimum probability to assign a prediction value 1
    sig_level_entry : float
        (CURRENTLY UNAVAILABLE) Max significance level to include predictor in the model
sig_level_removal : float
Min significance level to exclude predictor from the model
Attributes
----------
predictions : pd.Series
Predicted values
classification_table : pd.DataFrame
A classification table
precision_and_recall : pd.DataFrame
Table with precision, recall, and F1-score of the model
variables_excluded : list
Variables excluded because of zero variance
variables_included : list
Variables included in a model
N : int
Number of observations included in a model
r2_pseudo_macfadden : float
MacFadden's pseudo coefficient of determination
r2_pseudo_cox_snell : float
Cox&Snell's pseudo coefficient of determination
r2_pseudo_nagelkerke : float
Nagelkerke's pseudo coefficient of determination
loglikelihood : float
-2LL
coefficients : pd.Series
Regression coefficients
coefficients_sterrors : pd.Series
Standard errors of regression coefficients
coefficients_wald_statistics : pd.Series
Wald statistic of regression coefficients
coefficients_zvalues : pd.Series
z-statistic of regression coefficients
coefficients_pvalues : pd.Series
P-values of regression coefficients
coefficients_exp : pd.Series
e ** regression coefficients
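    Examples
    --------
    A minimal sketch (illustrative; assumes `df` has a binary 'outcome' column and numeric predictors 'x1', 'x2'):
        >>> model = BinaryLogisticRegression(method='enter')
        >>> model.fit(df, 'outcome ~ x1 + x2', show_results=False)
        >>> predicted = model.predict(probability=True)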
"""
def __init__(
self,
method='enter',
include_constant=True,
classification_cutoff=0.5,
sig_level_entry=0.05,
sig_level_removal=0.05,
):
self.method = method.lower().strip()
self.include_constant = include_constant
self.classification_cutoff = classification_cutoff
self.sig_level_entry = sig_level_entry
self.sig_level_removal = sig_level_removal
def fit(
self,
data,
formula,
categorical_variables=None,
max_iterations=100,
show_results=True,
confidence_intervals=True,
use_patsy_notation=False,
n_decimals=3
):
"""
Fit model to the given data using formula.
Parameters
----------
data : pd.DataFrame
Data to fit a model
formula : str
Formula of a model specification, e.g. 'y ~ x1 + x2';
should be passed either in Patsy (statsmodels) notation
or using the following rules:
'*' for interaction of the variables,
':' for interaction & main effects,
            i.e., 'y ~ x:z' is equivalent to 'y ~ x + z + x*z' (unlike the Patsy notation).
If you use Patsy notation, please specify the parameter use_patsy_notation=True.
categorical_variables : list
List of names of the variables that should be considered categorical.
These variables would be automatically converted into sets of dummy variables.
If you want to use this option, please make sure that you don't have nested names of variables
(e.g. 'imdb' and 'imdb_rate' at the same time), otherwise this option results in an incorrect procedure.
max_iterations : int
Maximum iterations for convergence
show_results : bool
Whether to show results of analysis
confidence_intervals : bool
Whether to include coefficients' confidence intervals in the summary table
use_patsy_notation : bool
Turn this on if you use strictly Patsy's rules to define a formula.
See more: https://patsy.readthedocs.io/en/latest/quickstart.html
n_decimals : int
Number of digits to round results when showing them
Returns
-------
self
The current instance of the BinaryLogisticRegression class
"""
self._data = data.copy()
self.categorical_variables = categorical_variables
self._show_ci = confidence_intervals
self.max_iterations = max_iterations
if '=' in formula:
formula = formula.replace('=', '~')
if not use_patsy_notation:
formula = formula.replace('*', '^').replace(':', '*').replace('^', ':')
self.formula = formula
self.dependent_variable = self.formula.split('~')[0].strip()
dep_cats = get_categories(self._data[self.dependent_variable])
self._dep_cats = dep_cats
if len(dep_cats) != 2:
raise ValueError(f"""A dependent variable should have exactly 2 unique categories.
The provided variable has {len(dep_cats)}.""")
self._mapper = {dep_cats[0]: 0, dep_cats[1]: 1}
self._inv_mapper = {0: dep_cats[0], 1: dep_cats[1]}
if not is_numeric_dtype(self._data[self.dependent_variable]):
self._data[self.dependent_variable] = self._data[self.dependent_variable].map(self._mapper).astype(int)
#won't work correctly if some variables have nested names (e.g. kinopoisk_rate and kinopoisk_rate_count)
if categorical_variables is not None:
if not isinstance(categorical_variables, list):
raise ValueError(f"""Categorical variables should be passed as list.
Type {type(categorical_variables)} was passed instead.""")
else:
for variable in categorical_variables:
formula = formula.replace(variable, f'C({variable})')
self._optimizer = 'newton'
try:
self._model = logit(formula=formula, data=self._data).fit(
maxiter=self.max_iterations,
warn_convergence=False,
disp=False,
method=self._optimizer,
full_output=True
)
except np.linalg.LinAlgError:
self._optimizer = 'bfgs'
self._model = logit(formula=formula, data=self._data).fit(
maxiter=self.max_iterations,
warn_convergence=False,
disp=False,
method=self._optimizer,
full_output=True
)
self._model_params = {
'maxiter': self.max_iterations,
'warn_convergence': False,
'disp': False,
'method': self._optimizer,
'full_output': True
}
self._observations_idx = list(self._model.fittedvalues.index)
self.variables_excluded = self._identify_variables_without_variation()
if len(self.variables_excluded) > 0:
y = pd.Series(self._model.model.endog.copy(),
index=self._observations_idx,
name=self.dependent_variable)
X = self._remove_variables_without_variation()
self._model = Logit(y, X, missing = 'drop').fit(**self._model_params)
self.variables_excluded = [BinaryLogisticRegression._translate_from_patsy_notation(x) for x in self.variables_excluded]
if self.method == 'backward':
self._fit_backward()
self._get_statistics_from_model()
self.predictions = self.predict()
self.classification_table = self.get_classification_table()
self.precision_and_recall = self.get_precision_and_recall()
if show_results:
self.show_results(n_decimals)
if len(self.variables_excluded) > 0:
print('------------------\n')
print(f"Following variables were excluded due to zero variance: {'; '.join(self.variables_excluded)}")
return self
def _fit_backward(self):
y_train = pd.Series(self._model.model.endog.copy(),
name=self.dependent_variable,
index=self._observations_idx)
X_train = pd.DataFrame(self._model.model.exog, columns=self._model.model.exog_names,
index=self._observations_idx)
model = Logit(y_train, X_train, missing = 'drop')
results = model.fit(**self._model_params)
max_pvalue = results.pvalues.drop('Intercept').max()
while max_pvalue > self.sig_level_removal:
x_to_drop = results.pvalues.drop('Intercept').idxmax()
X_train = X_train.drop(x_to_drop, axis = 1)
model = Logit(y_train, X_train, missing = 'drop')
results = model.fit(**self._model_params)
max_pvalue = results.pvalues.drop('Intercept').max()
self._model = results
return
def _identify_variables_without_variation(self):
if self.include_constant:
mask = self._model.model.exog.var(axis=0)[1:] == 0
else:
mask = self._model.model.exog.var(axis=0) == 0
variables_included = [x for x in list(self._model.params.index) if x!='Intercept']
return list(np.array(variables_included)[mask])
def _remove_variables_without_variation(self):
X = pd.DataFrame(self._model.model.exog,
columns=self._model.model.exog_names,
index=self._observations_idx)
X = X.drop(self.variables_excluded, axis = 1)
return X
@staticmethod
def _translate_from_patsy_notation(effect):
effect = effect\
.replace(':', ' * ')\
.replace('C(', '')\
.replace('T.', '')\
.replace('[', ' = "')\
.replace(']', '"')\
.replace(')', '')
return effect
def _get_statistics_from_model(self):
self.N = self._model.nobs
self.r2_pseudo_macfadden = self._model.prsquared
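        # Pseudo R-squared formulas used below (statsmodels: llr = 2 * (llf - llnull)):
        #   Cox & Snell: 1 - exp(-llr / N)
        #   Nagelkerke:  Cox & Snell divided by its maximum value, 1 - exp(2 * llnull / N)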
self.r2_pseudo_cox_snell = 1 - np.exp(-self._model.llr/self.N)
self.r2_pseudo_nagelkerke = self.r2_pseudo_cox_snell / (1 - np.exp(-(-2*self._model.llnull)/self.N))
self.loglikelihood = -2 * self._model.llf
self.coefficients = self._model.params.copy()
self.coefficients_sterrors = self._model.bse.copy()
self.coefficients_wald_statistics = self._model.tvalues.copy() ** 2
self.coefficients_zvalues = self._model.tvalues.copy()
self.coefficients_pvalues = self._model.pvalues.copy()
self.coefficients_exp = self.coefficients.apply(np.exp)
variables_included = [x for x in list(self.coefficients.index) if x!='Intercept']
self._variables_included_patsy = variables_included.copy()
variables_included = [BinaryLogisticRegression._translate_from_patsy_notation(x) for x in variables_included]
self.variables_included = variables_included
if self.include_constant:
self._params_idx = ['Constant'] + variables_included
else:
self._params_idx = variables_included.copy()
for stats in [self.coefficients,
self.coefficients_pvalues,
self.coefficients_sterrors,
self.coefficients_zvalues,
self.coefficients_wald_statistics,
self.coefficients_exp]:
stats.index = self._params_idx
return
def summary(self):
"""
Summary table with requested information related to regression coefficients.
Returns
-------
pd.DataFrame
A summary table
"""
statistics = [
self.coefficients,
self.coefficients_sterrors,
self.coefficients_wald_statistics,
self.coefficients_pvalues,
self.coefficients_exp
]
columns = [
'B',
'Std. Error',
'Wald',
'p-value',
'Exp(B)'
]
if self._show_ci:
statistics.append(self.coefficients_confidence_interval)
columns.extend(list(self.coefficients_confidence_interval.columns))
statistics = pd.concat(statistics, axis=1)
statistics.columns = columns
statistics.index = self._params_idx
return statistics
@property
def coefficients_confidence_interval(self):
ci = self._model.conf_int()
ci.index = self._params_idx
ci.columns = [f'LB CI (95%)',
f'UB CI (95%)']
return ci
def show_results(self, n_decimals):
"""
Show results of the analysis in a readable form.
Parameters
----------
n_decimals : int
Number of digits to round results when showing them
"""
phrase = 'method {}'
print('\nLOGISTIC REGRESSION SUMMARY\n')
if self._model.mle_retvals['converged']==True:
print('Estimation was converged successfully.')
else:
print('Estimation was NOT converged successfully.')
print('Please enlarge the number of iterations.')
print('------------------\n')
print('Dependent variable encoding')
display(self.get_dependent_variable_codes().style\
.set_caption(phrase.format('.get_dependent_variable_codes()')))
print('------------------\n')
print('Model summary')
display(self.summary_r2().style\
.set_caption(phrase.format('.summary_r2()'))\
.set_precision(n_decimals))
print('------------------\n')
print('Classification table')
display(self.get_classification_table().style\
.set_caption(phrase.format('.get_classification_table()'))\
.set_precision(n_decimals))
print('------------------\n')
print('Precision and recall')
display(self.get_precision_and_recall().style\
.set_caption(phrase.format('.get_precision_and_recall()'))\
.set_precision(n_decimals))
print('------------------\n')
print('Coefficients')
display(self.summary().style\
.format(None, na_rep="")\
.set_caption(phrase.format('.summary()'))\
.set_precision(n_decimals))
def summary_r2(self):
"""
Summary table with information related to pseudo coefficients of determination.
Returns
-------
pd.DataFrame
A summary table
"""
ll = self.loglikelihood
mf = self.r2_pseudo_macfadden
cs = self.r2_pseudo_cox_snell
nk = self.r2_pseudo_nagelkerke
statistics = [[ll, mf, cs, nk]]
columns = [
'-2 Log likelihood',
"MacFadden's Pseudo R2",
"Cox&Snell's Pseudo R2",
"Nagelkerke's Pseudo R2",
]
statistics = pd.DataFrame(
statistics,
columns=columns,
index = ['']
)
return statistics
def get_dependent_variable_codes(self):
"""
Get information on how categories of a dependent variable were encoded.
Returns
-------
pd.DataFrame
A table explaining encodings
"""
mapper = self._mapper
result = pd.DataFrame(
[list(mapper.items())[0], list(mapper.items())[1]],
columns = ['Original value', 'Model value'],
index = ['', ' ']
)
return result
def get_classification_table(self):
"""
Get a classification table.
Returns
-------
pd.DataFrame
A classification table
"""
all_categories = self._dep_cats
classification = pd.DataFrame(
self._model.pred_table(),
columns=self._dep_cats,
index=self._dep_cats
)
classification.index.name = 'Observed'
classification.columns.name = 'Predicted'
classification['All'] = classification.sum(axis=1)
classification.loc['All'] = classification.sum()
n = classification.loc['All', 'All']
for category in all_categories:
classification.loc[category, 'All'] = classification.loc[category, category] / classification.loc[category, 'All'] * 100
classification.loc['All', category] = classification.loc['All', category] / n * 100
classification.loc['All', 'All'] = np.diagonal(classification.loc[all_categories, all_categories]).sum() / n * 100
classification.index = all_categories + ['Percent predicted']
classification.index.name = 'Observed'
classification.columns = all_categories + ['Percent correct']
classification.columns.name = 'Predicted'
return classification
def get_precision_and_recall(self):
"""
Estimate precision, recall, and F-score for all the categories.
Returns
-------
pd.DataFrame
A table with estimated metrics
"""
preds = self.classification_table.iloc[:-1, :-1]
results = []
categories = list(preds.index)
for current_category in categories:
idx = [cat for cat in categories if cat!=current_category]
tp = preds.loc[current_category, current_category]
fp = preds.loc[idx, current_category].sum()
fn = preds.loc[current_category, idx].sum()
            if tp + fp == 0:
                precision = 0
            else:
                precision = tp / (tp + fp)
            # guard the recall denominator as well to avoid division by zero
            recall = tp / (tp + fn) if (tp + fn) != 0 else 0
if precision + recall != 0:
f1 = 2 * (precision * recall) / (precision + recall)
else:
f1 = 0
results.append([precision, recall, f1])
results = pd.DataFrame(results,
index=categories,
columns = ['Precision', 'Recall', 'F score'])
results.loc['Mean'] = results.mean()
return results
def predict(
self,
data=None,
group_membership=True,
probability=False,
logit=False,
add_to_data=False,
):
"""
Predict values of a dependent variable using the fitted model.
Parameters
----------
data : pd.DataFrame
Data for prediction;
may be not specified if you want to predict values for the same data that were used to fit a model
group_membership : bool
Whether to predict observation's membership
to categories of a dependent variable
probability : bool
Whether to predict exact probability
logit : bool
Whether to predict a logit value
add_to_data : bool
Whether to merge predictions with the given data.
Currently, this option returns data with a sorted index
Returns
-------
pd.DataFrame
Predictions
"""
name_memb = f'{self.dependent_variable} (predicted)'
name_prob = f'{self.dependent_variable} (predicted prob.)'
name_logit = f'{self.dependent_variable} (predicted logit)'
all_columns = [name_memb, name_prob, name_logit]
columns_to_show = []
if group_membership:
columns_to_show.append(name_memb)
if probability:
columns_to_show.append(name_prob)
if logit:
columns_to_show.append(name_logit)
cutoff = self.classification_cutoff
if data is None:
data_init = self._data.copy()
            # use a distinct name so the boolean `logit` argument is not shadowed
            logit_values = self._model.fittedvalues
            prob = logit_values.apply(lambda x: np.exp(x) / (1 + np.exp(x)))
            memb = prob.apply(lambda x: 1 if x >= cutoff else 0).map(self._inv_mapper)
            result = pd.DataFrame(index=self._observations_idx, columns=all_columns)
            result[name_memb] = memb
            result[name_prob] = prob
            result[name_logit] = logit_values
result = result[columns_to_show]
if add_to_data:
return pd.concat([data_init, result], axis=1)
else:
return result
else:
            # The formula-API `logit` function is shadowed by this method's boolean
            # argument, so the auxiliary model is rebuilt via Logit.from_formula.
            aux_model = Logit.from_formula(self.formula, data).fit(**self._model_params)
            aux_data_idx = aux_model.fittedvalues.index
            aux_data_cols = aux_model.model.exog_names
            aux_data_cols = [BinaryLogisticRegression._translate_from_patsy_notation(x)
                             for x in aux_data_cols]
            aux_data = pd.DataFrame(aux_model.model.exog,
                                    index=aux_data_idx,
                                    columns=aux_data_cols)
            aux_X = add_constant(aux_data[self.variables_included].copy())
            aux_y = aux_model.model.endog.copy()
            aux_model = Logit(aux_y, aux_X, missing='drop').fit(**self._model_params)
            logit_values = aux_model.fittedvalues
            prob = logit_values.apply(lambda x: np.exp(x) / (1 + np.exp(x)))
            memb = prob.apply(lambda x: 1 if x >= cutoff else 0).map(self._inv_mapper)
            result = pd.DataFrame(index=aux_data_idx, columns=all_columns)
            result[name_memb] = memb
            result[name_prob] = prob
            result[name_logit] = logit_values
            result = result[columns_to_show]
            if add_to_data:
                return pd.concat([data, result], axis=1)
            else:
                return result
import numpy as np
import matplotlib.pyplot as plt
import random
import pandas as pd
import pybitup
import heat_conduction
# Create fake data in a tsv (tab-separated values) file
param = [0.95, 0.95, 2.37, 21.29, -18.41, 0.00191]
x = np.arange(10.0, 70.0, 4.0)
model_def = heat_conduction.HeatConduction()
model_def.param = param
model_def.x = x
std_y=0.2604
array_std_y = np.ones(len(x))
array_std_y *= std_y
# Generate experimental data from deterministic simulation and random error from std
#y = model_def.compute_temperature()
#num_data = len(x)
#rn_data=np.zeros((1, num_data))
#for i in range(0, num_data):
# rn_data[0,i]=random.gauss(0, std_y)
#y += rn_data[0,:]
# Experimental data provided (see Smith. Tab. 3.2 p. 57, aluminium rod)
y = [96.14, 80.12, 67.66, 57.96, 50.90, 44.84, 39.75, 36.16, 33.31, 31.15, 29.28, 27.88, 27.18, 26.40, 25.86]
df = pd.DataFrame({'x': x, 'T': y, 'std_T': array_std_y})
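# Write the generated data to a tab-separated file, as announced at the top of the script.
# The file name below is an assumption; adjust it to match the pybitup input configuration.
df.to_csv('heat_conduction_data.csv', sep='\t', index=False)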
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from tools import utils
import datetime
import io
import time
import shutil
import boto
import botocore.config
import xarray as xr
import numpy as np
import pandas as pd
import scipy.misc
import psutil
from joblib import delayed, Parallel
import torch
from torch.utils.data import Dataset, DataLoader
import tools
## Interact with NOAA GOES ABI dataset via S3 and local paths
class NOAAGOESS3(object):
    """Interact with the NOAA GOES-16 ABI dataset via S3 and local paths.

    Example S3 key:
    <Key: noaa-goes16,ABI-L1b-RadC/2000/001/12/OR_ABI-L1b-RadC-M3C01_G16_s20000011200000_e20000011200000_c20170671748180.nc>
    """
def __init__(self, product='ABI-L1b-RadM', channels=range(1,17),
save_directory='./GOES16',
skip_connection=False):
self.bucket_name = 'noaa-goes16'
self.product = product
self.channels = channels
self.save_directory = os.path.join(save_directory, product)
if not skip_connection:
self._connect_to_s3()
def _connect_to_s3(self):
config = botocore.config.Config(connect_timeout=5, retries={'max_attempts': 1})
self.conn = boto.connect_s3() #host='s3.amazonaws.com', config=config)
self.goes_bucket = self.conn.get_bucket(self.bucket_name)
def year_day_pairs(self):
'''
Gets all year and day pairs in S3 for the given product
Return:
list of pairs
'''
days = []
for key_year in self.goes_bucket.list(self.product+"/", "/"):
y = int(key_year.name.split('/')[1])
if y == 2000:
continue
for key_day in self.goes_bucket.list(key_year.name, "/"):
d = int(key_day.name.split('/')[2])
days += [(y, d)]
return days
def day_keys(self, year, day, hours=range(12,24)):
keybase = '%(product)s/%(year)04i/%(day)03i/' % dict(product=self.product,
year=year, day=day)
data = []
for key_hour in self.goes_bucket.list(keybase, "/"):
hour = int(key_hour.name.split('/')[3])
if hour not in hours:
continue
for key_nc in self.goes_bucket.list(key_hour.name, '/'):
fname = key_nc.name.split('/')[4]
info = fname.split('-')[3]
c, g, t, _, _ = info.split("_")
spatial = fname.split('-')[2]
c = int(c[3:])
if c not in self.channels:
continue
minute = int(t[10:12])
second = int(t[12:15])
data.append(dict(channel=c, year=year, day=day, hour=hour,
minute=minute, second=second, spatial=spatial,
keyname=key_nc.name))
#if len(data) > 100:
# break
return pd.DataFrame(data)
def _open_file(self, f, normalize=True):
try:
ds = xr.open_dataset(f)
except IOError:
os.remove(f)
return None
if normalize:
mn = ds['min_radiance_value_of_valid_pixels'].values
mx = ds['max_radiance_value_of_valid_pixels'].values
ds['Rad'] = (ds['Rad'] - mn) / (mx - mn)
return ds
def download_from_s3(self, keyname, directory):
if not os.path.exists(directory):
os.makedirs(directory)
k = boto.s3.key.Key(self.goes_bucket)
k.key = keyname
data_file = os.path.join(directory, os.path.basename(keyname))
if os.path.exists(data_file):
pass
elif k.exists():
print("writing file to {}".format(data_file))
k.get_contents_to_filename(data_file)
else:
data_file = None
return data_file
def read_nc_from_s3(self, keyname, normalize=True):
data_file = self.download_from_s3(keyname, self.save_directory)
if data_file is not None:
ds = self._open_file(data_file, normalize=normalize)
if ds is None:
                # retry after the corrupt file has been removed, keeping the result this time
                ds, data_file = self.read_nc_from_s3(keyname, normalize=normalize)
else:
ds = None
data_file = None
return ds, data_file
def download_day(self, year, day, hours=range(12, 25)):
'''
Downloads all files for a given year and dayofyear for the defined channels
'''
keys_df = self.day_keys(year, day, hours=hours)
for i, row in keys_df.iterrows():
save_dir= os.path.join(self.save_directory,
'%04i/%03i/%02i/' % (year, day, row.hour))
data_file = self.download_from_s3(row.keyname, save_dir)
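    # Example usage (illustrative sketch, not part of the class API):
    #   goes = NOAAGOESS3(product='ABI-L1b-RadM', channels=[1, 2, 3])
    #   goes.download_day(2017, 180, hours=[18, 19])
    #   files_df = goes.local_files(year=2017, dayofyear=180)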
def local_files(self, year=None, dayofyear=None):
tmp_path = os.path.join(os.path.dirname(__file__), '.cache')
filelist_file = tmp_path + '/localfilelist_{}_{}_{}.pkl'.format(self.product, year, dayofyear)
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
if False: #os.path.exists(filelist_file):
data = pd.read_pickle(filelist_file)
else:
data = []
base_dir = self.save_directory
if year is not None:
base_dir = os.path.join(base_dir, '%04i' % year)
if dayofyear is not None:
base_dir = os.path.join(base_dir, '%03i' % dayofyear)
#for f in os.listdir(self.save_directory):
if not os.path.exists(base_dir):
return pd.DataFrame()
for directory, folders, files in os.walk(base_dir):
for f in files:
if (f[-3:] == '.nc') and ("L1b" in f):
meta = get_filename_metadata(f)
meta['file'] = os.path.join(directory, f)
data.append(meta)
            data = pd.DataFrame(data)
        # return the collected file listing (the original presumably also cached it to filelist_file)
        return data
import string, os, itertools, pickle
from nltk.corpus import stopwords  # used by the default text_process cleaner below
from time import time
import pandas as pd
import numpy as np
from scipy import interp
from sklearn.preprocessing import LabelBinarizer, label_binarize
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import precision_recall_fscore_support, roc_curve, auc, precision_recall_curve, \
average_precision_score, confusion_matrix, classification_report
from imblearn.pipeline import Pipeline
import matplotlib.pyplot as plt
class TwoLabelBinarizer(LabelBinarizer):
"""my label binarizer so that it would give same result in binary cases as in multiclass
default binarizer turns out funny format when dealing with binary classification problem"""
def transform(self, y):
Y = super().transform(y)
if self.y_type_ == 'binary':
return np.hstack((Y, 1 - Y))
else:
return Y
class ClassifierCv(object):
"""class for general classifier"""
def __init__(self, data_labels, data_text):
"""initalizes classifier object
-INPUT:
-data_labels: series, labels for classes
-data_text: series, texts for classification
-OUTPUT:
-initialized classifier object"""
self.text = data_text.reset_index(drop=True)
self.labels = data_labels.reset_index(drop=True)
        if data_labels is not None:  # should be None only when restoring from a pickle
# turn into binary labels
self.labels_unique = [label for label in self.labels.unique()]
# for some reason in two classes label binareizer gives different output
if len(self.labels_unique) == 2:
my_label_binarizer = TwoLabelBinarizer()
self.labels_bin = my_label_binarizer.fit_transform(self.labels)
else:
self.labels_bin = label_binarize(self.labels, classes=self.labels_unique)
else:
self.labels_unique = None
self.labels_bin = None
# metrics (recall, prec, f1)
self.metrics_per_class = None
self.metrics_average = None
# cv labels
self.cv_labels_real = []
self.cv_labels_predicted = []
# roc auc
self.fpr = None
self.tpr = None
self.roc_auc = None
# precision-recall curve
self.recall = None
self.precision = None
self.average_precision = None
# needed for precison recall, keeps cv results
self.y_real = None
self.y_proba = None
# grid search
self.grid_search = None
# time
self.times_cv = []
self.time_train = []
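    # Typical workflow (illustrative sketch; `labels` and `texts` are assumed to be pandas Series):
    #   clf = ClassifierCv(labels, texts)
    #   clf.prepare_pipeline()        # default CountVectorizer + TF-IDF + SGDClassifier pipeline
    #   clf.prepare_cv(n_iter=5)
    #   clf.train(roc_auc=True)
    #   cv_metrics = clf.metrics_average_df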
def text_process(self, mess):
"""
Default text cleaning. Takes in a string of text, then performs the following:
1. Remove all punctuation
2. Remove all stopwords
3. Returns a list of the cleaned text
"""
# Check characters to see if they are in punctuation
nopunc = [char for char in mess if char not in string.punctuation]
# Join the characters again to form the string.
nopunc = ''.join(nopunc)
# Now just remove any stopwords
return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
def prepare_pipeline(self, custom_pipeline=None):
"""prepares pipeline for model
- INPUT:
- custom_pipeline: Pipeline, if None, use default pipeline, else input list for sklearn Pipeline
-OUTPUT:
- initialises sklearn pipeline"""
if custom_pipeline is None:
self.text_clf = Pipeline([('vect', CountVectorizer(analyzer=self.text_process)),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier(loss='hinge', penalty='l2',
alpha=1e-3, random_state=42,
max_iter=5, tol=None)),
])
else:
self.text_clf = Pipeline(custom_pipeline)
def perform_random_search(self, param_grid, scoring='f1_weighted', num_cv=3, n_jobs=1, **kwargs):
"""perform grid search to find best parameters
-INPUT:
- param_grid: dict or list of dictionaries, Dictionary with parameters names (string) as keys and lists
of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned
by each dictionary in the list are explored. This enables searching over any sequence of parameter settings.
- scoring: string from http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
- num_cv: int, number of cross-validation iterations
- n_jobs: Number of jobs to run in parallel.
-OUTPUT:
- fitted gridsearch"""
self.grid_search = GridSearchCV(self.text_clf, cv=num_cv, scoring=scoring, n_jobs=n_jobs,
param_grid=param_grid, **kwargs)
self.grid_search.fit(self.text, self.labels)
def print_top_random_search(self, num_top=3):
"""print grid search results
-INPUT:
-num_top: int, number of top search results to print
-OUTPUT:
- printed top results"""
results = self.grid_search.cv_results_
for i in range(1, num_top + 1):
            candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
def get_top_random_search_parameters(self, num):
"""get parameters of top grid search
-INPUT:
- num: int, number of nth top rank parameters
-OUTPUT:
- dict of nth top parameters"""
results = self.grid_search.cv_results_
        candidates = np.flatnonzero(results['rank_test_score'] == num)
for candidate in candidates:
return results['params'][candidate]
def prepare_cv(self, n_iter, shuffle=True, random_state=1):
"""initialises stratified cross-validaton
INPUT:
- n_iter: int, number of cross validation iterations
OUTPUT:
- prepares k-fold cross validation object"""
self.kf = StratifiedKFold(n_splits=n_iter, shuffle=shuffle, random_state=random_state)
self.unique_labels = list(self.labels.unique())
def init_metrics_(self):
"""
initialise metrics, remove previous training metrics
"""
self.metrics_per_class = []
self.metrics_average = []
self.fpr = dict()
self.tpr = dict()
self.roc_auc = dict()
self.precision = dict()
self.recall = dict()
self.average_precision = dict()
self.y_proba = dict()
self.y_real = dict()
self.cv_labels_predicted = []
self.cv_labels_real = []
self.times_cv = []
self.time_train = []
for label_bin in range(len(self.labels_unique)):
self.fpr[label_bin] = []
self.tpr[label_bin] = []
self.roc_auc[label_bin] = []
self.precision[label_bin] = []
self.recall[label_bin] = []
self.average_precision[label_bin] = []
self.y_real[label_bin] = []
self.y_proba[label_bin] = []
self.fpr["micro"] = []
self.tpr["micro"] = []
self.roc_auc["micro"] = []
self.precision["micro"] = []
self.recall["micro"] = []
self.average_precision["micro"] = []
self.y_real["micro"] = []
self.y_proba["micro"] = []
def calc_store_rocauc_precrec_(self, classifier_rocauc, proba_method, train_ids, test_ids):
"""calculate and store ROC AUC and precision recall curve metrics
-INPUT:
-classifier_roc_auc: sklearn OneVsRest classifier
-proba_method: string, classifier method name for predicting label probability
-train_ids: list of ids of samples used for training
-test_ids: list of ids of samples used for testing
-OUTPUT:
-stored metrics for ROC AUC and precision recall curve
"""
y_score = None
# roc auc stuff
# some classifiers have method decision function, others predict proba to get scores
if proba_method == "decision_function":
y_score = classifier_rocauc.fit(self.text[train_ids], self.labels_bin[train_ids]).decision_function(
self.text[test_ids])
elif proba_method == "predict_proba":
y_score = classifier_rocauc.fit(self.text[train_ids], self.labels_bin[train_ids]).predict_proba(
list(self.text[test_ids]))
if y_score is None:
return
for i in range(len(self.unique_labels)):
fpr_temp, tpr_temp, _ = roc_curve(self.labels_bin[test_ids][:, i], y_score[:, i])
self.fpr[i].append(fpr_temp)
self.tpr[i].append(tpr_temp)
self.roc_auc[i].append(auc(fpr_temp, tpr_temp))
# precison -recall metrics
precision_temp, recall_temp, _ = precision_recall_curve(self.labels_bin[test_ids][:, i],
y_score[:, i])
self.precision[i].append(precision_temp)
self.recall[i].append(recall_temp)
self.average_precision[i].append(average_precision_score(self.labels_bin[test_ids][:, i],
y_score[:, i]))
self.y_real[i].append(self.labels_bin[test_ids][:, i])
self.y_proba[i].append(y_score[:, i])
# Compute micro-average ROC curve and ROC area
fpr_micro_temp, tpr_micro_temp, _ = roc_curve(self.labels_bin[test_ids].ravel(), y_score.ravel())
self.fpr["micro"].append(fpr_micro_temp)
self.tpr["micro"].append(tpr_micro_temp)
self.roc_auc["micro"].append(auc(fpr_micro_temp, tpr_micro_temp))
# precision recall. A "micro-average": quantifying score on all classes jointly
prec_micro_temp, recall_micro_temp, _ = precision_recall_curve(self.labels_bin[test_ids].ravel(),
y_score.ravel())
self.precision["micro"].append(prec_micro_temp)
self.recall["micro"].append(recall_micro_temp)
        self.average_precision["micro"].append(average_precision_score(self.labels_bin[test_ids], y_score,
                                                                        average="micro"))
self.y_real["micro"].append(self.labels_bin[test_ids].ravel())
self.y_proba["micro"].append(y_score.ravel())
def get_classifier_proba_method_(self, classifier):
"""get label probability method of classifier. Some mehtods don't support predict_proba
-INPUT:
-classifier: sklearn classifier, which probability calculation method is to be detected
-OUTPUT:
-string with method name
"""
proba_method = None
if callable(getattr(classifier, "decision_function", None)):
proba_method = "decision_function"
elif callable(getattr(classifier, "predict_proba", None)):
proba_method = "predict_proba"
return proba_method
def train(self, roc_auc=True):
"""train model, save metrics
-INPUT:
- roc_auc: boolean, should roc_auc (includeing precision -recall plot) metrics be saved
_OUTPUT:
- trained model with metrics"""
self.init_metrics_()
classifier_rocauc = OneVsRestClassifier(self.text_clf)
# check if classifier has predict_proba or decison_function method
proba_method = self.get_classifier_proba_method_(classifier_rocauc)
for train, test in self.kf.split(self.text, self.labels):
t0 = time()
self.text_clf.fit(self.text[train], self.labels[train])
time_cv = time() - t0
self.times_cv.append(time_cv)
labels_predict = self.text_clf.predict(list(self.text[test]))
self.cv_labels_predicted.append(labels_predict)
self.cv_labels_real.append(self.labels[test])
labels_predict_label = labels_predict
# per class metric, not average
self.metrics_per_class.append(precision_recall_fscore_support(self.labels[test],
labels_predict_label,
average=None,
labels=self.unique_labels))
self.metrics_average.append(precision_recall_fscore_support(self.labels[test],
labels_predict_label,
average='weighted',
labels=self.unique_labels))
if roc_auc:
self.calc_store_rocauc_precrec_(classifier_rocauc, proba_method, train, test)
self.metrics_df = pd.DataFrame(self.metrics_per_class)
self.metrics_average_df = pd.DataFrame(self.metrics_average)
# finally make model with all training data
t0 = time()
self.text_clf.fit(self.text, self.labels)
time_train = time() - t0
self.time_train.append(time_train)
def predict(self, text_list, proba=False):
""""predict labels based on trained classifier
- INPUT:
- text_list: list of texts which label will be predicted
- proba: boolean, if true probability will be predicted
- OUTPUT:
- dataframe labels (with probas if proba True)
"""
if proba:
probas = []
if callable(getattr(self.text_clf, "predict_proba", None)):
probas = self.text_clf.predict_proba(text_list)
if callable(getattr(self.text_clf, "decision_function", None)):
probas = self.text_clf.decision_function(text_list)
return | pd.DataFrame(probas, columns=self.unique_labels) | pandas.DataFrame |
import pandas as pd
import numpy as np
import cv2
import sys
import os
from keras.models import Sequential
from keras.callbacks import Callback, ModelCheckpoint
from keras.layers import (Flatten, Dense, Convolution2D, MaxPool2D,
BatchNormalization, Dropout, Activation, Cropping2D, Lambda)
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.preprocessing.image import ImageDataGenerator
from keras.backend import tf as ktf
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from scipy.misc import imread
import scipy
import matplotlib
import matplotlib.pyplot as plt
import argparse
import json
import random
import warnings
matplotlib.style.use('ggplot')
########################### Utilities #########################################
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
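# Usage sketch for the helper above (the loop size, labels and bar length are
# arbitrary example values, not taken from the training pipeline).
def _demo_progress_bar(total=50):
    for i in range(total):
        print_progress_bar(i + 1, total, prefix='Progress:', suffix='Complete', bar_length=40)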
###
############################# VISUALIZATION ####################################
def show_data_distribution(df):
binwidth = 0.025
# histogram before image augmentation
plt.hist(df.steering_angle, bins=np.arange(min(df.steering_angle), max(df.steering_angle) + binwidth, binwidth))
plt.title('Number of images per steering angle')
plt.xlabel('Steering Angle')
plt.ylabel('# Frames')
plt.show()
############################### NETWORK ########################################
def nvidia_end_to_end(shape, l2_regularization_scale):
print("Training Nvidia End To End of input shape %s" % str(shape))
height = shape[0]
crop_factor = 0.2 # Top 40% to be removed
crop_size = (int)(crop_factor * height)
model = Sequential()
model.add(Cropping2D(cropping=((crop_size, 0), (0, 0)), input_shape=shape))
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
model.add(BatchNormalization(axis=1, input_shape=shape))
model.add(Convolution2D(16, (3, 3), padding='valid', strides=(2, 2), activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(24, (3, 3), padding='valid', strides=(1, 2), activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(36, (3, 3), padding='valid', activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(48, (2, 2), padding='valid', activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(48, (2, 2), padding='valid', activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Flatten())
model.add(Dense(512,
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Dropout(.5))
model.add(Activation('elu'))
model.add(Dense(10,
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Activation('elu'))
model.add(Dense(1,
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.summary()
adam = Adam(lr=0.0001)
model.compile(loss='mse', optimizer=adam)
return model
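# Hypothetical usage sketch: build the network for 80x160 RGB frames (roughly the
# 50%-resized simulator images read below); the shape and L2 scale are assumptions
# for illustration only.
def _demo_build_nvidia_model():
    return nvidia_end_to_end((80, 160, 3), l2_regularization_scale=1e-5)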
################################# Dataset Manipulation Functions ##############################
def flip_image(img):
fimg = np.fliplr(img)
return fimg
def read_image(filename):
img = imread(filename).astype(np.float32)
img = scipy.misc.imresize(img, 50)
return img
def change_brightness(img):
change_pct = int(random.uniform(0, 100))
mask = (255 - img) < change_pct
img = np.where((255 - img) < change_pct, 255, img + change_pct)
return img
def read_csv(filename, cols):
print("Reading Training file: %s" % filename)
return pd.read_csv(filename, names=cols)
def drop_zero_value_steering_angle_rows(df, drop_to):
"""
df: The dataframe to drop rows from
    drop_to: How many zero-steering-angle rows to keep
"""
# print("Total rows: %s" % len(df))
# indices = df[df[col_name] == 0.0].index
# total_existing = indices.shape[0]
# print("Total Zero Value rows: %s" % total_existing)
# print("Dropping %s rows from df" % (total_existing - drop_to))
# remove_indices = np.random.choice(indices, size=total_existing - drop_to)
# new_df = df.drop(remove_indices)
# indices = new_df[new_df[col_name] == 0.0].index
# print("Remaining zero value %s" % len(indices))
#
# print("Total rows: %s" % len(new_df))
# print("Dropped %s rows" % (total_existing - drop_to))
# assert(len(df) - len(new_df) == (total_existing - drop_to))
# return new_df
df_with_zero = df[df.steering_angle == 0]
df_without_zero = df[df.steering_angle != 0]
df_with_zero = df_with_zero.sample(n=drop_to)
new_df = pd.concat([df_with_zero, df_without_zero])
return new_df
def align_steering_angles_data(df):
"""
Given a dataframe drop the 0 value steering angles to bring it at par
"""
new_df = drop_zero_value_steering_angle_rows(df, 600)
return new_df
############################# Data Reading Routines #################################
def read_training_data(track):
cols = ['center_image', 'left_image', 'right_image', 'steering_angle', 'throttle', 'brake', 'speed']
data_dirs = [entry.path for entry in os.scandir('data') if entry.is_dir()]
dfs = []
for ddir in data_dirs:
# Ignore the recovery tracks since they will be loaded later
if "recovery" not in ddir:
if track in ddir:
dfs.append(read_csv(ddir + '/driving_log.csv', cols))
elif track == "both":
dfs.append(read_csv(ddir + '/driving_log.csv', cols))
df = pd.concat(dfs)
return df
def read_sample_training(df):
"""
df: Original DF from our training data which is to be augmented
"""
cols = ['center_image', 'left_image', 'right_image', 'steering_angle', 'throttle', 'brake', 'speed']
sample_df = read_csv('sample_training_data/driving_log.csv', cols)
df = pd.concat([df, sample_df])
return df
def preprocess(img):
return img
def augment_image(img, technique):
if technique == "flip":
return flip_image(img)
elif technique == "brightness":
return change_brightness(img)
assert("No Valid technique passed for image augmentation")
def load_data(df):
all_samples = []
measurements = []
shape = None
total_images = len(df)
index = 0
for i, row in df.iterrows():
print_progress_bar(index, total_images)
index += 1
center_image = preprocess(read_image(row[0]))
all_samples.append(center_image)
measurements.append(float(row[3]))
left_image = preprocess(read_image(row[1]))
all_samples.append(left_image)
measurements.append(float(row[3]) + (0.25))
right_image = preprocess(read_image(row[2]))
all_samples.append(right_image)
measurements.append(float(row[3]) - (0.25))
shape = center_image.shape
# Add an image for the flipped version of the center image
flipped_center_image = flip_image(center_image)
all_samples.append(flipped_center_image)
measurements.append(-float(row[3]))
return np.array(all_samples), np.array(measurements), shape
# def setup_probabilistic_distribution(df):
# binwidth = 0.025
# num_bins = int((max(df.steering_angle) - min(df.steering_angle)) / binwidth)
# # histogram before image augmentation
# counts, bins = np.histogram(df['steering_angle'])
# total = len(df.index)
def rearrange_and_augment_dataframe(df, shuffle_data):
"""
    Rearrange the dataframe to linearize the steering angle images, and add
    columns indicating whether augmentation is required and which technique
    should be applied.
"""
center_df = pd.DataFrame()
left_df = pd.DataFrame()
right_df = pd.DataFrame()
flipped_center = pd.DataFrame()
center_df['image'] = df['center_image']
flipped_center['image'] = df['center_image']
left_df['image'] = df['left_image']
right_df['image'] = df['right_image']
center_df['steering_angle'] = df['steering_angle']
left_df['steering_angle'] = df['steering_angle'] + 0.25
right_df['steering_angle'] = df['steering_angle'] - 0.25
flipped_center['steering_angle'] = -1.0 * df['steering_angle']
# Set the dataframe columns for augmentation to false for some
center_df['augmentation'] = False
left_df['augmentation'] = False
right_df['augmentation'] = False
flipped_center['augmentation'] = True
# Set the augmentation techniques we need
center_df['techniques'] = ""
left_df['techniques'] = ""
right_df['techniques'] = ""
flipped_center['techniques'] = "flip"
# Change the brightness for images with different steering angles and add them
brightness_df = center_df.loc[(center_df.steering_angle < -0.025) | (center_df.steering_angle > 0.025)]
BRIGHTNESS_AUG_FACTOR = 20
brightness_df = brightness_df.append([brightness_df]*BRIGHTNESS_AUG_FACTOR, ignore_index=True)
brightness_df.steering_angle = brightness_df.steering_angle + (np.random.uniform(-1, 1)/30.0)
brightness_df.augmentation = True
brightness_df.techniques = "brightness"
new_df = pd.concat([center_df, left_df, right_df, flipped_center, brightness_df])
if shuffle_data:
        new_df = shuffle(new_df)
return new_df
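# Illustrative sketch: the rearranged frame holds one row per (possibly augmented)
# image with columns ['image', 'steering_angle', 'augmentation', 'techniques'],
# ready to be consumed by new_generator below.
def _demo_rearrange(df):
    augmented = rearrange_and_augment_dataframe(df, shuffle_data=True)
    print(augmented.columns.tolist())
    return augmented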
def read_recovery_track_data():
# Read the recovery track data for track 2
cols = ['center_image', 'left_image', 'right_image', 'steering_angle', 'throttle', 'brake', 'speed']
df = read_csv('data/track2_recovery/driving_log.csv', cols)
recovery_df = rearrange_and_augment_dataframe(df, shuffle_data=True)
return recovery_df
def save_experiment(name, network_used, epochs, model, hist):
# Based on the experiment name, save the history and the model for future use
experiments_folder = "experiments/"
history_filename = experiments_folder + name + ".json"
fp = open(history_filename, 'w')
json.dump(hist.history, fp)
print(hist.history)
fp.close()
model_filename = experiments_folder + name + "_" + str(epochs) + "_epochs_" + network_used + '.h5'
model.save(model_filename)
print("Wrote History file: %s" % history_filename)
print("Wrote Model file: %s" % model_filename)
NETWORKS = {
"nvidia": nvidia_end_to_end,
}
################################# GENERATORS ###################################
def new_generator(samples, batch_size=32):
num_samples = len(samples)
while 1:
        samples = shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset: offset + batch_size]
images = []
angles = []
for i, batch_sample in batch_samples.iterrows():
img = read_image(batch_sample.image)
steering_angle = float(batch_sample.steering_angle)
augment = batch_sample.augmentation
techniques = batch_sample.techniques
if augment:
# Techniques should be setup like this for multiple ones
# flip,brightness
techniques = techniques.split(",")
for technique in techniques:
img = augment_image(img, technique)
images.append(img)
angles.append(steering_angle)
X = np.array(images)
y = np.array(angles)
yield X, y
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1:
        samples = shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset: offset + batch_size]
images = []
angles = []
for i, batch_sample in batch_samples.iterrows():
center_image = read_image(batch_sample[0])
center_angle = float(batch_sample[3])
images.append(center_image)
angles.append(center_angle)
left_image = read_image(batch_sample[1])
left_angle = float(batch_sample[3] + 0.25)
images.append(left_image)
angles.append(left_angle)
                right_image = read_image(batch_sample[2])
right_angle = float(batch_sample[3] - 0.25)
images.append(right_image)
angles.append(right_angle)
X = np.array(images)
y = np.array(angles)
yield shuffle(X, y)
def training_generator(samples, batch_size=32):
num_samples = len(samples)
images = []
angles = []
# Drop all the rows and just keep 10
# drop_indices = np.random.choice(samples.index, size=len(samples.index) - 100, replace=False)
# samples = samples.drop(drop_indices)
# First create the proper training data.
print("Creating Initial Training Data...")
for i, batch_sample in samples.iterrows():
center_image = read_image(batch_sample[0])
center_angle = float(batch_sample[3])
images.append(center_image)
angles.append(center_angle)
left_image = read_image(batch_sample[1])
left_angle = float(batch_sample[3] + 0.25)
images.append(left_image)
angles.append(left_angle)
        right_image = read_image(batch_sample[2])
right_angle = float(batch_sample[3] - 0.25)
images.append(right_image)
angles.append(right_angle)
# Also flip the center image and change the steering angle.
flipped_center_image = flip_image(center_image)
images.append(flipped_center_image)
angles.append(-center_angle)
images = np.array(images)
angles = np.array(angles)
print("Feeding to Keras Generator...")
datagen = ImageDataGenerator(
featurewise_center=False,
featurewise_std_normalization=False,
rotation_range=10,
width_shift_range=0.2,
height_shift_range=0.2,
zca_whitening=False,
channel_shift_range=0.2,
zoom_range=0.2)
# datagen.fit(images)
while 1:
X, y = shuffle(images, angles)
for X_train, y_train in datagen.flow(X, y, batch_size=batch_size):
yield shuffle(X_train, y_train)
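# Minimal sketch of wiring the generators above into Keras training; the batch size,
# epoch count and train/validation split are assumptions, and `model` is any compiled
# Keras model (for example the nvidia_end_to_end network).
def _demo_fit_with_generators(model, samples_df, epochs=5, batch_size=32):
    train_samples, valid_samples = train_test_split(samples_df, test_size=0.2)
    train_gen = new_generator(train_samples, batch_size=batch_size)
    valid_gen = new_generator(valid_samples, batch_size=batch_size)
    return model.fit_generator(train_gen,
                               steps_per_epoch=len(train_samples) // batch_size,
                               validation_data=valid_gen,
                               validation_steps=len(valid_samples) // batch_size,
                               epochs=epochs)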
################################# MAIN METHODS #################################
def args_definition():
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", help="Number of Epochs to train the network for"
,type=int, default=20)
parser.add_argument("--network", help="Specify which Neural Network to execute"
                        ,choices=list(NETWORKS.keys()) + ["all"], default="nvidia")
parser.add_argument("--track", help="Specify which track data to use",
choices=["track1", "track2", "both"], default="both")
parser.add_argument("--use_sample_training", help="Use the sample training data",
action='store_true')
parser.add_argument("--experiment", help="Give the run an experiment name", type=str)
parser.add_argument("--show_data_distribution", help="Show the data distribution for the training data",
action='store_true')
args = parser.parse_args()
return args
def main():
global NETWORKS
args = args_definition()
df = read_training_data(args.track)
if args.use_sample_training:
df = read_sample_training(df)
frames, steering_angles, shape = load_data(df)
    # the network constructors also expect an L2 scale; 0.0 (no regularization) is an assumed default
    model = NETWORKS[args.network](shape, 0.0)
hist = model.fit(frames,
steering_angles,
validation_split=0.2,
shuffle=True,
epochs=args.epochs)
model_name = args.network + '.h5'
model.save(model_name)
if args.experiment != "":
save_experiment(args.experiment, args.network, model, hist)
from keras import backend as K
K.clear_session()
class EarlyStoppingByLossVal(Callback):
def __init__(self, monitor='val_loss', value=0.00001, verbose=0):
        super(EarlyStoppingByLossVal, self).__init__()
self.monitor = monitor
self.value = value
self.verbose = verbose
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            return
if current < self.value:
if self.verbose > 0:
print("Epoch %05d: early stopping THR" % epoch)
self.model.stop_training = True
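# Usage sketch for the callback above: stop once validation loss drops below a
# threshold (the 0.01 value is an arbitrary example).
def _demo_fit_with_early_stopping(model, frames, steering_angles, epochs=20):
    callbacks = [EarlyStoppingByLossVal(monitor='val_loss', value=0.01, verbose=1)]
    return model.fit(frames, steering_angles, validation_split=0.2, shuffle=True,
                     epochs=epochs, callbacks=callbacks)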
def main_generator():
global NETWORKS
args = args_definition()
df = read_training_data(args.track)
if args.use_sample_training:
df = read_sample_training(df)
df = rearrange_and_augment_dataframe(df, shuffle_data=True)
if args.track == "track2" or args.track == "both":
recovery_df = read_recovery_track_data()
df = | pd.concat([df, recovery_df]) | pandas.concat |
# Assignment Package
import pandas
# Define function to split dates and return into multiple columns
def date_split(X, header):
'''
Converts a dataframe with a column of dates, adding corresponding columns
of year, month and day.
Params:
X: a pandas.DataFrame with a column including dates.
header: the column header to split into year, month, day. Must include
quotations around column header name.
Example:
date_split(pandas.DataFrame({'date': ['4/25/2017', '5/5/2018',
'5/30/2020']}), 'date')
Returns: a pandas.DataFrame with the original columns as well as a column
for 'year', 'month', and 'day'.
'''
    # Convert the date column to datetime
    X[header] = pandas.to_datetime(X[header], infer_datetime_format=True)
    # Add year, month and day columns derived from the parsed dates
    X['year'] = X[header].dt.year
    X['month'] = X[header].dt.month
    X['day'] = X[header].dt.day
    return X
import numpy as np
import pandas as pd
import pickle as pkl
from tqdm import tqdm
from numpy import load
import tensorflow as tf
import keras.backend as K
from keras.layers import Flatten
from keras.engine.topology import Layer
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import MultiHeadAttention
from keras import activations, initializers, constraints, regularizers
tqdm.pandas()
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
for device in gpu_devices:
tf.config.experimental.set_memory_growth(device, True)
def f1_score(y_true, y_pred):
tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)
tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)
fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)
fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)
p = tp / (tp + fp + K.epsilon())
r = tp / (tp + fn + K.epsilon())
f1 = 2*p*r / (p+r+K.epsilon())
f1 = tf.where(tf.math.is_nan(f1), tf.zeros_like(f1), f1)
return f1
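# Sketch: f1_score above is a Keras-style metric on (multi-)label tensors, so it can
# be attached at compile time; the optimizer and loss here are placeholders.
def _demo_compile_with_f1(model):
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[f1_score])
    return model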
class MultiGraphCNN(Layer):
def __init__(self,
output_dim,
num_filters,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='glorot_uniform',
kernel_regularizer='l1',
bias_regularizer='l1',
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(MultiGraphCNN, self).__init__(**kwargs)
self.output_dim = output_dim
self.num_filters = num_filters
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_initializer.__name__ = kernel_initializer
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
def build(self, input_shape):
if self.num_filters != int(input_shape[1][-2]/input_shape[1][-1]):
raise ValueError('num_filters does not match with graph_conv_filters dimensions.')
self.input_dim = input_shape[0][-1]
kernel_shape = (self.num_filters * self.input_dim, self.output_dim)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.output_dim,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs):
output = graph_conv_op(inputs[0], self.num_filters, inputs[1], self.kernel)
if self.use_bias:
output = K.bias_add(output, self.bias)
if self.activation is not None:
output = self.activation(output)
return output
def compute_output_shape(self, input_shape):
output_shape = (input_shape[0][0], input_shape[0][1], self.output_dim)
return output_shape
def get_config(self):
config = {
'output_dim': self.output_dim,
'num_filters': self.num_filters,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(MultiGraphCNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def normalize_adj_numpy(adj, symmetric=True):
if symmetric:
d = np.diag(np.power(np.array(adj.sum(1)), -0.5).flatten(), 0)
a_norm = adj.dot(d).transpose().dot(d)
else:
d = np.diag(np.power(np.array(adj.sum(1)), -1).flatten(), 0)
a_norm = d.dot(adj)
return a_norm
def preprocess_adj_tensor(adj_tensor, symmetric=True):
adj_out_tensor = []
for i in range(adj_tensor.shape[0]):
adj = adj_tensor[i]
adj = adj + np.eye(adj.shape[0])
adj = normalize_adj_numpy(adj, symmetric)
adj_out_tensor.append(adj)
adj_out_tensor = np.array(adj_out_tensor)
return adj_out_tensor
def graph_conv_op(x, num_filters, graph_conv_filters, kernel):
if len(x.get_shape()) == 2:
conv_op = K.dot(graph_conv_filters, x)
conv_op = tf.split(conv_op, num_filters, axis=0)
conv_op = K.concatenate(conv_op, axis=1)
elif len(x.get_shape()) == 3:
conv_op = K.batch_dot(graph_conv_filters, x)
conv_op = tf.split(conv_op, num_filters, axis=1)
conv_op = K.concatenate(conv_op, axis=2)
else:
        raise ValueError('x must be either a 2 or 3 dimension tensor. '
                         'Got input shape: ' + str(x.get_shape()))
conv_out = K.dot(conv_op, kernel)
return conv_out
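# Sketch (toy sizes assumed): build the stacked filter tensor MultiGraphCNN expects,
# i.e. identity plus normalized adjacency concatenated along the node axis so that
# graph_conv_filters.shape == (batch, num_filters * N, N) with num_filters = 2.
def _demo_graph_conv_filters(adj_batch):
    # adj_batch: numpy array of shape (batch, N, N) holding raw adjacency matrices
    a_norm = preprocess_adj_tensor(adj_batch, symmetric=True)
    identity = np.tile(np.eye(adj_batch.shape[1]), (adj_batch.shape[0], 1, 1))
    return np.concatenate([identity, a_norm], axis=1)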
def Model(paths):
required_articles = pkl.load(open(f'{paths["reqd_articles_path"]}', 'rb'))
publisher = | pd.read_csv(f'{paths["publisher_path"]}', index_col=0) | pandas.read_csv |
import os
import time
import pandas as pd
import numpy as np
import json
from hydroDL import kPath
from hydroDL.data import usgs, gageII, gridMET, ntn
from hydroDL.post import axplot, figplot
import matplotlib.pyplot as plt
dirInv = os.path.join(kPath.dirData, 'USGS', 'inventory')
fileSiteNo = os.path.join(dirInv, 'siteNoLst-1979')
siteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()
tabG = gageII.readData(
varLst=['NWIS_DRAIN_SQKM', 'BASIN_BOUNDARY_CONFIDENCE'], siteNoLst=siteNoLstAll)
# read NTN
dirNTN = os.path.join(kPath.dirData, 'EPA', 'NTN')
fileData = os.path.join(dirNTN, 'NTN-All-w.csv')
fileSite = os.path.join(dirNTN, 'NTNsites.csv')
ntnData = pd.read_csv(fileData)
ntnSite = pd.read_csv(fileSite)
ntnData['siteID'] = ntnData['siteID'].apply(lambda x: x.upper())
ntnData = ntnData.replace(-9, np.nan)
ntnIdLst = ntnData['siteID'].unique().tolist()
crdNTN = pd.read_csv(os.path.join(dirNTN, 'crdNTN.csv'), index_col='siteid')
crdNTN = crdNTN.drop(['CO83', 'NC30', 'WI19'])
crdUSGS = pd.read_csv(os.path.join(
dirNTN, 'crdUSGS.csv'), dtype={'STAID': str})
crdUSGS = crdUSGS.set_index('STAID')
t = pd.date_range(start='1979-01-01', end='2019-12-31', freq='W-TUE')
t = t[1:]
# varC = usgs.varC
varC = ['00940']
varNtn = ['Cl', 'subppt']
# siteNoLst = ['0422026250', '04232050', '0423205010']
siteNo = '04193500'
# siteNo = '01184000'
siteNoLstAll.index(siteNo)
# find NTN sites
usgsId = siteNo
x = crdUSGS.loc[usgsId]['x']
y = crdUSGS.loc[usgsId]['y']
dist = np.sqrt((x-crdNTN['x'])**2+(y-crdNTN['y'])**2)
dist = dist.drop(dist[dist > 500*1000].index)
data = np.full([len(t), len(varNtn)], np.nan)
distOut = np.full(len(t), np.nan)
idOut = np.full(len(t), np.nan, dtype=object)
while len(dist) > 0:
ntnId = dist.idxmin()
# temp = dictNTN[ntnId].values
tab = ntnData[ntnData['siteID'] == ntnId]
tab.index = pd.to_datetime(tab['dateoff'])
out = | pd.DataFrame(index=t) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import xarray as xr
import numpy as np
import pandas as pd
import pathlib
class BaselineDatabase(object):
def __init__(self):
self.line_table = pd.DataFrame(columns=['site','install', 'line', 'instrument_id', 'comment'])
self.instrument_table = pd.DataFrame(columns = ['instrument_id','type_id', 'sn', 'config'])
def add2line_table(self, site,install_datetime, line_id, instrument_id, comment = ''):#20200205
install_datetime = pd.to_datetime(install_datetime)
new_line_table_entry = pd.DataFrame({'site':site,'install': install_datetime, 'line': line_id, 'instrument_id': instrument_id, 'comment': comment}, index = [instrument_id])
# self.line_table = self.line_table.append(new_line_table_entry, ignore_index=True)
self.line_table = pd.concat([self.line_table, new_line_table_entry], ignore_index=True)
return
def addnewinstrument(self, instrument_id, type_id, sn, config):
# self.instrument_table = self.instrument_table.append({'instrument_id': instrument_id,'type_id':type_id, 'sn': sn, 'config':config_id}, ignore_index=True)
# new_instrument = pd.DataFrame({'instrument_id': instrument_id,'type_id':type_id, 'sn': sn, 'config':config}, index = [instrument_id])
new_instrument = pd.DataFrame( [[instrument_id, type_id, sn, config]], columns = ['instrument_id', 'type_id', 'sn', 'config'],index = [instrument_id])
self.instrument_table = pd.concat([self.instrument_table, new_instrument])#, ignore_index=True)
return
def get_instrument(self, site, line, date):
# site = 'mlo'
# line = 121
# date = df_all.index[0]
lt_site_line = self.line_table[np.logical_and(self.line_table.site == site, self.line_table.line == line)]
previous_installs = lt_site_line[lt_site_line.install <= date]
if previous_installs.shape[0] == 0:
            raise IndexError(f'Instrument not installed (line: {line}, site: {site}, date: {date})')
lt_found = previous_installs.iloc[-1]
instrument_found = self.instrument_table[self.instrument_table.instrument_id == lt_found.instrument_id].iloc[0]
return instrument_found
database = BaselineDatabase()
#### filter configurations
conf_1= {'A': 368, 'B': 1050, 'C': 610, 'D': 778}
conf_2= {'A': 412, 'B': 500, 'C': 675, 'D': 862}
#### Instruments
database.addnewinstrument(1,1,1032,conf_2)
database.addnewinstrument(2,1,1046,conf_1)
database.addnewinstrument(3,1,1022,conf_2)
#### instrument linups
installdate = '20131126'
database.add2line_table('mlo', installdate, 121, 2)
database.add2line_table('mlo', installdate, 221, 1)
installdate = '20140104' # something is starting to go wrong on that day!
database.add2line_table('mlo', installdate, 121, 1)
database.add2line_table('mlo', installdate, 221, 2)
installdate = '20141204'
database.add2line_table('mlo', installdate, 121, 2)
database.add2line_table('mlo', installdate, 221, 1)
installdate = '20151203'
database.add2line_table('mlo', installdate, 121, 1)
database.add2line_table('mlo', installdate, 221, 2)
installdate = '20161211'
database.add2line_table('mlo', installdate, 121, 1)
database.add2line_table('mlo', installdate, 221, 2)
installdate = '20171207'
database.add2line_table('mlo', installdate, 121, 2)
database.add2line_table('mlo', installdate, 221, 1)
database.add2line_table('mlo', '20200205', 121, 1)
database.add2line_table('mlo', '20200205', 221, 2)
database.add2line_table('mlo', '20200620', 121, 2)
database.add2line_table('mlo', '20200620', 221, 1)
installdate = '20210204'
database.add2line_table('mlo', installdate, 121, 1)
database.add2line_table('mlo', installdate, 221, 2)
# # testing: installation in BRW...
# installdate = '20210318'
# uninstalldate = '20211008'
# database.add2line_table('brw', installdate, 121, 1)
# # database.add2line_table('brw', installdate, 101, 1)
# database.add2line_table('brw', installdate, 221, 2)
installdate = '20220101'
database.add2line_table('mlo', installdate, 121, 1)
database.add2line_table('mlo', installdate, 221, 2)
installdate = '20220309'
database.add2line_table('mlo', installdate, 121, 3)
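# Usage sketch: look up which instrument sat on a given line at a given time
# (the date below is an arbitrary example).
def _demo_instrument_lookup():
    return database.get_instrument('mlo', 121, pd.to_datetime('2020-03-01'))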
def get_lines_from_station_header(path = '/nfs/grad/gradobs/documentation/station_headers/MLO_header.xlsx', line_ids = [121, 221]):
path2header = pathlib.Path(path)
df = pd.read_excel(path2header)
col_names = {}
lines = []
for line_id in line_ids:
idx = (df['Unnamed: 1'] == line_id).argmax()
header = df.iloc[idx-1].dropna().values[1:]
col_names[line_id] = header
lines.append(dict(line_id = line_id, column_labels = header))
return lines
def read_file(path2raw, lines = None,
# collabels = ['lineID', 'Year', 'DOY', 'HHMM', 'A', 'B', 'C', 'D','temp'],
collabels = ['lineID', 'Year', 'DOY', 'HHMM'],
database = None,
site = None
):
"""
    Reading the file this way allows a later implementation for reading old data
    from Longenecker, and also allows other raw files to be read.
Parameters
----------
path2raw : str, list, pathlib.Path
Single or list of path(s) to file(s).
lines : list, optional
List of lines to consider (e.g. [121, 221] for sp02 at MLO). The default is None -> all.
collabels : TYPE, optional
DESCRIPTION. The default is ['lineID', 'Year', 'DOY', 'HHMM'].
database : TYPE, optional
DESCRIPTION. The default is None.
site : str, optional
        DESCRIPTION. The default is None. If None, the site is inferred from the
        file path. Set this if the path is not the standard path.
Returns
-------
out_list : TYPE
DESCRIPTION.
"""
out = {}
collabels = np.array(collabels)
#open
if not isinstance(path2raw, list):
path2raw = [path2raw,]
df_all = pd.concat([pd.read_csv(p2r, header=None) for p2r in path2raw])
# df_all = pd.read_csv(path2raw, header=None
# # names = False
# )
# out['df_all_01'] = df_all.copy()
colsis = df_all.columns.values
colsis = [int(col) for col in colsis]
    # todo: assign column labels according to instrument info
# if 0:
colsis[:collabels.shape[0]] = collabels
df_all.columns = colsis
# out['df_all_02'] = df_all.copy()
# df_all = pd.read_csv(path2raw, names=lines[0]['column_labels'])
# make datetime index
df_all['HHMM'] = df_all.apply(lambda row: f'{int(row.HHMM):04d}', axis=1)
df_all.index = df_all.apply(lambda row: pd.to_datetime(f'{int(row.Year)}0101') + pd.to_timedelta(row.DOY - 1 , 'd') + pd.to_timedelta(int(row.HHMM[:2]), 'h') + pd.to_timedelta(int(row.HHMM[2:]), 'm'), axis=1)
df_all.index.name = 'datetime'
# data_list = []
# df_inst_temp = pd.DataFrame()
# df_inst_channels = pd.DataFrame()
out['df_all'] = df_all.copy()
# return out
out_list = []
date = df_all.index[0]
# print(df_all.lineID.unique())
for lid in df_all.lineID.unique():
if isinstance(lines, list):
if lid not in lines:
print(f'{lid} not in lines ({lines})')
continue
df_lid = df_all[df_all.lineID == lid].copy()
        # Longenecker's stitching of two days together occasionally created an overlap, therefore ->
df_lid = df_lid[~df_lid.index.duplicated()]
df_lid.sort_index(inplace=True)
instrument = database.get_instrument(site, lid, date)
df_lid = df_lid.drop(['lineID', 'Year','DOY', 'HHMM'], axis=1)
df_lid.columns = ['A', 'B', 'C', 'D', 'temp']
# replace invalid values with nan
df_lid[df_lid == -99999] = np.nan
df_lid[df_lid == -7999.0] = np.nan
# seperate photo detector readings from temp
df_temp = df_lid.temp
df_voltages = df_lid.reindex(['A', 'B', 'C', 'D'], axis= 1)
df_voltages.columns.name = 'channel'
# create dataset
ds = xr.Dataset()
ds['raw_data'] = df_voltages
ds['internal_temperature'] = df_temp
ser = pd.Series(instrument.config)
ser.index.name = 'channel'
ds['channle_wavelengths'] = ser
ds['line_id'] = lid
sn = instrument['sn']
ds['serial_no'] = sn
# ds_by_instrument[f'sp02_{lid}_{sn}'] = ds
out_list.append(ds)
return out_list
# for line in lines:
# lid = line['line_id']
# dft = df_all[df_all.lineID == lid].copy()
# dft = dft.dropna(axis = 1)
# # replace placeholder with correct column labels
# dft.columns = line['column_labels']
# line['df'] = dft.copy()
# # clean up the channel voltages
# df_channels = dft.drop(['lineID', 'Year', 'DOY', 'HHMM', 'SPO2 internal temp [degC]'], axis=1)
# channels = [int(col.split(' ')[2]) for col in df_channels.columns]
# df_channels.columns = channels
# # df_channels.columns.name = f'wavelength_lid{lid}'
# df_channels[df_channels == -99999] = np.nan
# df_channels[df_channels == -7999.0] = np.nan
# data_list.append(df_channels.copy())
# # clean up temp
# temp = dft['SPO2 internal temp [degC]'].copy()
# temp[temp == -99999] = np.nan
# temp[temp == -7999.0] = np.nan
# df_inst_temp[lid] = temp
# # print(len(channels))
# # print(channels)
# df_inst_channels[lid] = channels
# # line['raw_data'] = df_channels
# # ds[f'rawdata_line_id_{lid}'] = df_channels
# # ds[f'instrument_temperature_line_id_{lid}'] = temp
# # ds['line_ids'] = lines
# ds = xr.Dataset()
# data = pd.concat(data_list, axis=1).sort_index(axis=1)
# data.columns.name = 'channel_wavelength'
# ds['raw_data'] = data
# df_inst_temp.columns.name = 'line_id'
# ds['instrument_temperatures'] = df_inst_temp
# df_inst_channels.columns.name = 'line_id'
# df_inst_channels.index = [chr(ord('A') + i) for i in df_inst_channels.index]
# df_inst_channels.index.name = 'channel'
# ds['instrument_channels'] = df_inst_channels
# return ds
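# Usage sketch: parse a single raw day file into per-instrument datasets, using the
# module-level database and the MLO line ids defined above (path2raw is supplied by
# the caller).
def _demo_read_file(path2raw):
    return read_file(path2raw, lines=[121, 221], database=database, site='mlo')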
def convert_raw2nc(path2rawfolder = '/nfs/grad/gradobs/raw/mlo/2020/', path2netcdf = '/mnt/telg/data/baseline/mlo/2020/',
# database = None,
start_date = '2020-02-06',
pattern = '*sp02.*',
sernos = [1032, 1046],
site = 'mlo',
overwrite = False,
verbose = False,
raise_error = True,
test = False):
"""
Parameters
----------
path2rawfolder : TYPE, optional
DESCRIPTION. The default is '/nfs/grad/gradobs/raw/mlo/2020/'.
path2netcdf : TYPE, optional
DESCRIPTION. The default is '/mnt/telg/data/baseline/mlo/2020/'.
# database : TYPE, optional
DESCRIPTION. The default is None.
start_date : TYPE, optional
DESCRIPTION. The default is '2020-02-06'.
pattern : str, optional
Only files with this pattern are considered. In newer raw data
versions this would be '*sp02.*'. In older ones: 'MLOD*'
sernos : TYPE, optional
DESCRIPTION. The default is [1032, 1046].
overwrite : TYPE, optional
DESCRIPTION. The default is False.
verbose : TYPE, optional
DESCRIPTION. The default is False.
test : TYPE, optional
If True only one file is processed. The default is False.
Returns
-------
None.
"""
# lines = get_lines_from_station_header()
path2rawfolder = pathlib.Path(path2rawfolder)
path2netcdf = pathlib.Path(path2netcdf)
try:
path2netcdf.mkdir(exist_ok=True)
except FileNotFoundError:
path2netcdf.parent.mkdir()
path2netcdf.mkdir()
file_list = list(path2rawfolder.glob(pattern))
# print(len(file_list))
# file_contents = []
# return file_list
df_in = pd.DataFrame(file_list, columns=['path_in'])
# test what format, old or new.
p2f = file_list[0]
nl = p2f.name.split('.')
if len(nl) == 2:
# old format like /nfs/grad/gradobs/raw/mlo/2013/sp02/MLOD013A.113
# get year from path
def path2date(path2file):
year = path2file.parent.parent.name
jul = int(''.join(filter(str.isdigit, path2file.name.split('.')[0])))
date = | pd.to_datetime(year) | pandas.to_datetime |
import pickle
import numpy as np
import pandas as pd
from AlchemicalAssistant.FEPBOSSReader import bossPdbAtom2Element,ucomb,tor_cent
from AlchemicalAssistant.Vector_algebra import pairing_func,AtomNum2Symb,AtomNum2Mass
from AlchemicalAssistant.MolReaders import ang_id,tor_id
from AlchemicalAssistant.TINKER_Rel_FEP import xyz_prep,tinker_prm
def pdb_prep(atoms, coos, resid='A2B',pdbname='COMBO'):
opdb = open(pdbname+'_NAMD.pdb', 'w+')
opdb.write('REMARK LIGPARGEN GENERATED PDB FILE\n')
num = 0
for (i, j) in zip(atoms, coos):
num += 1
opdb.write('%-6s%5d %4s %3s %4d %8.3f%8.3f%8.3f\n' %
('ATOM', num, i, resid, 1, j[0], j[1], j[2]))
opdb.write('END\n')
opdb.close()
return None
###
def MapMolecules(map_dict,num):
if num in map_dict.keys(): return map_dict[num]
else: return num+1
def TranslateICs(amol,bmol,map_dict,zdf):
amol['BONDS'][['cl1','cl2']] = amol['BONDS'][['cl1','cl2']].apply(lambda x: x+3)
bmol['BONDS'][['cl1','cl2']] = bmol['BONDS'][['cl1','cl2']].applymap(lambda x: MapMolecules(map_dict,x+3))
all_bonds = pd.concat([amol['BONDS'],bmol['BONDS']],axis=0)
all_bonds['UID'] = [pairing_func(i,j) for i,j in zip(all_bonds.cl1,all_bonds.cl2)]
amol['ANGLES'][['cl1','cl2','cl3']] = amol['ANGLES'][['cl1','cl2','cl3']].apply(lambda x: x+3)
bmol['ANGLES'][['cl1','cl2','cl3']] = bmol['ANGLES'][['cl1','cl2','cl3']].applymap(lambda x: MapMolecules(map_dict,x+3))
all_angles = | pd.concat([amol['ANGLES'],bmol['ANGLES']],axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
    @pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails '
                              'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
            'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
                             columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
                             columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(ValueError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', level='foo'),
values='Quantity', aggfunc=np.sum))
# double grouper
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
            'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 11, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 12, 5, 14, 0)],
'PayDay': [datetime(2013, 10, 4, 0, 0),
datetime(2013, 10, 15, 13, 5),
datetime(2013, 9, 5, 20, 0),
datetime(2013, 11, 2, 10, 0),
datetime(2013, 10, 7, 20, 0),
datetime(2013, 9, 5, 10, 0),
datetime(2013, 12, 30, 12, 0),
datetime(2013, 11, 20, 14, 0), ]})
result = pivot_table(df, index=Grouper(freq='M', key='Date'),
columns=Grouper(freq='M', key='PayDay'),
values='Quantity', aggfunc=np.sum)
expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan,
6, np.nan, 1, 9,
np.nan, 9, np.nan, np.nan, np.nan,
np.nan, 3, np.nan]).reshape(4, 4),
index=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)],
columns=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)])
expected.index.name = 'Date'
expected.columns.name = 'PayDay'
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),
columns=Grouper(freq='M', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
tuples = [(datetime(2013, 9, 30), datetime(2013, 10, 31)),
(datetime(2013, 10, 31),
datetime(2013, 9, 30)),
(datetime(2013, 10, 31),
datetime(2013, 11, 30)),
(datetime(2013, 10, 31),
datetime(2013, 12, 31)),
(datetime(2013, 11, 30),
datetime(2013, 10, 31)),
(datetime(2013, 12, 31), datetime(2013, 11, 30)), ]
idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])
expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,
9, np.nan, 9, np.nan,
np.nan, 3]).reshape(6, 2),
index=idx, columns=['A', 'B'])
expected.columns.name = 'Branch'
result = pivot_table(
df, index=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')], columns=['Branch'],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=['Branch'],
columns=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='dt1')
exp_col1 = Index(['value1', 'value1'])
exp_col2 = Index(['a', 'b'], name='label')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]],
index=exp_idx, columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=[
'label'], values=['value1'])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(['sum', 'sum', 'sum', 'sum',
'mean', 'mean', 'mean', 'mean'])
exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)
exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00',
'2013-02-01 15:00:00'] * 4,
tz='Asia/Tokyo', name='dt2')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2]],
dtype='int64'),
index=exp_idx,
columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['dt2'],
values=['value1', 'value2'],
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
values='value1')
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=exp_idx,
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month,
columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=Index([1, 2], name='dt2'),
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
exp_col = MultiIndex.from_arrays(
[[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=['dt1', 'dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]], dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=np.array(['X', 'X', 'X',
'X', 'Y', 'Y']),
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
[np.nan, np.nan, np.nan,
4, np.nan, 5]]),
index=['X', 'Y'], columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_daily(self):
rng = date_range('1/1/2000', '12/31/2004', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(DataFrame(ts), index=ts.index.year,
columns=ts.index.dayofyear)
annual.columns = annual.columns.droplevel(0)
doy = np.asarray(ts.index.dayofyear)
for i in range(1, 367):
subset = ts[doy == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_monthly(self):
rng = date_range('1/1/2000', '12/31/2004', freq='M')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(pd.DataFrame(ts), index=ts.index.year,
columns=ts.index.month)
annual.columns = annual.columns.droplevel(0)
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_pivot_table_with_iterator_values(self):
# GH 12017
aggs = {'D': 'sum', 'E': 'mean'}
pivot_values_list = pd.pivot_table(
self.data, index=['A'], values=list(aggs.keys()), aggfunc=aggs,
)
pivot_values_keys = pd.pivot_table(
self.data, index=['A'], values=aggs.keys(), aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
agg_values_gen = (value for value in aggs.keys())
pivot_values_gen = pd.pivot_table(
self.data, index=['A'], values=agg_values_gen, aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
def test_pivot_table_margins_name_with_aggfunc_list(self):
# GH 13354
margins_name = 'Weekly'
costs = pd.DataFrame(
{'item': ['bacon', 'cheese', 'bacon', 'cheese'],
'cost': [2.5, 4.5, 3.2, 3.3],
'day': ['M', 'M', 'T', 'T']}
)
table = costs.pivot_table(
index="item", columns="day", margins=True,
margins_name=margins_name, aggfunc=[np.mean, max]
)
ix = pd.Index(
['bacon', 'cheese', margins_name], dtype='object', name='item'
)
tups = [('mean', 'cost', 'M'), ('mean', 'cost', 'T'),
('mean', 'cost', margins_name), ('max', 'cost', 'M'),
('max', 'cost', 'T'), ('max', 'cost', margins_name)]
cols = pd.MultiIndex.from_tuples(tups, names=[None, None, 'day'])
expected = pd.DataFrame(table.values, index=ix, columns=cols)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins(self, observed):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins_category(self, observed):
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
df.y = df.y.astype('category')
df.z = df.z.astype('category')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
def test_categorical_aggfunc(self, observed):
# GH 9534
df = pd.DataFrame({"C1": ["A", "B", "C", "C"],
"C2": ["a", "a", "b", "b"],
"V": [1, 2, 3, 4]})
df["C1"] = df["C1"].astype("category")
result = df.pivot_table("V", index="C1", columns="C2",
dropna=observed, aggfunc="count")
expected_index = pd.CategoricalIndex(['A', 'B', 'C'],
categories=['A', 'B', 'C'],
ordered=False,
name='C1')
expected_columns = pd.Index(['a', 'b'], name='C2')
expected_data = np.array([[1., np.nan],
[1., np.nan],
[np.nan, 2.]])
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_categorical_pivot_index_ordering(self, observed):
# GH 8731
df = pd.DataFrame({'Sales': [100, 120, 220],
'Month': ['January', 'January', 'January'],
'Year': [2013, 2014, 2013]})
months = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
df['Month'] = df['Month'].astype('category').cat.set_categories(months)
result = df.pivot_table(values='Sales',
index='Month',
columns='Year',
dropna=observed,
aggfunc='sum')
expected_columns = pd.Int64Index([2013, 2014], name='Year')
expected_index = pd.CategoricalIndex(['January'],
categories=months,
ordered=False,
name='Month')
expected = pd.DataFrame([[320, 120]],
index=expected_index,
columns=expected_columns)
if not observed:
result = result.dropna().astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_pivot_table_not_series(self):
# GH 4386
# pivot_table always returns a DataFrame
# when values is not list like and columns is None
# and aggfunc is not instance of list
df = DataFrame({'col1': [3, 4, 5],
'col2': ['C', 'D', 'E'],
'col3': [1, 3, 9]})
result = df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)
m = MultiIndex.from_arrays([[1, 3, 9],
['C', 'D', 'E']],
names=['col3', 'col2'])
expected = DataFrame([3, 4, 5],
index=m, columns=['col1'])
tm.assert_frame_equal(result, expected)
result = df.pivot_table(
'col1', index='col3', columns='col2', aggfunc=np.sum
)
expected = DataFrame([[3, np.NaN, np.NaN],
[np.NaN, 4, np.NaN],
[np.NaN, np.NaN, 5]],
index=Index([1, 3, 9], name='col3'),
columns=Index(['C', 'D', 'E'], name='col2'))
tm.assert_frame_equal(result, expected)
result = df.pivot_table('col1', index='col3', aggfunc=[np.sum])
m = MultiIndex.from_arrays([['sum'],
['col1']])
expected = DataFrame([3, 4, 5],
index=Index([1, 3, 9], name='col3'),
columns=m)
tm.assert_frame_equal(result, expected)
def test_pivot_margins_name_unicode(self):
# issue #13292
greek = u'\u0394\u03bf\u03ba\u03b9\u03bc\u03ae'
frame = pd.DataFrame({'foo': [1, 2, 3]})
table = pd.pivot_table(frame, index=['foo'], aggfunc=len, margins=True,
margins_name=greek)
index = pd.Index([1, 2, 3, greek], dtype='object', name='foo')
expected = pd.DataFrame(index=index)
tm.assert_frame_equal(table, expected)
def test_pivot_string_as_func(self):
# GH #18713
# for correctness purposes
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar',
'bar', 'bar', 'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one',
'one', 'two', 'two', 'two', 'one'],
'C': range(11)})
result = pivot_table(data, index='A', columns='B', aggfunc='sum')
mi = MultiIndex(levels=[['C'], ['one', 'two']],
codes=[[0, 0], [0, 1]], names=[None, 'B'])
expected = DataFrame({('C', 'one'): {'bar': 15, 'foo': 13},
('C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
result = pivot_table(data, index='A', columns='B',
aggfunc=['sum', 'mean'])
mi = MultiIndex(levels=[['sum', 'mean'], ['C'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],
names=[None, None, 'B'])
expected = DataFrame({('mean', 'C', 'one'): {'bar': 5.0, 'foo': 3.25},
('mean', 'C', 'two'): {'bar': 7.0,
'foo': 6.666666666666667},
('sum', 'C', 'one'): {'bar': 15, 'foo': 13},
('sum', 'C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('f, f_numpy',
[('sum', np.sum),
('mean', np.mean),
('std', np.std),
(['sum', 'mean'], [np.sum, np.mean]),
(['sum', 'std'], [np.sum, np.std]),
(['std', 'mean'], [np.std, np.mean])])
def test_pivot_string_func_vs_func(self, f, f_numpy):
# GH #18713
# for consistency purposes
result = pivot_table(self.data, index='A', columns='B', aggfunc=f)
expected = pivot_table(self.data, index='A', columns='B',
aggfunc=f_numpy)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_pivot_number_of_levels_larger_than_int32(self):
# GH 20601
df = DataFrame({'ind1': np.arange(2 ** 16),
'ind2': np.arange(2 ** 16),
'count': 0})
with pytest.raises(ValueError, match='int32 overflow'):
df.pivot_table(index='ind1', columns='ind2',
values='count', aggfunc='count')
class TestCrosstab(object):
def setup_method(self, method):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df['A'], df['C'])
expected = df.groupby(['A', 'C']).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df['A'], [df['B'], df['C']])
expected = df.groupby(['A', 'B', 'C']).size()
expected = expected.unstack(
'B').unstack('C').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df['B'], df['C']], df['A'])
expected = df.groupby(['B', 'C', 'A']).size()
expected = expected.unstack('A').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'))
expected = crosstab(df['a'], [df['b'], df['c']])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=['a'], rownames=('b', 'c'))
expected = crosstab([df['b'], df['c']], df['a'])
tm.assert_frame_equal(result, expected)
# assign arbitrary names
result = crosstab(self.df['A'].values, self.df['C'].values)
assert result.index.name == 'row_0'
assert result.columns.name == 'col_0'
def test_crosstab_non_aligned(self):
# GH 17005
a = pd.Series([0, 1, 1], index=['a', 'b', 'c'])
b = pd.Series([3, 4, 3, 4, 3], index=['a', 'b', 'c', 'd', 'f'])
c = np.array([3, 4, 3])
expected = pd.DataFrame([[1, 0], [1, 1]],
index=Index([0, 1], name='row_0'),
columns=Index([3, 4], name='col_0'))
result = crosstab(a, b)
tm.assert_frame_equal(result, expected)
result = crosstab(a, c)
tm.assert_frame_equal(result, expected)
def test_crosstab_margins(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True)
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['All', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['All'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('All', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['All']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))
exp_rows.name = 'All'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
def test_crosstab_margins_set_margin_name(self):
# GH 15972
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name='TOTAL')
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['TOTAL', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['TOTAL'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('TOTAL', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['TOTAL']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('TOTAL', '')]))
exp_rows.name = 'TOTAL'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
for margins_name in [666, None, ['a', 'b']]:
with pytest.raises(ValueError):
crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name=margins_name)
def test_crosstab_pass_values(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
values = np.random.randn(100)
table = crosstab([a, b], c, values, aggfunc=np.sum,
rownames=['foo', 'bar'], colnames=['baz'])
df = DataFrame({'foo': a, 'bar': b, 'baz': c, 'values': values})
expected = df.pivot_table('values', index=['foo', 'bar'],
columns='baz', aggfunc=np.sum)
tm.assert_frame_equal(table, expected)
def test_crosstab_dropna(self):
# GH 3820
a = np.array(['foo', 'foo', 'foo', 'bar',
'bar', 'foo', 'foo'], dtype=object)
b = np.array(['one', 'one', 'two', 'one',
'two', 'two', 'two'], dtype=object)
c = np.array(['dull', 'dull', 'dull', 'dull',
'dull', 'shiny', 'shiny'], dtype=object)
res = pd.crosstab(a, [b, c], rownames=['a'],
colnames=['b', 'c'], dropna=False)
m = MultiIndex.from_tuples([('one', 'dull'), ('one', 'shiny'),
('two', 'dull'), ('two', 'shiny')],
names=['b', 'c'])
tm.assert_index_equal(res.columns, m)
def test_crosstab_no_overlap(self):
        # GH 10291
s1 = pd.Series([1, 2, 3], index=[1, 2, 3])
s2 = pd.Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
expected = pd.DataFrame()
tm.assert_frame_equal(actual, expected)
def test_margin_dropna(self):
# GH 12577
# pivot_table counts null into margin ('All')
# when margins=true and dropna=true
df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],
'b': [3, np.nan, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3.0, 4.0, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, np.nan, 2],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
# GH 12642
# _add_margins raises KeyError: Level None not found
# when margins=True and dropna=False
df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)
expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 4, 6]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],
'b': [3, np.nan, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 4, 6]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3.0, 4.0, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
a = np.array(['foo', 'foo', 'foo', 'bar',
'bar', 'foo', 'foo'], dtype=object)
b = np.array(['one', 'one', 'two', 'one',
'two', np.nan, 'two'], dtype=object)
c = np.array(['dull', 'dull', 'dull', 'dull',
'dull', 'shiny', 'shiny'], dtype=object)
actual = pd.crosstab(a, [b, c], rownames=['a'],
colnames=['b', 'c'], margins=True, dropna=False)
m = MultiIndex.from_arrays([['one', 'one', 'two', 'two', 'All'],
['dull', 'shiny', 'dull', 'shiny', '']],
names=['b', 'c'])
expected = DataFrame([[1, 0, 1, 0, 2], [2, 0, 1, 1, 5],
[3, 0, 2, 1, 7]], columns=m)
expected.index = Index(['bar', 'foo', 'All'], name='a')
tm.assert_frame_equal(actual, expected)
actual = pd.crosstab([a, b], c, rownames=['a', 'b'],
colnames=['c'], margins=True, dropna=False)
m = MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo', 'All'],
['one', 'two', 'one', 'two', '']],
names=['a', 'b'])
expected = DataFrame([[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2],
[5, 2, 7]], index=m)
expected.columns = Index(['dull', 'shiny', 'All'], name='c')
tm.assert_frame_equal(actual, expected)
actual = pd.crosstab([a, b], c, rownames=['a', 'b'],
colnames=['c'], margins=True, dropna=True)
m = MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo', 'All'],
['one', 'two', 'one', 'two', '']],
names=['a', 'b'])
expected = DataFrame([[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2],
[5, 1, 6]], index=m)
expected.columns = Index(['dull', 'shiny', 'All'], name='c')
tm.assert_frame_equal(actual, expected)
def test_crosstab_normalize(self):
# Issue 12578
df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4],
'c': [1, 1, np.nan, 1, 1]})
rindex = pd.Index([1, 2], name='a')
cindex = pd.Index([3, 4], name='b')
full_normal = pd.DataFrame([[0.2, 0], [0.2, 0.6]],
index=rindex, columns=cindex)
row_normal = pd.DataFrame([[1.0, 0], [0.25, 0.75]],
index=rindex, columns=cindex)
col_normal = pd.DataFrame([[0.5, 0], [0.5, 1.0]],
index=rindex, columns=cindex)
# Check all normalize args
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='all'),
full_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True),
full_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index'),
row_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns'),
col_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=1),
pd.crosstab(df.a, df.b, normalize='columns'))
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=0),
pd.crosstab(df.a, df.b, normalize='index'))
row_normal_margins = pd.DataFrame([[1.0, 0],
[0.25, 0.75],
[0.4, 0.6]],
index=pd.Index([1, 2, 'All'],
name='a',
dtype='object'),
columns=pd.Index([3, 4], name='b',
dtype='object'))
col_normal_margins = pd.DataFrame([[0.5, 0, 0.2], [0.5, 1.0, 0.8]],
index=pd.Index([1, 2], name='a',
dtype='object'),
columns=pd.Index([3, 4, 'All'],
name='b',
dtype='object'))
all_normal_margins = pd.DataFrame([[0.2, 0, 0.2],
[0.2, 0.6, 0.8],
[0.4, 0.6, 1]],
index=pd.Index([1, 2, 'All'],
name='a',
dtype='object'),
columns=pd.Index([3, 4, 'All'],
name='b',
dtype='object'))
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index',
margins=True), row_normal_margins)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns',
margins=True),
col_normal_margins)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True,
margins=True), all_normal_margins)
# Test arrays
pd.crosstab([np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])],
np.array([1, 2, 1, 2]))
# Test with aggfunc
norm_counts = pd.DataFrame([[0.25, 0, 0.25],
[0.25, 0.5, 0.75],
[0.5, 0.5, 1]],
index=pd.Index([1, 2, 'All'],
name='a',
dtype='object'),
columns=pd.Index([3, 4, 'All'],
name='b'))
test_case = pd.crosstab(df.a, df.b, df.c, aggfunc='count',
normalize='all',
margins=True)
tm.assert_frame_equal(test_case, norm_counts)
df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4],
'c': [0, 4, np.nan, 3, 3]})
norm_sum = pd.DataFrame([[0, 0, 0.],
[0.4, 0.6, 1],
[0.4, 0.6, 1]],
index=pd.Index([1, 2, 'All'],
name='a',
dtype='object'),
columns=pd.Index([3, 4, 'All'],
name='b',
dtype='object'))
test_case = pd.crosstab(df.a, df.b, df.c, aggfunc=np.sum,
normalize='all',
margins=True)
tm.assert_frame_equal(test_case, norm_sum)
def test_crosstab_with_empties(self):
# Check handling of empties
df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4],
'c': [np.nan, np.nan, np.nan, np.nan, np.nan]})
empty = pd.DataFrame([[0.0, 0.0], [0.0, 0.0]],
index=pd.Index([1, 2],
name='a',
dtype='int64'),
columns=pd.Index([3, 4], name='b'))
for i in [True, 'index', 'columns']:
calculated = pd.crosstab(df.a, df.b, values=df.c, aggfunc='count',
normalize=i)
tm.assert_frame_equal(empty, calculated)
nans = pd.DataFrame([[0.0, np.nan], [0.0, 0.0]],
index=pd.Index([1, 2],
name='a',
dtype='int64'),
columns=pd.Index([3, 4], name='b'))
calculated = pd.crosstab(df.a, df.b, values=df.c, aggfunc='count',
normalize=False)
tm.assert_frame_equal(nans, calculated)
def test_crosstab_errors(self):
# Issue 12578
df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4],
'c': [1, 1, np.nan, 1, 1]})
error = 'values cannot be used without an aggfunc.'
with pytest.raises(ValueError, match=error):
pd.crosstab(df.a, df.b, values=df.c)
error = 'aggfunc cannot be used without values'
with pytest.raises(ValueError, match=error):
pd.crosstab(df.a, df.b, aggfunc=np.mean)
error = 'Not a valid normalize argument'
with pytest.raises(ValueError, match=error):
pd.crosstab(df.a, df.b, normalize='42')
with pytest.raises(ValueError, match=error):
pd.crosstab(df.a, df.b, normalize=42)
error = 'Not a valid margins argument'
with pytest.raises(ValueError, match=error):
pd.crosstab(df.a, df.b, normalize='all', margins=42)
    def test_crosstab_with_categorical_columns(self):
# GH 8860
df = pd.DataFrame({'MAKE': ['Honda', 'Acura', 'Tesla',
'Honda', 'Honda', 'Acura'],
'MODEL': ['Sedan', 'Sedan', 'Electric',
'Pickup', 'Sedan', 'Sedan']})
categories = ['Sedan', 'Electric', 'Pickup']
df['MODEL'] = (df['MODEL'].astype('category')
.cat.set_categories(categories))
result = pd.crosstab(df['MAKE'], df['MODEL'])
expected_index = pd.Index(['Acura', 'Honda', 'Tesla'], name='MAKE')
expected_columns = pd.CategoricalIndex(categories,
categories=categories,
ordered=False,
name='MODEL')
expected_data = [[2, 0, 0], [2, 0, 1], [0, 1, 0]]
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_columns)
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
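# Illustrative sketch (not part of the pandas test suite above): a minimal,
# self-contained example of the pd.crosstab options those tests exercise.
# The small demo frame below is an assumption, not data taken from the tests.
import pandas as pd
demo = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4]})
counts = pd.crosstab(demo.a, demo.b, margins=True)           # adds an 'All' row and column
row_shares = pd.crosstab(demo.a, demo.b, normalize='index')  # each row sums to 1.0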
import pandas as pd
from os import listdir
filenames = listdir("/home/debian/datalake/sirene/2020-08/rna/")
globdf = pd.DataFrame(columns=['id', 'id_ex', 'siret', 'rup_mi', 'gestion', 'date_creat', 'date_decla',
'date_publi', 'date_disso', 'nature', 'groupement', 'titre',
'titre_court', 'objet', 'objet_social1', 'objet_social2',
'adrs_complement', 'adrs_numvoie', 'adrs_repetition', 'adrs_typevoie',
'adrs_libvoie', 'adrs_distrib', 'adrs_codeinsee', 'adrs_codepostal',
'adrs_libcommune', 'adrg_declarant', 'adrg_complemid',
'adrg_complemgeo', 'adrg_libvoie', 'adrg_distrib', 'adrg_codepostal',
'adrg_achemine', 'adrg_pays', 'dir_civilite', 'siteweb', 'publiweb',
'observation', 'position', 'maj_time', 'mois_crea', 'mois_ferm',
'objet_social_parent_id', 'objet_social_lib', 'objet_social_parent_lib',
'reg', 'dep'])
dfnom = | pd.read_csv("/home/debian/projects/postgres-playground/db/utils/rna_nomenclature_objet_social.csv",dtype=str) | pandas.read_csv |
import pandas as pd
import numpy as np
from datetime import datetime
from sklearn.pipeline import Pipeline
from pandas_datareader import data
from sklearn.ensemble import RandomForestRegressor
import sys
import pickle
import argparse
import warnings
warnings.filterwarnings("ignore")
def load_data(symbols, start_date, end_date):
"""
INPUT:
    symbols : list containing the tickers of the stocks whose prices will be predicted
    start_date : initial date to gather data
    end_date : final date to gather data
OUTPUT:
prices_base : dataframe containing the adjusted closing price for the stocks
on the desired time frame
"""
df = data.DataReader(
symbols,
'yahoo',
start_date,
end_date)
df = | pd.DataFrame(df) | pandas.DataFrame |
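# Hypothetical usage of the (truncated) load_data helper above; the tickers and
# dates are assumptions, and the call needs network access to the Yahoo Finance
# endpoint used by pandas_datareader:
#   prices = load_data(['AAPL', 'MSFT'], datetime(2015, 1, 1), datetime(2020, 1, 1))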
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = | pd.Index(["a", "b", "c", "d", "e"]) | pandas.Index |
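# Minimal round-trip sketch (illustrative only, separate from the test class above);
# the tiny frame is an assumption:
import pandas as pd
frame = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
round_tripped = pd.read_json(frame.to_json(orient="split"), orient="split")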
import http.client
import json
import pandas as pd
REQUEST_URL = 'api.huobi.com'
KLINE_TT_COLS = ['date', 'open', 'high', 'low', 'close', 'volume']
def http_get(url, resource, params=''):
conn = http.client.HTTPSConnection(url, timeout=10)
conn.request("GET", resource + '?' + params)
response = conn.getresponse()
data = response.read().decode('utf-8')
return json.loads(data)
def ticker(symbol=''):
ticker_resource = "/staticmarket/%(symbol)s_kline_100_json.js" % {'symbol': symbol}
params = ''
if symbol:
params = 'length=2000'
k_data = http_get(REQUEST_URL, ticker_resource, params)
if len(k_data) == 0:
raise ValueError('Can not obtain the data.')
else:
df = | pd.DataFrame(k_data, columns=KLINE_TT_COLS) | pandas.DataFrame |
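# Hypothetical usage of ticker() above; the symbol value is an assumption and the
# legacy api.huobi.com endpoint may no longer respond:
#   btc_kline = ticker('btc')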
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Ball detector - Deeplearning Exercice 1 - Part 1
.. moduleauthor:: <NAME>
.. See https://perso.liris.cnrs.fr/christian.wolf/teaching/deeplearning/tp.html and https://github.com/PaulEmmanuelSotir/BallDetectionAndForecasting
"""
import os
import re
import types
import random
import importlib
import pandas as pd
import pprint as pp
import operator as op
from pathlib import Path
from typing import Iterable, Optional, List, Tuple
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
__all__ = ['layer', 'conv_layer', 'fc_layer', 'Flatten', 'flatten_batch', 'get_gain_name', 'parrallelize', 'set_seeds',
'progess_bar', 'import_pickle', 'source_dir', 'extract_from_hp_search_log', 'summarize_hp_search']
__author__ = '<NAME> <<EMAIL>>'
def layer(layer_op: nn.Module, act_fn: nn.Module, dropout_prob: float, batch_norm: Optional[dict] = None) -> Iterable[nn.Module]:
ops = (layer_op, act_fn)
weight_rank = len(layer_op.weight.data.shape)
if dropout_prob is not None and dropout_prob != 0.:
ops = (nn.Dropout(p=dropout_prob),) + ops
if batch_norm is not None:
        # Applies batch norm after the activation function; see this reddit thread: https://www.reddit.com/r/MachineLearning/comments/67gonq/d_batch_normalization_before_or_after_relu/
if weight_rank == 4:
ops += (nn.BatchNorm2d(layer_op.out_channels, **batch_norm),)
elif weight_rank < 4:
ops += (nn.BatchNorm1d(layer_op.out_features, **batch_norm),)
return ops
def conv_layer(conv2d: dict, act_fn: type = nn.Identity, dropout_prob: float = 0., batch_norm: Optional[dict] = None) -> nn.Module:
return nn.Sequential(*layer(nn.Conv2d(**conv2d), act_fn(), dropout_prob, batch_norm))
def fc_layer(linear: dict, act_fn: type = nn.Identity, dropout_prob: float = 0., batch_norm: Optional[dict] = None) -> nn.Module:
return nn.Sequential(*layer(nn.Linear(**linear), act_fn(), dropout_prob, batch_norm))
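# Illustrative only: building a small fully-connected block with the fc_layer helper
# above (the layer sizes and dropout probability are assumptions):
#   head = fc_layer({'in_features': 128, 'out_features': 10}, act_fn=nn.ReLU, dropout_prob=0.1)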
class Flatten(nn.Module):
def __init__(self) -> None:
super(Flatten, self).__init__()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return flatten_batch(x)
def flatten_batch(tensor):
return tensor.view(tensor.size(0), -1) # Flattens target bounding boxes and positions
def get_gain_name(act_fn: type) -> str:
""" Intended to be used with nn.init.calculate_gain(str):
    .. Example: nn.init.calculate_gain(get_gain_name(nn.ReLU))
"""
if act_fn is nn.ReLU:
return 'relu'
elif act_fn is nn.LeakyReLU:
return 'leaky_relu'
elif act_fn is nn.Tanh:
return 'tanh'
elif act_fn is nn.Identity:
return 'linear'
else:
raise Exception("Unsupported activation function, can't initialize it.")
def parrallelize(model: nn.Module) -> nn.Module:
""" Make use of all available GPU using nn.DataParallel
NOTE: ensure to be using different random seeds for each process if you use techniques like data-augmentation or any other techniques which needs random numbers different for each steps. TODO: make sure this isn't already done by Pytorch?
"""
if torch.cuda.device_count() > 1:
print(f'> Using "nn.DataParallel(model)" on {torch.cuda.device_count()} GPUs.')
model = nn.DataParallel(model)
return model
def set_seeds(all_seeds: int = 345349):
set_seeds(torch_seed=all_seeds, cuda_seed=all_seeds, np_seed=all_seeds, python_seed=all_seeds)
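# NOTE: the definition below replaces the one above at import time (Python does not
# overload by signature), so a call like set_seeds(345349) resolves here with torch_seed=345349.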
def set_seeds(torch_seed: Optional[int] = None, cuda_seed: Optional[int] = None, np_seed: Optional[int] = None, python_seed: Optional[int] = None):
if torch_seed is not None:
torch.manual_seed(torch_seed)
if cuda_seed is not None and torch.cuda.is_available():
torch.cuda.manual_seed_all(cuda_seed)
if np_seed is not None:
np.random.seed(np_seed)
if python_seed is not None:
random.seed(python_seed)
def progess_bar(iterable, desc, batch_size, custom_vars: bool = False, disable: bool = False):
t = tqdm(iterable, unit='batch', ncols=180, desc=desc, postfix=f'BatchSize={batch_size}', ascii=False, position=0, disable=disable,
bar_format='{desc} {percentage:3.0f}%|'
'{bar}'
'| {n_fmt}/{total_fmt} [Elapsed={elapsed}, Remaining={remaining}, Speed={rate_fmt}{postfix}]')
if custom_vars:
def callback(**kwargs):
t.set_postfix(batch_size=batch_size, **kwargs)
return t, callback
return t
def import_pickle() -> types.ModuleType:
""" Returns cPickle module if available, returns imported pickle module otherwise """
try:
pickle = importlib.import_module('cPickle')
except ImportError:
pickle = importlib.import_module('pickle')
return pickle
def source_dir(source_file: str = __file__) -> Path:
return Path(os.path.dirname(os.path.realpath(source_file)))
def extract_from_hp_search_log(log_path: Path) -> Tuple[Optional[List[dict]], Optional[int], Optional[dict]]:
def _to_float(iterable): return list(map(float, iterable))
with open(log_path, 'r') as f:
log = f.read()
# Split hyperparameter search into trials
experiments = re.split('HYPERPARAMETERS TRIAL', log)
del log
flags = re.MULTILINE + re.IGNORECASE
trials = []
for i, exp in enumerate(experiments):
hp_match = re.findall(r'#+\s*\n\r?\s*(\{.*\})\s*\n\r?', exp, flags)
if hp_match is not None and len(hp_match) > 0:
epoch_matches = list(map(int, re.findall(r'Epoch\s+([0-9]+)/[0-9]+\s*\n\r?', exp, flags)))
train_matches = _to_float(re.findall(r'TRAIN_(?:LOSS|MSE)\s*=\s*([\.0-9]+)\n\r?', exp, flags))
valid_matches = _to_float(re.findall(r'(?:VALID|TEST)_(?:LOSS|MSE)\s*=\s*([\.0-9]+)\n\r?', exp, flags))
if epoch_matches and train_matches and valid_matches:
trials.append({'hyperparameters': hp_match[0], 'train_losses': _to_float(train_matches), 'valid_losses': _to_float(valid_matches),
'best_epoch': np.argmin(valid_matches), 'epochs': np.max(epoch_matches)})
else:
print(f"WARNING: Can't parse resulting losses of hyperparameter search trial NO#{i}.")
else:
print(f"WARNING: Can't parse hyperparameter search trial NO#{i}.")
if len(trials) == 0:
return [], None, None
else:
best_idx = np.argmin([np.min(t['valid_losses']) for t in trials])
return trials, best_idx, trials[best_idx]
def summarize_hp_search(trials: List[dict], best_idx: int, hp_search_name: str = ''):
best_trial = trials[best_idx]
hp_search_name = hp_search_name.upper()
valid_losses = list(map(op.itemgetter('valid_losses'), trials))
pd.DataFrame(valid_losses).T.plot(figsize=(18, 8), legend=False, logy=True, title=f'{hp_search_name} HYPERPARAMETER SEARCH - VALID LOSSES')
train_losses = list(map(op.itemgetter('train_losses'), trials))
| pd.DataFrame(train_losses) | pandas.DataFrame |
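# Hypothetical usage of the two helpers above (the log path and search name are assumptions):
#   trials, best_idx, best_trial = extract_from_hp_search_log(Path('./hp_search.log'))
#   summarize_hp_search(trials, best_idx, hp_search_name='ball_detector')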