| prompt | completion | api |
|---|---|---|
| string, lengths 19 to 1.03M | string, lengths 4 to 2.12k | string, lengths 8 to 90 |
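Each row below pairs a truncated Python prompt with the pandas call that completes it and the fully qualified API name. A minimal sketch of inspecting such a table, assuming it has been exported to a local JSON-lines file (the file name is an assumption, not part of the original dump):

import pandas as pd

rows = pd.read_json("prompt_completion_api.jsonl", lines=True)  # hypothetical export path
print(rows[["prompt", "completion", "api"]].head())
print(rows["api"].value_counts().head())  # most frequently completed pandas APIs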
# -*- coding: utf-8 -*-
## Copyright 2015-2021 PyPSA Developers
## You can find the list of PyPSA Developers at
## https://pypsa.readthedocs.io/en/latest/developers.html
## PyPSA is released under the open source MIT License, see
## https://github.com/PyPSA/PyPSA/blob/master/LICENSE.txt
"""
Power flow functionality.
"""
__author__ = (
"PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html"
)
__copyright__ = (
"Copyright 2015-2021 PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html, "
"MIT License"
)
import logging
logger = logging.getLogger(__name__)
import time
from operator import itemgetter
import networkx as nx
import numpy as np
import pandas as pd
from numpy import ones, r_
from numpy.linalg import norm
from pandas.api.types import is_list_like
from scipy.sparse import csc_matrix, csr_matrix, dok_matrix
from scipy.sparse import hstack as shstack
from scipy.sparse import issparse
from scipy.sparse import vstack as svstack
from scipy.sparse.linalg import spsolve
from pypsa.descriptors import (
Dict,
allocate_series_dataframes,
degree,
get_switchable_as_dense,
zsum,
)
pd.Series.zsum = zsum
def normed(s):
return s / s.sum()
def real(X):
return np.real(X.to_numpy())
def imag(X):
return np.imag(X.to_numpy())
def _as_snapshots(network, snapshots):
if snapshots is None:
snapshots = network.snapshots
if not is_list_like(snapshots):
snapshots = pd.Index([snapshots])
if not isinstance(snapshots, pd.MultiIndex):
snapshots = pd.Index(snapshots)
# coding: utf-8
# In[15]:
import sys, os, time, pickle
from timeit import default_timer as timer
from humanfriendly import format_timespan
# In[16]:
import pandas as pd
import numpy as np
# In[17]:
from dotenv import load_dotenv
load_dotenv('admin.env')
# In[18]:
from db_connect_mag import Session, Paper, PaperAuthorAffiliation, db
# In[19]:
# test_papers_df = pd.read_pickle('data/collect_haystack_20180409/test_papers.pickle')
# target_papers_df = pd.read_pickle('data/collect_haystack_20180409/target_papers.pickle')
# train_papers_df = pd.read_pickle('data/collect_haystack_20180409/train_papers.pickle')
# In[20]:
# this is the data for the fortunato review on Community Detection in Graphs
start = timer()
test_papers_df = pd.read_pickle('data/collect_haystack_2127048411_seed-1/test_papers.pickle')
#
# Prepare the hvorg_movies
#
import os
import datetime
import pickle
import json
import numpy as np
import pandas as pd
from sunpy.time import parse_time
# The sources ids
get_sources_ids = 'getDataSources.json'
# Save the data
save_directory = os.path.expanduser('~/Data/hvanalysis/derived')
# Read in the data
directory = os.path.expanduser('~/Data/hvanalysis/source')
hvorg_movies = 'movies.csv'
hvorg_movies = 'movies_20171128.csv'
path = os.path.expanduser(os.path.join(directory, hvorg_movies))
df = pd.read_csv(path)
hvorg_legacy_movies = 'movies_legacy.csv'
path = os.path.expanduser(os.path.join(directory, hvorg_legacy_movies))
df_legacy = pd.read_csv(path)
# Change the IDs of the legacy movies so they are unique
df_legacy.loc[:, "id"] = df_legacy.loc[:, "id"] + 10**(1 + np.int(np.ceil(np.log10(len(df)))))
# Append the legacy movies
df = df.append(df_legacy, ignore_index=True)
data_type = 'helioviewer.org movies'
# Get some figures of merit for the movies
# When was the movie requested?
request_time = [parse_time(x) for x in df.timestamp.tolist()]
# What was the movie start time?
movie_start_time = [parse_time(x) for x in df.StartDate.tolist()]
# What was the movie end time?
movie_end_time = [parse_time(x) for x in df.EndDate.tolist()]
# How many movies in the CSV file?
nmovies = len(request_time)
# How much time did the movie cover?
movie_durations = np.asarray([(movie_end_time[i] - movie_start_time[i]).total_seconds() for i in range(0, nmovies)])
# What was the mid point of the movie?
movie_mid_point = [movie_start_time[i] + datetime.timedelta(seconds=0.5*movie_durations[i]) for i in range(0, nmovies)]
# Calculate the time difference between the time of the request and the
# movie time. The start time is used since this is the only one that
# can
time_difference = np.asarray([(request_time[i] - movie_start_time[i]).total_seconds() for i in range(0, nmovies)])
# Save the time information
f = os.path.join(save_directory, 'hvorg_movie_durations_seconds.npy')
np.save(f, movie_durations)
f = os.path.join(save_directory, 'hvorg_movie_mid_point_seconds.npy')
np.save(f, movie_mid_point)
f = os.path.join(save_directory, 'hvorg_movie_time_difference_seconds.npy')
np.save(f, time_difference)
f = os.path.join(save_directory, 'hvorg_movie_request_time.pkl')
pickle.dump(request_time, open(f, 'wb'))
f = os.path.join(save_directory, 'hvorg_movie_start_time.pkl')
pickle.dump(movie_start_time, open(f, 'wb'))
f = os.path.join(save_directory, 'hvorg_movie_end_time.pkl')
pickle.dump(movie_end_time, open(f, 'wb'))
# Analyze the sourceID column. Split it up, find the unique elements,
# and create a data frame.
all_sources = []
for source_id in df.DataSourceID.tolist():
ids = source_id.split(',')
for id in ids:
if id not in all_sources:
all_sources.append(id)
def extract_data_source_names(a, removal_type='all'):
if removal_type == 'all':
b = a.replace(" ", "")
b = b.split(',')
if removal_type == 'lr':
b = a.strip()
b = b.split(' , ')
return b
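# Illustrative example (added, with an invented input string):
# extract_data_source_names("AIA 171 , AIA 304", removal_type='lr') returns
# ['AIA 171', 'AIA 304']; with removal_type='all' the spaces are stripped first,
# giving ['AIA171', 'AIA304'].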
all_data_source_names = []
for i, data_source_names in enumerate(df.DataSourceNames.tolist()):
if isinstance(data_source_names, str):
dsns = extract_data_source_names(data_source_names, removal_type='lr')
for dsn in dsns:
if dsn not in all_data_source_names:
all_data_source_names.append(dsn)
# Analyze the Helioviewer getsourcesid return - map the sourceIds to the nicknames
f = os.path.expanduser(os.path.join(directory, get_sources_ids))
j = json.load(open(f, 'r'))
def id_generator(dict_var):
for k, v in dict_var.items():
if k == "sourceId":
yield dict_var["sourceId"], dict_var["nickname"]
elif isinstance(v, dict):
for id_val in id_generator(v):
yield id_val
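# Illustrative example (added, with invented values): for a nested structure such as
#   {"SDO": {"AIA": {"171": {"sourceId": 10, "nickname": "AIA 171"}}}}
# list(id_generator(...)) yields [(10, 'AIA 171')].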
source_ids_and_nicknames = list(id_generator(j))
f = os.path.join(save_directory, 'hvorg_sourceids_and_nicknames.pkl')
pickle.dump(source_ids_and_nicknames, open(f, 'wb'))
# Create a new dataframe that explicitly holds which data source was used in each movie
df_new = pd.DataFrame(0, index=df.index, columns=all_sources)
"""
try to classify reddit posts.
"""
import os
import glob
from collections import defaultdict
from pprint import pprint
import time
from datetime import datetime
import pandas as pd
from sklearn_pandas import DataFrameMapper, cross_val_score
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, LabelBinarizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_selection import SelectKBest, mutual_info_classif, f_classif
from sklearn.metrics import classification_report, precision_recall_curve
from sklearn.model_selection import cross_validate, KFold, train_test_split, GridSearchCV
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.svm import LinearSVC, SVC
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
def print_topk(k, feature_names, clf):
"""Prints features with the highest coefficient values, per class"""
topk = np.argsort(clf.coef_[0])[-k:]
print(
"{}".format(
" ".join(feature_names[j] for j in topk[::-1])
)
)
SCORES = [
'accuracy',
'roc_auc',
#'recall',
#'precision',
]
def run_experiment(X, y, max_features, feature_selector, args):
long_precision_recall_row_dicts = []
long_result_row_dicts = []
algo_to_score = defaultdict(dict)
clf_sets = []
C_vals = [0.1, 1, 10, 100]
for C_val in C_vals:
clf_sets += [
(LogisticRegression(C=C_val), 'logistic__c={}'.format(C_val), 'logistic', C_val, 'default'),
]
clf_sets += [
(LinearSVC(C=C_val), 'linearsvc__c={}'.format(C_val), 'linearsvc', C_val, 'default'),
]
if args.other_class_weights:
clf_sets += [
(
LogisticRegression(C=C_val, class_weight='balanced'),
'logistic__c={}__class_weight=balanced'.format(C_val),
'logistic',
C_val, 'balanced'
),
(
LogisticRegression(C=C_val, class_weight={0: 1, 1:50}), 'logistic__c={}__class_weight=50x'.format(C_val),
'logistic',
C_val, '50x'
),
(
LinearSVC(C=C_val, class_weight='balanced'), 'linearsvc__c={}__class_weight=balanced'.format(C_val),
'linearsvc',
C_val, 'balanced'
),
(
LinearSVC(C=C_val, class_weight={0: 1, 1:50}), 'linearsvc__c={}__class_weight=50x'.format(C_val),
'linearsvc',
C_val, '50x'
),
]
clf_sets += [
(DummyClassifier(strategy='most_frequent'), 'SelectNoSentences', 'SelectNoSentences', 0.1, 'default'),
#(DummyClassifier(strategy='constant', constant=1), 'SelectEverySentence',),
]
if args.data_dir == 'psa_research':
clf_sets += [
(DecisionTreeClassifier(), 'tree', 'tree', 0.1, 'default'),
(GaussianNB(), 'GaussianNb', 'GaussianNb', 0.1, 'default'),
(KNeighborsClassifier(3), '3nn', '3nn', 0.1, 'default'),
]
for clf, name, algo_name, C_val, weights in clf_sets:
# if name == 'logistic':
# clf.fit(X, data.has_citation)
# print_topk(10, mapper.transformed_names_, clf)
cv = KFold(n_splits=5, shuffle=True, random_state=0)
start = time.time()
scores = cross_validate(
clf, X=X, y=y, cv=cv,
scoring=SCORES)
ret = {}
for key, val in scores.items():
if 'test_' in key:
score = key.replace('test_', '')
ret[score] = np.mean(val)
ret[score + '_std'] = np.std(val)
algo_to_score[name] = ret
tic = round(time.time() - start, 3)
algo_to_score[name]['time'] = tic
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0)
cal_clf = CalibratedClassifierCV(clf)
cal_clf.fit(X_train, y_train)
y_proba = cal_clf.predict_proba(X_test)
precision, recall, thresholds = precision_recall_curve(
y_test, y_proba[:,1])
long_result_row_dict = {
'name': name,
'algo_name': algo_name,
'max_features': max_features,
'feature_selector': feature_selector.__name__,
'C_val': C_val,
'weights': weights,
'time': tic,
}
for score in SCORES:
long_result_row_dict[score] = algo_to_score[name][score]
long_result_row_dict[score + '_std'] = algo_to_score[name][score + '_std']
long_result_row_dicts.append(long_result_row_dict)
for precision_val, recall_val in zip(precision, recall):
long_precision_recall_row_dicts.append({
'precision': precision_val,
'recall': recall_val,
'name': name,
'max_features': max_features,
'feature_selector': feature_selector.__name__,
'algo_name': algo_name,
'C_val': C_val,
'weights': weights,
})
#print(name, tic)
result_df = pd.DataFrame(algo_to_score)
result_df.to_csv('results/{}/{}_{}.csv'.format(
args.data_dir,
max_features,
feature_selector.__name__,
))
maxes = {}
for key in SCORES:
max_idx = result_df.loc[key].idxmax()
max_val = result_df.loc[key, max_idx]
maxes[key] = [max_val, max_idx]
#print(maxes)
results_df = pd.DataFrame(long_result_row_dicts)
long_precision_recall_df = pd.DataFrame(long_precision_recall_row_dicts)
# sns.factorplot(data=long_df, x='recall', y='precision', col='name', col_wrap=5)
# plt.show()
return maxes, results_df, long_precision_recall_df
def main(args):
if False:
path = 'labeled_sentences/vincent_etal_chi_2018.csv'
data = pd.read_csv(path, encoding='utf-8')
else:
sentences_filepaths = glob.glob(
"labeled_sentences/{}/*.csv".format(args.data_dir)
)
data = None
count = 100
for i, path in enumerate(sentences_filepaths):
if i > count:
break
if data is None:
data = pd.read_csv(path, encoding='utf-8')
else:
data = pd.concat([data, pd.read_csv(path, encoding='utf-8')])
# "Feature Engineering"
def compute_hand_features(processed_text):
ret = [
len(processed_text),
any(char.isdigit() for char in processed_text),
',' in processed_text,
'"' in processed_text,
any(char.isupper() for char in processed_text[1:]),
]
return ret
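# Illustrative example (added, with an invented string): compute_hand_features('He said, "42"')
# returns [13, True, True, True, False] -- length, has digits, has a comma, has a quote,
# and no uppercase character after the first one.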
data['length'], data['has_digits'], data['has_comma'], data['has_quote'], data['has_upper'] = zip(
*map(compute_hand_features, data['processed_text'])
)
mapper = DataFrameMapper([
('processed_text',
TfidfVectorizer(
#stop_words='english',
#lowercase=True,
#ngram_range=(5,5)
strip_accents='unicode',
max_features=10000
)
),
(['length'], StandardScaler()),
('has_digits', LabelBinarizer()),
('has_comma', LabelBinarizer()),
('has_quote', LabelBinarizer()),
('has_upper', LabelBinarizer()),
])
X = mapper.fit_transform(data.copy())
y = data.has_citation
feature_selectors = [
f_classif,
#mutual_info_classif,
]
maxes = {x: [0, None] for x in SCORES}
long_result_dfs = []
long_precision_recall_dfs = []
#print(maxes)
best_feature_max = None
best_feature_selector = None
for max_features in args.max_features_vals:
for feature_selector in feature_selectors:
if max_features != -1:
#print('Features:', max_features)
X_new = SelectKBest(feature_selector, k=max_features).fit_transform(X, y)
if args.run:
maxes_from_experiment, long_result_df, long_precision_recall_df = run_experiment(X_new, y, max_features, feature_selector, args)
long_precision_recall_dfs.append(long_precision_recall_df)
long_result_dfs.append(long_result_df)
for score in SCORES:
if maxes_from_experiment[score][0] > maxes[score][0]:
maxes[score] = maxes_from_experiment[score] + ['{}_{}'.format(max_features, feature_selector.__name__)]
if score == 'roc_auc':
best_feature_max = max_features
best_feature_selector = feature_selector
if args.grid:
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X_new, y, test_size=0.5, random_state=0)
tuned_parameters = [
{
'C': [0.1, 1, 10, 100, 1000]
}
]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(LinearSVC(), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print(clf.best_params_)
print("Grid scores on development set:")
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print("Detailed classification report:")
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print(maxes)
merged_precision_recall_df = long_precision_recall_dfs[0]
for long_df in long_precision_recall_dfs[1:]:
merged_precision_recall_df = pd.concat([merged_precision_recall_df, long_df])
merged_precision_recall_df.to_csv('results/{}/precision_recall_dataframe.csv'.format(args.data_dir))
merged_result_df = long_result_dfs[0]
for long_df in long_result_dfs[1:]:
merged_result_df = pd.concat([merged_result_df, long_df])
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DATA COMPARISONS OF NEW hurs AND OLD hur DOWNSCALED DATA
# <NAME> (<EMAIL>) September 2018
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import os

import numpy as np
import pandas as pd
import rasterio
def open_raster( fn, band=1 ):
with rasterio.open( fn ) as rst:
arr = rst.read( band )
return arr
def seasonize( row ):
if row['month'] in (12,1,2):
season = 'DJF'
if row['month'] in (3,4,5):
season = 'MAM'
if row['month'] in (6,7,8):
season = 'JJA'
if row['month'] in (9,10,11):
season = 'SON'
return season
def make_seasonal_average( files, model, scenario, season, output_path ):
arr = np.round( np.mean([ open_raster( fn, band=1 ) for fn in files ], axis=0), 2 )
basename = os.path.basename( files[0] )
month_str = basename.split('_')[-2]
basename = basename.replace('_'+month_str+'_', '_'+season+'_' )
output_filename = os.path.join( output_path, model, scenario, basename )
try:
dirname = os.path.dirname( output_filename )
if not os.path.exists( dirname ):
_ = os.makedirs( dirname )
except:
pass
with rasterio.open( files[0] ) as rst:
meta = rst.meta.copy()
mask = rst.read(1) == -9999
meta.update( compress='lzw' )
arr[ mask ] = -9999
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( arr.astype(np.float32), 1 )
return output_filename
def season_group_ids( df ):
# unpack
groups = [ j for i,j in list(df.groupby(np.arange(0,df.shape[0])//3))]
for count, df in enumerate(groups):
df['season_id'] = count
return pd.concat(groups)
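# --- Illustrative demo (added; not part of the original script) ---
# A tiny synthetic table showing what `seasonize` and `season_group_ids` produce;
# the month values below are invented for demonstration.
_demo = pd.DataFrame({'month': [12, 1, 2, 3, 4, 5, 6, 7, 8]})
_demo['season'] = _demo.apply(seasonize, axis=1)  # DJF x3, MAM x3, JJA x3
_demo = season_group_ids(_demo)                   # adds season_id 0,0,0,1,1,1,2,2,2
print(_demo)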
### Notes:
### What does this function return?
# 1. A dataframe with a daywise summary of the spends, leads, appointments and surgeries
# 2. Added columns for CPL, CPA and CPS
# 3. A split of total leads, appointments and surgeries from Facebook and Google
### What does this function take as input?
# 1. Nothing. The function automatically reads the required csv files and returns the dataframes mentioned above.
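### Illustrative usage sketch (added for clarity, not part of the original script).
### It assumes the mapping modules imported below are available along with their CSV
### inputs; the city name "Delhi" is an invented example value.
# sc = summary_cities()
# city_summary = sc.summary("Delhi")
# print(city_summary.head())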
import pandas as pd
import datetime
import FB_Google
import non_mri
import numpy as np
import time
import FB_service_map
import Google_service_map
import Leads_cities_mapping
import Apts_cities_mapping
import Online_cities_mapping
class summary_cities:
def __init__(self):
self.FB_data_raw = FB_service_map.FB_service_map()
self.Google_data_raw = Google_service_map.Google_service_map()
self.leads_data_raw = Leads_cities_mapping.Leads_city_mapping()
self.apts_data_raw = Online_cities_mapping.Online_cities_mapping()
def summary(self, city):
FB_raw_data_raw = self.FB_data_raw
Google_spends_data_raw = self.Google_data_raw
leads_raw = self.leads_data_raw
apts_raw = self.apts_data_raw
FB_raw_data = FB_raw_data_raw[FB_raw_data_raw.City==city]
Google_spends_data = Google_spends_data_raw[Google_spends_data_raw.City==city]
leads_data = leads_raw[leads_raw.City==city]
apts_data = apts_raw[apts_raw.City==city]
#FB_raw_data = FB_state_city_mapping.FB_state_city_mapping()
### Lets define a daterange dataframe to be joined to every dataframe in this notebook
dates = pd.date_range('2019-01-01', time.strftime("%Y-%m-%d"), freq='D').to_list()
dates_df = pd.DataFrame({"Date":dates})
#### Lets process Facebook Spend here
FB_data_dropped = FB_raw_data[["Reporting ends", "Ad set name", "Results",
"Amount spent (INR)", "Impressions", "Link clicks",
"CTR (link click-through rate)", "City", "Extracted_service", "Service", "Dept"]]
new_columns = ["Date", "FB_Ad_set", "FB_Results",
"Spend", "FB_Impressions", "FB_Link_clicks",
"FB_CTR", "City", "FB_Extracted_service", "Service", "Dept"]
FB_data_dropped.columns = new_columns
FB_data_dropped["FB_spend"] = FB_data_dropped["Spend"]*1.18
### Now lets summarise the FB spend data daywise to be later attached with the leads, appointments and surgery
FB_datewise_spends = FB_data_dropped.groupby(["Dept", "Service", "Date"]).sum().drop(columns = ["Spend"])
FB_daily_spends = FB_datewise_spends.reset_index()
FB_daily_spends.FB_spend = FB_daily_spends.FB_spend.replace(np.nan, 0)
FB_daily_spends["Date"] = pd.to_datetime(FB_daily_spends["Date"], format = "%Y-%m-%d")
FB_spends = pd.merge(dates_df,FB_daily_spends, on = "Date", how = "left")
#FB_spends.head()
### Lets process Google Spend here
Google_spends_data = Google_spends_data[["Day","Campaign", "Cost", "Impressions", "Clicks", "Conversions", "City",
"Extracted_service", "Service", "Dept"]]
Google_spends_data.columns = ["Date","Google_Campaign", "Spend", "Google_Impressions", "Google_Clicks",
"Google_Conversions", "City", "Extracted_service", "Service", "Dept"]
Google_spends_data["Google_spend"] = Google_spends_data["Spend"]*1.18
### Now lets summarise the Google spend data daywise to be later attached with the leads, appointments and surgery
Google_datewise_spends = Google_spends_data.groupby(["Dept", "Service","Date"]).sum().drop(columns = ["Spend"])
Google_daily_spends = Google_datewise_spends.reset_index()
Google_daily_spends.Google_spend = Google_daily_spends.Google_spend.replace(np.nan, 0)
Google_daily_spends["Date"] = pd.to_datetime(Google_daily_spends["Date"])
Google_spends = pd.merge(dates_df,Google_daily_spends, on = "Date", how = "left")
#Google_spends.head()
Spends_data = pd.merge(FB_spends, Google_spends, on = ["Date","Dept","Service"], how = "outer")
### Lets process Leads data here
non_mri_leads = non_mri.MRI_filter(leads_data)
non_mri_leads.City = non_mri_leads.City.replace(np.nan, "No_data")
# Lets read overall leads and split it to daily leads for facebook and google
overall_leads = non_mri_leads.groupby(["Date", "Dept", "Service"]).count()
overall_leads = overall_leads.reset_index()
daywise_leads = overall_leads[["Date", "Dept", "Service", "Lead_id"]]
daywise_leads.columns = ["Date", "Dept", "Service","Total_leads"]
daywise_leads.Date = pd.to_datetime(daywise_leads.Date)
FB_leads_df, Google_leads_df = FB_Google.channels(leads_data)
FB_leads = FB_leads_df[['Date',"Dept","Service",'Lead_id']]
FB_leads.columns = ["Date","Dept","Service","FB_leads"]
FB_daywise_leads = FB_leads.groupby(["Date","Dept","Service"]).count()
FB_daily_leads = FB_daywise_leads.reset_index()
FB_daily_leads.Date = pd.to_datetime(FB_daily_leads.Date, format = "%Y-%m-%d")
Google_leads = Google_leads_df[['Date',"Dept","Service",'Lead_id']]
Google_leads.columns = ["Date","Dept","Service","Google_leads"]
Google_daywise_leads = Google_leads.groupby(["Date","Dept","Service"]).count()
Google_daily_leads = Google_daywise_leads.reset_index()
Google_daily_leads.Date = pd.to_datetime(Google_daily_leads.Date, format = "%Y-%m-%d")
dummy1 = pd.merge(dates_df, FB_daily_leads, on = "Date", how = "left")
dummy2 = pd.merge(dummy1, Google_daily_leads, on = "Date", how = "left")
#daywise_leads.Date = pd.to_datetime(daywise_leads.Date, format = "%Y-%m-%d")
leads_df = pd.merge(FB_daily_leads, Google_daily_leads, on = ["Date", "Dept", "Service"], how = "outer")
#leads_df.head()
### Lets combine Spends and Leads dataframes
Spends_leads_df = pd.merge(Spends_data, leads_df, on = ["Date", "Dept", "Service"], how = "outer")
Spends_leads_data = pd.merge(Spends_leads_df, daywise_leads, on = ["Date", "Dept", "Service"], how = "outer")
apts_data_dropped = apts_data[["Lead_id","f2f_sch_date", "f2f_comp_date","Service","Dept"]]
apts_data_dropped.columns = ["Lead_id","f2f_sch","f2f_comp","Service", "Dept"]
apts = non_mri.MRI_filter(apts_data_dropped)
apts = apts[["Lead_id", "f2f_sch", "f2f_comp","Service","Dept_x"]]
apts.columns = ["Lead_id", "f2f_sch", "f2f_comp","Service","Dept"]
f2f_sch = apts[["Lead_id","f2f_sch", "Service", "Dept"]]
f2f_sch.columns = ["Total_f2f_sch","Date", "Service", "Dept"]
f2f_sch_df = f2f_sch.groupby(["Date", "Dept","Service"]).count()
f2f_sch_df = f2f_sch_df.reset_index()
f2f_sch_df.Date = pd.to_datetime(f2f_sch_df.Date, format = "%Y-%m-%d")
#f2f_sch_daily = pd.merge(dates_df, f2f_sch_df, on = "Date", how = "left")
f2f_sch_daily = f2f_sch_df
# Lets take daily Total f2f completed
f2f_comp = apts[["Lead_id","f2f_comp", "Service", "Dept"]]
f2f_comp.columns = ["Total_f2f_comp","Date", "Service", "Dept"]
f2f_comp_df = f2f_comp.groupby(["Date", "Dept","Service"]).count()
f2f_comp_df = f2f_comp_df.reset_index()
f2f_comp_df = f2f_comp_df[f2f_comp_df.Date>="2019-01-01"]
f2f_comp_df = f2f_comp_df.reset_index()
f2f_comp_df = f2f_comp_df[["Date","Dept", "Service","Total_f2f_comp"]]
f2f_comp_df = f2f_comp_df[f2f_comp_df.Date!="Service NA"]
f2f_comp_df.Date = pd.to_datetime(f2f_comp_df.Date, format = "%Y-%m-%d")
#f2f_comp_df = f2f_comp_df.replace(r"\\N",0,regex = True)
f2f_comp_df.Date = pd.to_datetime(f2f_comp_df.Date)
#f2f_comp_daily = pd.merge(dates_df, f2f_comp_df, on = "Date", how = "left")
f2f_comp_daily = f2f_comp_df
# Combining Total daily f2f_sch and f2f_comp
Apts_df = pd.merge(f2f_sch_daily, f2f_comp_daily, on = ["Date", "Dept", "Service"], how = "outer")
from keras.models import load_model
from random import seed
from random import randint
import numpy as np
import pandas as pd
import sys
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
def position_3D_approximation(result, strategy):
# result => predicted
#yclone = np.copy(y)
#dfyclone = pd.DataFrame.from_records(yclone)
global dfyclone
global yclone
#print(result)
#print(result.shape)
#print(result.shape[0], 3)
#result.reshape((result.shape[0], 3))
#a = result.reshape(result.shape[0], 3)
#print(a.shape)
if (Neural_Network_Model == 3): #LSTM without channel
result = result.reshape(result.shape[0], 3)
#print(result.shape)
#b = result.values.flatten()
#n_patterns=len(result)
#print(n_patterns)
#result = np.reshape(b,(n_patterns,3))
#print(result)
df3d = pd.DataFrame({'X':result[:,0],'Y':result[:,1],'Z':result[:,2]})
#print(df3d)
df3d['X-pred'] = 0
df3d['Y-pred'] = 0
df3d['Z-pred'] = 0
df3d['ch0'] = 0
df3d['ch1'] = 0
df3d['w'] = 0
#dfyclone = pd.DataFrame.from_records(yclone)
for index, row in df3d.iterrows():
#obtain the row with least geometric distance between predicted row and original rows (in yclone)
Xpred=df3d.loc[index, 'X']
Ypred=df3d.loc[index, 'Y']
Zpred=df3d.loc[index, 'Z']
if (strategy==0):
dfyclone['geodist'] = ( ((dfyclone[0] - Xpred) **2) + ((dfyclone[1] - Ypred) **2) + ((dfyclone[2] - Zpred) **2) )
row=dfyclone.loc[dfyclone['geodist'].idxmin()]
lowerGeodistIndex=row.name
else:
lowerGeodistIndex=randint(0,dfyclone.shape[0]-1)
X=yclone[lowerGeodistIndex,0]
Y=yclone[lowerGeodistIndex,1]
Z=yclone[lowerGeodistIndex,2]
ch0=yclone[lowerGeodistIndex,3]
ch1=yclone[lowerGeodistIndex,4]
df3d.loc[index, 'X-pred'] = X
df3d.loc[index, 'Y-pred'] = Y
df3d.loc[index, 'Z-pred'] = Z
df3d.loc[index, 'ch0'] = ch0
df3d.loc[index, 'ch1'] = ch1
#remove the hit used as approximation
dfyclone.drop(dfyclone.index[lowerGeodistIndex])
#print(df3d)
df3d.drop('X', axis=1, inplace=True)
df3d.drop('Y', axis=1, inplace=True)
df3d.drop('Z', axis=1, inplace=True)
#print(df3d)
#return the fourth hit of all tracks
return(df3d)
seed(1)
# configure gpu_options.allow_growth = True so that the CuDNNLSTM layer works on RTX GPUs
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
set_session(sess)
#Read input file with first 3 hits to dataframe
tracks_to_be_reconstructed_file = sys.argv[1]
all_hits_available_file = sys.argv[2]
trained_model_file = sys.argv[3]
reconstructed_tracks_file = sys.argv[4]
#strategyFlag=0 -> LSTM
#strategyFlag=1 -> Random
strategyFlag= int(sys.argv[5])
Neural_Network_Model= int(sys.argv[6])
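# Example invocation (added for illustration; the script and file names are placeholders):
#   python reconstruct_tracks.py tracks_first3hits.csv all_hits.csv model.h5 reconstructed.csv 0 3
# i.e. argv[1..6] = tracks file, all-hits file, trained model, output file,
# strategy flag (0 = LSTM, 1 = Random), neural network model id (1, 2 or 3).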
df = pd.read_csv(tracks_to_be_reconstructed_file)
# create dataset of all hits in dfyclone
df_all_hits = pd.read_csv(all_hits_available_file)
y=df_all_hits.iloc[:, [ 25,26,27,28,29 ]]
yclone = np.copy(y)
dfyclone = pd.DataFrame.from_records(yclone)
#all tracks have at maximum 29 hits
hits = np.arange(0,29,1)
firstHit=1
lastHit=4
df1 = df.iloc[:,1:25]
for firstHit in range(1, 26):
#for firstHit in range(1, 2):
lastHit = lastHit + 1
#begin with 3 know hits
known_hits= np.arange(firstHit,lastHit,1)
#next hit to predict
hit_to_predict=known_hits[3]
dataX2=df1.iloc[:, [ (hits[known_hits[0]]*6)+1,(hits[known_hits[0]]*6)+2,(hits[known_hits[0]]*6)+3 , (hits[known_hits[1]]*6)+1,(hits[known_hits[1]]*6)+2,(hits[known_hits[1]]*6)+3 , (hits[known_hits[2]]*6)+1,(hits[known_hits[2]]*6)+2,(hits[known_hits[2]]*6)+3 ]]
dataXfeatures=df1.iloc[:, [ (hits[known_hits[0]]*6)+4,(hits[known_hits[0]]*6)+5 , (hits[known_hits[1]]*6)+4 , (hits[known_hits[1]]*6)+5 , (hits[known_hits[2]]*6)+4 , (hits[known_hits[2]]*6)+5 ]]#dataXfeatures=df.iloc[:, [ hits[known_hits[0]]+4,hits[known_hits[0]]+5,hits[known_hits[0]]+6 , hits[known_hits[1]]+4,hits[known_hits[1]]+5,hits[known_hits[1]]+6 , hits[known_hits[2]]+4,hits[known_hits[2]]+5,hits[known_hits[2]]+6 ]]
#prepare data to inference
b = dataX2.values.flatten()
bfeat=dataXfeatures.values.flatten()
n_patterns=len(df)
X = np.reshape(b,(n_patterns,3,3))
Xfeat = np.reshape(bfeat,(n_patterns,3,2))
#perform the prediction
model = load_model(trained_model_file)
if (Neural_Network_Model == 1): #LSTM with channel as features
result = model.predict([X, Xfeat],verbose=1)
if (Neural_Network_Model == 2): #MLP
result = model.predict(dataX2,verbose=1)
if (Neural_Network_Model == 3): #LSTM without channel
result = model.predict(X,verbose=1)
pred = position_3D_approximation(result, strategyFlag)
#concat tracks with predicted positions
#print("df1")
#print(df1)
df1 = pd.concat([df1, pred], axis=1)
import os
import time
import shutil
import sys
sys.path.append('C:/prismx/')
import h5py as h5
import pandas as pd
import numpy as np
import random
import show_h5 as ph5
import seaborn as sns
import matplotlib.patches as mpatches
from scipy import stats
import matplotlib.pyplot as plt
import prismx as px
f100 = pd.read_csv("logs/validationscore100.tsv", sep="\t")
pgene = stats.ttest_ind(f100["global_gene"], f100["prismx_gene"])[1]
sns.set(font_scale = 2)
f, ax = plt.subplots(figsize=(6, 6), frameon=True)
ax.grid(True)
ax.set_facecolor("white")
ax.spines['bottom'].set_color('0')
ax.spines['top'].set_color('0')
ax.spines['right'].set_color('0')
ax.spines['left'].set_color('0')
plt.tight_layout()
ax.scatter(f100["global_gene"], f100["prismx_gene"])
ax.plot([0, 1], [0, 1], transform=ax.transAxes, color='r', ls='--')
ax.set_xlabel("global average AUC", fontsize=20)
ax.set_ylabel("PrismEXP average AUC", fontsize=20)
plt.savefig("figures/validation_gene.pdf")
plt.close()
plt.rcParams["axes.labelsize"] = 14
sns.set(font_scale = 3)
plt.tight_layout()
dd = pd.DataFrame({"global":f100["global_gene"], "PrismEXP":f100["prismx_gene"]})
ax = sns.violinplot(data=dd)
ax.set_ylabel("average AUC", fontsize=35)
plt.ylim(0.3, 1.2)
plt.plot([0, 1],[1.07, 1.07], 'k-', lw=2)
plt.text(-0.07, 1.1, "p-value: "+"{:.2e}".format(float(pgene)), fontsize=28)
plt.savefig("figures/validation_gene_violin.pdf",bbox_inches='tight')
plt.close()
pset = stats.ttest_ind(f100["global_set"], f100["prismx_set"])[1]
f, ax = plt.subplots(figsize=(6, 6))
ax.grid(True)
ax.set_facecolor("white")
ax.spines['bottom'].set_color('0')
ax.spines['top'].set_color('0')
ax.spines['right'].set_color('0')
ax.spines['left'].set_color('0')
plt.tight_layout()
ax.scatter(f100["global_set"], f100["prismx_set"])
ax.plot([0, 1], [0, 1], transform=ax.transAxes, color='r', ls='--')
ax.set_xlabel("global average AUC", fontsize=20)
ax.set_ylabel("PrismEXP average AUC", fontsize=20)
plt.savefig("figures/validation_set.pdf")
plt.close()
plt.rcParams["axes.labelsize"] = 14
sns.set(font_scale = 3)
plt.tight_layout()
dd = pd.DataFrame({"global":f100["global_set"], "PrismEXP":f100["prismx_set"]})
ax = sns.violinplot(data=dd)
ax.set_ylabel("average AUC", fontsize=35)
plt.ylim(0.3, 1.2)
plt.plot([0, 1],[1.07, 1.07], 'k-', lw=2)
plt.text(-0.07, 1.1, "p-value: "+"{:.2e}".format(float(pset)), fontsize=28)
plt.savefig("figures/validation_set_violin.pdf",bbox_inches='tight')
plt.close()
f50 = pd.read_csv("logs/validationscore50.tsv", sep="\t")
import pandas as pd
df_raw = pd.read_csv("/data/wifi-analysis/competition/train.csv")
import numpy as np
import pytest
import pandas as pd
from pandas import CategoricalIndex, Index
import pandas._testing as tm
class TestMap:
@pytest.mark.parametrize(
"data, categories",
[
(list("abcbca"), list("cab")),
(pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
],
ids=["string", "interval"],
)
def test_map_str(self, data, categories, ordered):
# GH 31202 - override base class since we want to maintain categorical/ordered
index = CategoricalIndex(data, categories=categories, ordered=ordered)
result = index.map(str)
expected = CategoricalIndex(
map(str, data), categories=map(str, categories), ordered=ordered
)
tm.assert_index_equal(result, expected)
def test_map(self):
ci = pd.CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)
tm.assert_index_equal(result, exp)
ci = pd.CategoricalIndex(
list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"
)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(
list("ababc"), categories=list("bac"), ordered=False, name="XXX"
)
tm.assert_index_equal(result, exp)
# GH 12766: Return an index not an array
tm.assert_index_equal(
ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")
)
# change categories dtype
ci = pd.CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)
def f(x):
return {"A": 10, "B": 20, "C": 30}.get(x)
result = ci.map(f)
exp = pd.CategoricalIndex(
[10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False
)
tm.assert_index_equal(result, exp)
result = ci.map(pd.Series([10, 20, 30], index=["A", "B", "C"]))
tm.assert_index_equal(result, exp)
result = ci.map({"A": 10, "B": 20, "C": 30})
tm.assert_index_equal(result, exp)
def test_map_with_categorical_series(self):
# GH 12756
a = pd.Index([1, 2, 3, 4])
b = pd.Series(["even", "odd", "even", "odd"], dtype="category")
c = pd.Series(["even", "odd", "even", "odd"])
exp = CategoricalIndex(["odd", "even", "odd", np.nan])
tm.assert_index_equal(a.map(b), exp)
exp = pd.Index(["odd", "even", "odd", np.nan])
tm.assert_index_equal(a.map(c), exp)
@pytest.mark.parametrize(
("data", "f"),
(
([1, 1, np.nan], pd.isna),
([1, 2, np.nan], pd.isna),
([1, 1, np.nan], {1: False}),
([1, 2, np.nan], {1: False, 2: False}),
([1, 1, np.nan], pd.Series([False, False])),
from collections import OrderedDict
import contextlib
from datetime import datetime, time
from functools import partial
import os
from urllib.error import URLError
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.util.testing as tm
@contextlib.contextmanager
def ignore_xlrd_time_clock_warning():
"""
Context manager to ignore warnings raised by the xlrd library,
regarding the deprecation of `time.clock` in Python 3.7.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
action="ignore",
message="time.clock has been deprecated",
category=DeprecationWarning,
)
yield
read_ext_params = [".xls", ".xlsx", ".xlsm", ".ods"]
engine_params = [
# Add any engines to test here
# When defusedxml is installed it triggers deprecation warnings for
# xlrd and openpyxl, so catch those here
pytest.param(
"xlrd",
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param(
"openpyxl",
marks=[
td.skip_if_no("openpyxl"),
pytest.mark.filterwarnings("ignore:.*html argument"),
],
),
pytest.param(
None,
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param("odf", marks=td.skip_if_no("odf")),
]
def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
"""
Filter out invalid (engine, ext) pairs instead of skipping, as that
produces 500+ pytest.skips.
"""
engine = engine.values[0]
if engine == "openpyxl" and read_ext == ".xls":
return False
if engine == "odf" and read_ext != ".ods":
return False
if read_ext == ".ods" and engine != "odf":
return False
return True
def _transfer_marks(engine, read_ext):
"""
engine gives us a pytest.param object with some marks, read_ext is just
a string. We need to generate a new pytest.param inheriting the marks.
"""
values = engine.values + (read_ext,)
new_param = pytest.param(values, marks=engine.marks)
return new_param
@pytest.fixture(
autouse=True,
params=[
_transfer_marks(eng, ext)
for eng in engine_params
for ext in read_ext_params
if _is_valid_engine_ext_pair(eng, ext)
],
)
def engine_and_read_ext(request):
"""
Fixture for Excel reader engine and read_ext, only including valid pairs.
"""
return request.param
@pytest.fixture
def engine(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return engine
@pytest.fixture
def read_ext(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return read_ext
class TestReaders:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for read_excel calls.
"""
func = partial(pd.read_excel, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "read_excel", func)
def test_usecols_int(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["A", "B", "C"])
# usecols as int
msg = "Passing an integer for `usecols`"
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols=3)
# usecols as int
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=3
)
def test_usecols_list(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["B", "C"])
df1 = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=[0, 2, 3]
)
df2 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=[0, 2, 3]
)
# TODO add index to xls file)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_usecols_str(self, read_ext, df_ref):
df1 = df_ref.reindex(columns=["A", "B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A:D"
)
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C,D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C,D"
)
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C:D"
)
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@pytest.mark.parametrize(
"usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
)
def test_usecols_diff_positional_int_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["A", "C"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=usecols
)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]])
def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["B", "D"]]
expected.index = range(len(expected))
result = pd.read_excel("test1" + read_ext, "Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, read_ext, df_ref):
expected = df_ref
result = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, read_ext, df_ref):
expected = df_ref[["C", "D"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols="A,D:E"
)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, read_ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, "Sheet1", usecols="D:E1")
def test_index_col_label_error(self, read_ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=["A"], usecols=["A", "C"]
)
def test_index_col_empty(self, read_ext):
# see gh-9208
result = pd.read_excel("test1" + read_ext, "Sheet3", index_col=["A", "B", "C"])
expected = DataFrame(
columns=["D", "E", "F"],
index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [None, 2])
def test_index_col_with_unnamed(self, read_ext, index_col):
# see gh-18792
result = pd.read_excel("test1" + read_ext, "Sheet4", index_col=index_col)
expected = DataFrame(
[["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"]
)
if index_col:
expected = expected.set_index(expected.columns[index_col])
tm.assert_frame_equal(result, expected)
def test_usecols_pass_non_existent_column(self, read_ext):
msg = (
"Usecols do not match columns, "
"columns expected but not found: " + r"\['E'\]"
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E"])
def test_usecols_wrong_type(self, read_ext):
msg = (
"'usecols' must either be list-like of "
"all strings, all unicode, all integers or a callable."
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E1", 0])
def test_excel_stop_iterator(self, read_ext):
parsed = pd.read_excel("test2" + read_ext, "Sheet1")
expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, read_ext):
parsed = pd.read_excel("test3" + read_ext, "Sheet1")
expected = DataFrame([[np.nan]], columns=["Test"])
tm.assert_frame_equal(parsed, expected)
def test_excel_table(self, read_ext, df_ref):
df1 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
df2 = pd.read_excel("test1" + read_ext, "Sheet2", skiprows=[1], index_col=0)
# TODO add index to file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
df3 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_reader_special_dtypes(self, read_ext):
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
("StrCol", [1, 2, 3, 4, 5]),
# GH5394 - this is why convert_float isn't vectorized
("Str2Col", ["a", 3, "c", "d", "e"]),
(
"DateCol",
[
datetime(2013, 10, 30),
datetime(2013, 10, 31),
datetime(1905, 1, 1),
datetime(2013, 12, 14),
datetime(2015, 3, 14),
],
),
]
)
)
basename = "test_types"
# should read in correctly and infer types
actual = pd.read_excel(basename + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
# if not coercing number, then int comes in as float
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
actual = pd.read_excel(basename + read_ext, "Sheet1", convert_float=False)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = pd.read_excel(basename + read_ext, "Sheet1", index_col=icol)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
# convert_float and converters should be different but both accepted
expected["StrCol"] = expected["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext, "Sheet1", converters={"StrCol": str}
)
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext,
"Sheet1",
convert_float=False,
converters={"StrCol": str},
)
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self, read_ext):
basename = "test_converters"
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ["Found", "Found", "Found", "Not found", "Found"]),
("StrCol", ["1", np.nan, "3", "4", "5"]),
]
)
)
converters = {
"IntCol": lambda x: int(x) if x != "" else -1000,
"FloatCol": lambda x: 10 * x if x else np.nan,
2: lambda x: "Found" if x != "" else "Not found",
3: lambda x: str(x) if x else "",
}
# should read in correctly and set types of single cells (not array
# dtypes)
actual = pd.read_excel(basename + read_ext, "Sheet1", converters=converters)
tm.assert_frame_equal(actual, expected)
def test_reader_dtype(self, read_ext):
# GH 8212
basename = "testdtype"
actual = pd.read_excel(basename + read_ext)
expected = DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
).reindex(columns=["a", "b", "c", "d"])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str}
)
expected["a"] = expected["a"].astype("float64")
expected["b"] = expected["b"].astype("float32")
expected["c"] = ["001", "002", "003", "004"]
tm.assert_frame_equal(actual, expected)
with pytest.raises(ValueError):
pd.read_excel(basename + read_ext, dtype={"d": "int64"})
@pytest.mark.parametrize(
"dtype,expected",
[
(
None,
DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
),
),
(
{"a": "float64", "b": "float32", "c": str, "d": str},
DataFrame(
{
"a": Series([1, 2, 3, 4], dtype="float64"),
"b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
"c": ["001", "002", "003", "004"],
"d": ["1", "2", np.nan, "4"],
}
),
),
],
)
def test_reader_dtype_str(self, read_ext, dtype, expected):
# see gh-20377
basename = "testdtype"
actual = pd.read_excel(basename + read_ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
basename = "test_multisheet"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
# ensure this is not alphabetical to test order preservation
expected_keys = ["Charlie", "Alpha", "Beta"]
tm.assert_contains_all(expected_keys, dfs.keys())
# Issue 9930
# Ensure sheet order is preserved
assert expected_keys == list(dfs.keys())
def test_reading_multiple_specific_sheets(self, read_ext):
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
basename = "test_multisheet"
# Explicitly request duplicates. Only the set should be returned.
expected_keys = [2, "Charlie", "Charlie"]
dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys, dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_reading_all_sheets_with_blank(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# In the case where some sheets are blank.
# Issue #11711
basename = "blank_with_header"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
expected_keys = ["Sheet1", "Sheet2", "Sheet3"]
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self, read_ext):
actual = pd.read_excel("blank" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, DataFrame())
def test_read_excel_blank_with_header(self, read_ext):
expected = DataFrame(columns=["col_1", "col_2"])
actual = pd.read_excel("blank_with_header" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
def test_date_conversion_overflow(self, read_ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
expected = pd.DataFrame(
[
[pd.Timestamp("2016-03-12"), "<NAME>"],
[pd.Timestamp("2016-03-16"), "<NAME>"],
[1e20, "<NAME>"],
],
columns=["DateColWithBigInt", "StringCol"],
)
if pd.read_excel.keywords["engine"] == "openpyxl":
pytest.xfail("Maybe not supported by openpyxl")
result = pd.read_excel("testdateoverflow" + read_ext)
tm.assert_frame_equal(result, expected)
def test_sheet_name(self, read_ext, df_ref):
filename = "test1"
sheet_name = "Sheet1"
df1 = pd.read_excel(
filename + read_ext, sheet_name=sheet_name, index_col=0
) # doc
with ignore_xlrd_time_clock_warning():
df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_excel_read_buffer(self, read_ext):
pth = "test1" + read_ext
expected = pd.read_excel(pth, "Sheet1", index_col=0)
with open(pth, "rb") as f:
actual = pd.read_excel(f, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_bad_engine_raises(self, read_ext):
bad_engine = "foo"
with pytest.raises(ValueError, match="Unknown engine: foo"):
pd.read_excel("", engine=bad_engine)
@tm.network
def test_read_from_http_url(self, read_ext):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/excel/test1" + read_ext
)
url_table = pd.read_excel(url)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
import pandas as pd
import numpy as np
from tensorflow import keras
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
# instance of the neural network to predict future prices
class Neural_Network:
def neural_network(self, n_df):
df = n_df.copy()
df = df.replace('^\s*$', np.nan, regex=True)
#df['itemId'] = df['itemId'].astype(int)
df['listingType'] = pd.get_dummies(df['listingType'])
df['endPrice'] = df['endPrice'].astype(np.float)
df['shippingServiceCost'] = df['shippingServiceCost'].astype(np.float)
#df['shippingServiceCost'] = df['shippingServiceCost'].interpolate()
df['shippingServiceCost'] = df['shippingServiceCost'].fillna(df['shippingServiceCost'].mean())
df['bidCount'] = df['bidCount'].astype(np.float)
#df['bidCount'] = df['bidCount'].interpolate()
df['bidCount'] = df['bidCount'].fillna(df['bidCount'].mean())
df['watchCount'] = df['watchCount'].astype(np.float)
#df['watchCount'] = df['watchCount'].interpolate()
df['watchCount'] = df['watchCount'].fillna(df['watchCount'].mean())
df['returnsAccepted'] = pd.get_dummies(df['returnsAccepted'])
df['handlingTime'] = df['handlingTime'].astype(np.float)
df['sellerUserName'] = pd.get_dummies(df['sellerUserName'])
df['feedbackScore'] = df['feedbackScore'].astype(np.float)
df['positiveFeedbackPercent'] = df['positiveFeedbackPercent'].astype(np.float)
df['topRatedSeller'] = pd.get_dummies(df['topRatedSeller'])
df['endDate'] = pd.get_dummies(df['endDate'])
#print('\nnull values in dataframe are:\n', df.isnull().any())
features_df = df.drop(['itemId','title','endPrice','location','endTime','startTime','endTimeOfDay'], axis=1)
corr = features_df.corr()
#print('\ncorr:\n', corr)
num_of_cols = len(features_df.columns)
#print('\nnumber of feature columns:\n', num_of_cols)
features = features_df.values
target = df.endPrice.values
#print('\ntarget values:\n', target)
#print('\nfeatures values:\n', features)
#print('\ntarget shape:\n', target.shape)
#print('\nfeatures shape:\n', features.shape)
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.3, random_state=124)
#print('\nTRAIN TEST SPLIT EXECUTED\n')
X_train = MinMaxScaler(feature_range=(-1,1)).fit_transform(X_train)
X_test = MinMaxScaler(feature_range=(-1,1)).fit_transform(X_test)
#print('\nX_train and X_test scaled\n')
y_train = y_train.reshape(-1,1)
y_test = y_test.reshape(-1,1)
y_train = MinMaxScaler(feature_range=(-1,1)).fit_transform(y_train)
y_test = MinMaxScaler(feature_range=(-1,1)).fit_transform(y_test)
y_train = y_train.reshape(-1)
y_test = y_test.reshape(-1)
#print('\nshape of X_train:\n', X_train.shape)
#print('\nshape of X_test:\n', X_test.shape)
#print('\nshape of y_train:\n', y_train.shape)
#print('\nshape of y_test:\n', y_test.shape)
model = keras.Sequential()
'''
input_layer = keras.layers.Dense(16, input_dim=num_of_cols, activation='sigmoid')
model.add(input_layer)
hidden_layer = keras.layers.Dense(num_of_cols, input_dim=16, activation='sigmoid')
model.add(hidden_layer)
output_layer = keras.layers.Dense(1, input_dim=num_of_cols, activation='softmax')
model.add(output_layer)
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_split=0.2, batch_size=32, epochs=100, shuffle=True)
'''
input_layer = keras.layers.Dense(units=16, kernel_initializer='uniform', input_dim=num_of_cols, activation='relu')
model.add(input_layer)
hidden_layer1 = keras.layers.Dense(units=18, kernel_initializer='uniform', activation='relu')
model.add(hidden_layer1)
model.add(keras.layers.Dropout(rate=0.25))
hidden_layer2 = keras.layers.Dense(20, kernel_initializer='uniform', activation='relu')
model.add(hidden_layer2)
hidden_layer3 = keras.layers.Dense(24, kernel_initializer='uniform', activation='relu')
model.add(hidden_layer3)
output_layer = keras.layers.Dense(1, kernel_initializer='uniform', activation='sigmoid')
model.add(output_layer)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_split=0.2, batch_size=15, epochs=10, shuffle=2)
predictions = model.predict(X_test, verbose=1, steps=1)
#print('\npredictions shape:\n', predictions.shape)
#print('\npredictions values:\n', predictions)
pred_nn_df = pd.DataFrame({'predictions': pd.Series(predictions.reshape(-1)), 'actual_sell_prices': pd.Series(y_test)})
#!/usr/bin/env python3
"""A module for analyzing answer agreement.
The experiment is this:
* Each member of a group records the answers from a respondent.
* All members of all groups submit their surveys to form a dataset.
This dataset is analyzed with this module.
A dataset is all submissions for a survey. A column in the dataset
should identify which submitted surveys belong to which group. The
simplest way to do this is to have a `group_id` column.
Standard use cases for analysis are take as input a dataset and
possibly a start and end column. In that case, the following analyzes:
>>> DatasetAgreement.from_file(df_path, group_column, mask_first=first,
... mask_last=last)
Another standard use case is to analyze results from an ODK dataset.
Here, an XlsForm and a dataset are used as inputs, and possibly a start
and end column. The XlsForm helps to make a mask of meaningful columns
for comparison. Use the following:
>>> DatasetAgreement.from_csv_and_odk(csv_path, odk_path, group_column,
... mask_first=first, mask_last=last)
Logger:
Has the name of 'aa.aa'
Dataset columns:
C_CORRECT_ANSWER = 'correct_answer'
C_ALL_MISSING = 'all_missing'
C_GROUP_ALL_CORRECT = 'group_all_correct'
C_CORRECT_ANSWER_COUNT = 'correct_answer_count'
C_PERCENT_CORRECT = 'percent_correct'
Module attributes:
SKIPPED_ODK_TYPES: A sequence of blacklisted ODK types that are not
used for comparison within groups.
SKIPPED_ODK_TYPES_START: A sequence of strings that blacklist all
ODK types that begin with one of them.
This module's mascot is aa, the Hawaiian name for a frothy lava flow.
"""
from collections import namedtuple
from typing import Iterable, List
import pandas as pd
import xlrd
C_CORRECT_ANSWER = 'correct_answer'
C_ALL_MISSING = 'all_missing'
C_GROUP_ALL_CORRECT = 'group_all_correct'
C_CORRECT_ANSWER_COUNT = 'correct_answer_count'
C_PERCENT_CORRECT = 'percent_correct'
class GroupAgreement:
"""Class to hold analysis information for a group within a dataset.
All results are attributes of the object.
Instance attributes:
dataframe: The full dataframe for this group
group_id: The group id for this group
column_mask: A list of column labels for which columns to keep
and do analysis on. Default of None means to use all
columns.
group_size: The number of group members in this group.
agreement_results: A dataframe with the column_mask as index and
various calculated values of interest as columns.
total_agreement: The proportion of masked questions with
agreement.
comparisons: The number of columns of comparison
Properties:
masked_dataframe: Group dataframe with the column mask applied.
disagree_dataframe: Dataframe with only the columns where there
is disagreement.
"""
def __init__(self, dataframe: pd.DataFrame, group_id,
column_mask: list = None):
"""Initialize a GroupAgreement and do all relevant analysis.
Args:
dataframe: The full dataframe for this group.
group_id: The group id for this group. Could be any value
stored in a dataframe.
column_mask: A list of column labels for which columns to
keep and do analysis on. Default of None means to use
all columns.
"""
self.dataframe = dataframe
self.group_id = group_id
self.column_mask = column_mask
self.group_size = len(self.dataframe)
masked = self.masked_dataframe
self.agreement_results = self.generate_agreement_measures(masked)
completed_mask = ~self.agreement_results[C_ALL_MISSING]
self.total_agreement = self.agreement_results.loc[
completed_mask, C_GROUP_ALL_CORRECT
].mean()
self.comparisons = completed_mask.sum()
@staticmethod
def generate_agreement_measures(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Apply analysis function to columns, combine results, return.
The input dataframe is a standard dataset with measurement
variables as column headers and records as individual surveys.
This function creates a dataframe where the measurement
variables form the index and the new column headers are the
various computed quantities.
Args:
dataframe: The full dataframe for this group.
Returns:
A dataframe with the computed quantities of interest.
"""
result = dataframe.T.apply(GroupAgreement.analyze_answer_votes, axis=1)
return result
@staticmethod
def analyze_answer_votes(series: pd.Series) -> pd.Series:
"""Analyze a column of the dataset for agreement.
This function is the workhorse of the analysis. It calculates
all relevant quantities and saves them as a common series.
This function calculates:
* What the "correct" answer is based on the number of votes
* Whether all values are missing
* Whether each group member answered the same way
* The number of group members answering the "correct" answer
* The percentage of group members answering the "correct"
answer
Args:
series: The column of the dataset to analyze.
Returns:
A series with calculated values and identifying names.
"""
correct_answer = None
all_missing = True
group_all_correct = False
correct_answer_count = 0
group_size = len(series)
counts = series.value_counts()
if counts.empty:
# All answers are NA, do nothing
pass
elif len(counts) == 1:
# All are non-NA (at least one) are one answer
correct_answer = counts.index[0]
all_missing = False
group_all_correct = counts.iloc[0] == group_size
correct_answer_count = counts.iloc[0]
elif counts.iloc[0] == counts.iloc[1]:
# Top two answers have same vote count
all_missing = False
else:
# One answer has more than anything else
correct_answer = counts.index[0]
all_missing = False
correct_answer_count = counts.iloc[0]
percent_correct = correct_answer_count / group_size
new_series = pd.Series((
correct_answer,
all_missing,
group_all_correct,
correct_answer_count,
percent_correct
), index=(
C_CORRECT_ANSWER,
C_ALL_MISSING,
C_GROUP_ALL_CORRECT,
C_CORRECT_ANSWER_COUNT,
C_PERCENT_CORRECT,
))
return new_series
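    # Illustrative sketch (comment only, not part of the original module): for a
    # column such as pd.Series(["b", "b", "b", "a"]) this method reports
    # correct_answer="b", correct_answer_count=3, percent_correct=0.75 and
    # group_all_correct=False; for a 2-2 tie it reports no correct answer at all.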
@property
def masked_dataframe(self) -> pd.DataFrame:
"""Return the masked dataframe.
Combines the full dataframe and the mask to create a masked
dataframe.
"""
masked_dataframe = self.dataframe
if self.column_mask is not None:
masked_dataframe = self.dataframe[self.column_mask]
return masked_dataframe
@property
def disagree_dataframe(self) -> pd.DataFrame:
"""Return the dataframe of questions with disagreement.
Returns:
The subset of rows of the masked dataframe where there is
disagreement and at least one answer.
"""
completed_mask = ~self.agreement_results[C_ALL_MISSING]
some_disagreement = ~self.agreement_results[C_GROUP_ALL_CORRECT]
masked = self.masked_dataframe
return masked.loc[:, completed_mask & some_disagreement]
def print_summary(self):
"""Print a summary of results."""
print(f'*** Summary for group {repr(self.group_id)}')
print(f'- Points of comparison: {self.comparisons}')
print(f'- Total agreement: {self.total_agreement}')
def __repr__(self):
"""Get a representation of this object."""
return f'<GroupAgreement {repr(self.group_id)}>'
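def _example_group_agreement():
    """Hypothetical usage sketch (illustration only, not in the original module).

    Three surveys from one group answer two questions; "q1" has full
    agreement while "q2" does not, so total_agreement works out to 0.5.
    """
    df = pd.DataFrame({"q1": ["yes", "yes", "yes"], "q2": ["a", "a", "b"]})
    group = GroupAgreement(df, group_id="demo", column_mask=["q1", "q2"])
    return group.total_agreement  # 0.5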
class DatasetAgreement:
"""Class to hold answer agreement information for a whole dataset.
Access to the individual GroupAgreement objects is meant through
the `group` method.
Instance attributes:
dataframe: The full dataframe for the full dataset (has all
groups).
group_column: The column label for the column that defines
which rows are in which groups. Default of None means the
entire dataset is from the same group.
column_mask: A list of column labels for which columns to keep
and do analysis on. Default of None means to use all
columns.
unaccounted: A dataframe containing the rows that do not fit
into any of the GroupAgreement objects. These are the rows
that are null in a specified group_column.
_data: A list of GroupAgreement objects, one for each group.
Properties:
group_ids: A list of the IDs in this dataset.
"""
def __init__(self, dataframe: pd.DataFrame, group_column: str = None,
column_mask: list = None):
"""Analyze the groups of a dataframe.
For each group in the dataset, this __init__ creates a
GroupAgreement object and saves it.
This __init__ uses a Pandas dataframe as input.
The unaccounted attribute is populated only when a group_column
is specified.
Args:
dataframe: The full dataframe for the full dataset (has
all groups).
group_column: The column label for the column that defines
which rows are in which groups. Default of None means
the entire dataset is from the same group.
column_mask: A list of column labels for which columns to
keep and do analysis on. Default of None means to use
all columns.
"""
self.dataframe = dataframe
self.group_column = group_column
self.column_mask = column_mask
self.unaccounted = None
self._data = []
if self.group_column is None:
this_group = GroupAgreement(self.dataframe, None, self.column_mask)
self._data.append(this_group)
else:
grouped = self.dataframe.groupby(group_column)
for key, group in grouped:
this_group = GroupAgreement(group, key, self.column_mask)
self._data.append(this_group)
unaccounted_bool = self.dataframe[group_column].isnull()
self.unaccounted = self.dataframe[unaccounted_bool]
# pylint: disable=too-many-arguments
@classmethod
def from_file(cls, df_path: str, group_column: str = None,
column_names: List[str] = None, mask_first: str = None,
mask_last: str = None):
"""Create a DatasetAgreement object from file.
This static method allows to initialize a DatasetAgreement
object with more flexibility than the __init__ by accepting a
path to the dataset and additional parameters to get the column
masking correct.
        The resulting mask from inputs `column_names`, `mask_first`, and
        `mask_last` is the set of columns that satisfy all conditions.
Args:
df_path: Path to the dataset. Currently only .csv and
.xls(x) files are supported
group_column: The column label for the column that defines
which rows are in which groups. Default of None means
the entire dataset is from the same group.
column_names: A list of column labels for which columns to
keep and do analysis on. Default of None means to use
all columns.
mask_first: The first possible column label to keep in the
mask. This does not have to be in the column_mask.
Default of None means start with the first column.
mask_last: The last possible column label to keep in the
mask. This does not have to be in the column_mask.
Default of None means use through the last column.
Returns:
A properly initialized DatasetAgreement class.
"""
if df_path.endswith('.csv'):
dataframe = pd.read_csv(df_path)
elif df_path.endswith(('.xls', '.xlsx')):
dataframe = pd.read_excel(df_path)
else:
msg = (f'Unable to create dataset from "{df_path}". Known '
f'extensions are .csv, .xls, and .xlsx')
raise TypeError(msg)
mask = create_mask(dataframe, column_names, mask_first, mask_last)
return cls(dataframe, group_column, mask)
# pylint: disable=too-many-arguments
@classmethod
def from_file_and_odk(cls, df_path: str, odk_path: str,
group_column: str = None, mask_first: str = None,
mask_last: str = None, odk_sep: str = ':'):
"""Create a DatasetAgreement object for ODK data.
This static method allows to initialize a DatasetAgreement
object by using information from an ODK file to produce a
column mask.
        The resulting mask combines the columns derived from the XlsForm
        with `mask_first` and `mask_last`: only columns satisfying all
        conditions are kept.
Args:
df_path: Path to the dataset. Currently only .csv and
.xls(x) files are supported
            odk_path: The path to the XlsForm associated with df_path
group_column: The column label for the column that defines
which rows are in which groups. Default of None means
the entire dataset is from the same group.
mask_first: The first possible column label to keep in the
mask. This does not have to be in the column_mask.
Default of None means start with the first column.
mask_last: The last possible column label to keep in the
mask. This does not have to be in the column_mask.
Default of None means use through the last column.
            odk_sep: The group prefix separator. ODK CSV files use
                either '-' or ':', depending on whether ODK Briefcase or ODK
                Aggregate, respectively, created the file.
Returns:
A properly initialized DatasetAgreement class.
"""
if df_path.endswith('.csv'):
dataframe = | pd.read_csv(df_path) | pandas.read_csv |
import logging
import numpy as np
import pandas as pd
import re
from os import PathLike
from pathlib import Path
from scipy.ndimage import maximum_filter
from typing import (
Generator,
List,
Optional,
Sequence,
Tuple,
Union,
)
from steinbock import io
try:
from readimc import MCDFile, TXTFile
from readimc.data import Acquisition, AcquisitionBase
imc_available = True
except:
imc_available = False
_logger = logging.getLogger(__name__)
def list_mcd_files(mcd_dir: Union[str, PathLike]) -> List[Path]:
return sorted(Path(mcd_dir).rglob("*.mcd"))
def list_txt_files(txt_dir: Union[str, PathLike]) -> List[Path]:
return sorted(Path(txt_dir).rglob("*.txt"))
def create_panel_from_imc_panel(
imc_panel_file: Union[str, PathLike],
imc_panel_channel_col: str = "Metal Tag",
imc_panel_name_col: str = "Target",
imc_panel_keep_col: str = "full",
imc_panel_ilastik_col: str = "ilastik",
) -> pd.DataFrame:
imc_panel = pd.read_csv(
imc_panel_file,
sep=",|;",
dtype={
imc_panel_channel_col: pd.StringDtype(),
imc_panel_name_col: pd.StringDtype(),
imc_panel_keep_col: pd.BooleanDtype(),
imc_panel_ilastik_col: pd.BooleanDtype(),
},
engine="python",
true_values=["1"],
false_values=["0"],
)
for required_col in (imc_panel_channel_col, imc_panel_name_col):
if required_col not in imc_panel:
raise ValueError(f"Missing '{required_col}' column in IMC panel")
for notnan_col in (
imc_panel_channel_col,
imc_panel_keep_col,
imc_panel_ilastik_col,
):
if notnan_col in imc_panel and imc_panel[notnan_col].isna().any():
raise ValueError(f"Missing values for '{notnan_col}' in IMC panel")
rename_columns = {
imc_panel_channel_col: "channel",
imc_panel_name_col: "name",
imc_panel_keep_col: "keep",
imc_panel_ilastik_col: "ilastik",
}
drop_columns = [
panel_col
for imc_panel_col, panel_col in rename_columns.items()
if panel_col in imc_panel.columns and panel_col != imc_panel_col
]
panel = imc_panel.drop(columns=drop_columns).rename(columns=rename_columns)
for _, g in panel.groupby("channel"):
panel.loc[g.index, "name"] = " / ".join(g["name"].dropna().unique())
if "keep" in panel:
panel.loc[g.index, "keep"] = g["keep"].any()
if "ilastik" in panel:
panel.loc[g.index, "ilastik"] = g["ilastik"].any()
panel = panel.groupby(panel["channel"].values).aggregate("first")
panel = _clean_panel(panel) # ilastik column may be nullable uint8 now
ilastik_mask = panel["ilastik"].fillna(False).astype(bool)
panel["ilastik"] = pd.Series(dtype=pd.UInt8Dtype())
panel.loc[ilastik_mask, "ilastik"] = range(1, ilastik_mask.sum() + 1)
return panel
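# Illustration (hypothetical panel rows, not part of steinbock itself): two IMC
# panel rows sharing the metal tag "Ir191" with targets "DNA1" and "DNA2" are
# collapsed above into a single channel named "DNA1 / DNA2"; its keep/ilastik
# flags become the logical OR of the originals before ilastik indices are
# renumbered consecutively starting at 1.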
def create_panel_from_mcd_files(
mcd_files: Sequence[Union[str, PathLike]]
) -> pd.DataFrame:
panels = []
for mcd_file in mcd_files:
with MCDFile(mcd_file) as f:
for slide in f.slides:
for acquisition in slide.acquisitions:
panel = _create_panel_from_acquisition(acquisition)
panels.append(panel)
panel = pd.concat(panels, ignore_index=True, copy=False)
return _clean_panel(panel)
def create_panel_from_txt_files(
txt_files: Sequence[Union[str, PathLike]]
) -> pd.DataFrame:
panels = []
for txt_file in txt_files:
with TXTFile(txt_file) as f:
panel = _create_panel_from_acquisition(f)
panels.append(panel)
panel = pd.concat(panels, ignore_index=True, copy=False)
return _clean_panel(panel)
def filter_hot_pixels(img: np.ndarray, thres: float) -> np.ndarray:
kernel = np.ones((1, 3, 3), dtype=bool)
kernel[0, 1, 1] = False
max_neighbor_img = maximum_filter(img, footprint=kernel, mode="mirror")
return np.where(img - max_neighbor_img > thres, max_neighbor_img, img)
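def _example_filter_hot_pixels() -> np.ndarray:
    # Minimal sketch (not part of steinbock): a lone spike that exceeds its
    # brightest 8-neighbour by more than `thres` is clipped to that neighbour.
    img = np.zeros((1, 5, 5), dtype=np.float32)
    img[0, 2, 2] = 100.0  # isolated hot pixel
    return filter_hot_pixels(img, thres=50.0)  # the spike is replaced by 0.0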
def preprocess_image(
img: np.ndarray, hpf: Optional[float] = None
) -> np.ndarray:
img = img.astype(np.float32)
if hpf is not None:
img = filter_hot_pixels(img, hpf)
return io._to_dtype(img, io.img_dtype)
def try_preprocess_images_from_disk(
mcd_files: Sequence[Union[str, PathLike]],
txt_files: Sequence[Union[str, PathLike]],
channel_names: Optional[Sequence[str]] = None,
hpf: Optional[float] = None,
) -> Generator[
Tuple[Path, Optional["Acquisition"], np.ndarray, Optional[Path], bool],
None,
None,
]:
unmatched_txt_files = list(txt_files)
for mcd_file in mcd_files:
try:
with MCDFile(mcd_file) as f_mcd:
for slide in f_mcd.slides:
for acquisition in slide.acquisitions:
matched_txt_file = _match_txt_file(
mcd_file, acquisition, unmatched_txt_files
)
if matched_txt_file is not None:
unmatched_txt_files.remove(matched_txt_file)
channel_ind = None
if channel_names is not None:
channel_ind = _get_channel_indices(
acquisition, channel_names
)
if isinstance(channel_ind, str):
_logger.warning(
f"Channel {channel_ind} not found for "
f"acquisition {acquisition.id} in file "
"{mcd_file}; skipping acquisition"
)
continue
img = None
recovered = False
try:
img = f_mcd.read_acquisition(acquisition)
except IOError:
_logger.warning(
f"Error reading acquisition {acquisition.id} "
f"from file {mcd_file}"
)
if matched_txt_file is not None:
_logger.warning(
f"Restoring from file {matched_txt_file}"
)
try:
with TXTFile(matched_txt_file) as f_txt:
img = f_txt.read_acquisition()
if channel_names is not None:
channel_ind = _get_channel_indices(
f_txt, channel_names
)
if isinstance(channel_ind, str):
_logger.warning(
f"Channel {channel_ind} "
"not found in file "
f"{matched_txt_file}; "
"skipping acquisition"
)
continue
recovered = True
except IOError:
_logger.exception(
"Error reading file "
f"{matched_txt_file}"
)
if img is not None: # exceptions ...
if channel_ind is not None:
img = img[channel_ind, :, :]
img = preprocess_image(img, hpf=hpf)
yield (
Path(mcd_file),
acquisition,
img,
Path(matched_txt_file)
if matched_txt_file is not None
else None,
recovered,
)
del img
except:
_logger.exception(f"Error reading file {mcd_file}")
while len(unmatched_txt_files) > 0:
txt_file = unmatched_txt_files.pop(0)
try:
channel_ind = None
with TXTFile(txt_file) as f:
if channel_names is not None:
channel_ind = _get_channel_indices(f, channel_names)
if isinstance(channel_ind, str):
_logger.warning(
f"Channel {channel_ind} not found in file "
f"{txt_file}; skipping acquisition"
)
continue
img = f.read_acquisition()
if channel_ind is not None:
img = img[channel_ind, :, :]
img = preprocess_image(img, hpf=hpf)
yield Path(txt_file), None, img, None, False
del img
except:
_logger.exception(f"Error reading file {txt_file}")
def _create_panel_from_acquisition(
acquisition: "AcquisitionBase",
) -> pd.DataFrame:
panel = pd.DataFrame(
data={
"channel": acquisition.channel_names,
"name": acquisition.channel_labels,
"keep": True,
"ilastik": range(1, acquisition.num_channels + 1),
"deepcell": np.nan,
},
)
panel["channel"] = panel["channel"].astype(pd.StringDtype())
panel["name"] = panel["name"].astype( | pd.StringDtype() | pandas.StringDtype |
import requests
import pandas as pd
import numpy as np
import arviz as az
idx = pd.IndexSlice
def get_raw_covidtracking_data():
""" Gets the current daily CSV from COVIDTracking """
url = "https://covidtracking.com/api/v1/states/daily.csv"
data = pd.read_csv(url)
return data
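# Typical pipeline (sketch; the run date below is only an example): download the
# raw CSV once, then apply the corrections below for a given model run date.
# raw = get_raw_covidtracking_data()
# df = process_covidtracking_data(raw, run_date=pd.Timestamp("2020-07-01"))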
def process_covidtracking_data(data: pd.DataFrame, run_date: pd.Timestamp):
""" Processes raw COVIDTracking data to be in a form for the GenerativeModel.
In many cases, we need to correct data errors or obvious outliers."""
data = data.rename(columns={"state": "region"})
data["date"] = pd.to_datetime(data["date"], format="%Y%m%d")
data = data.set_index(["region", "date"]).sort_index()
data = data[["positive", "total"]]
# Too little data or unreliable reporting in the data source.
data = data.drop(["MP", "GU", "AS", "PR", "VI"])
# On Jun 5 Covidtracking started counting probable cases too
# which increases the amount by 5014.
# https://covidtracking.com/screenshots/MI/MI-20200605-184320.png
data.loc[idx["MI", pd.Timestamp("2020-06-05") :], "positive"] -= 5014
# From CT: On June 19th, LDH removed 1666 duplicate and non resident cases
# after implementing a new de-duplicaton process.
data.loc[idx["LA", pd.Timestamp("2020-06-19") :], :] += 1666
# Now work with daily counts
data = data.diff().dropna().clip(0, None).sort_index()
# Michigan missed 6/18 totals and lumped them into 6/19 so we've
# divided the totals in two and equally distributed to both days.
data.loc[idx["MI", pd.Timestamp("2020-06-18")], "total"] = 14871
data.loc[idx["MI", pd.Timestamp("2020-06-19")], "total"] = 14871
# Note that when we set total to zero, the model ignores that date. See
# the likelihood function in GenerativeModel.build
# Huge outlier in NJ causing sampling issues.
data.loc[idx["NJ", pd.Timestamp("2020-05-11")], :] = 0
# Huge outlier in CA causing sampling issues.
data.loc[idx["CA", pd.Timestamp("2020-04-22")], :] = 0
# Huge outlier in CA causing sampling issues.
# TODO: generally should handle when # tests == # positives and that
# is not an indication of positive rate.
data.loc[idx["SC", | pd.Timestamp("2020-06-26") | pandas.Timestamp |
import gym
import math
import numpy as np
import torch
import torch.nn as nn
import tqdm
import wandb
import inspect
from typing import List, Tuple, Optional, Dict
from dataclasses import dataclass
from gym import spaces
from numpy import floor, inf
from sequoia.methods import Method
from sequoia.settings import (Actions, Environment, Observations, Rewards, Setting,
SettingType)
from sequoia.settings.sl import ClassIncrementalSetting, SLEnvironment, SLSetting
from sequoia.settings.rl import RLEnvironment, RLSetting
from sequoia.common.spaces import Image, TypedDictSpace
from simple_parsing import ArgumentParser
from torch import Tensor
from wandb.wandb_run import Run
from .models import (ActorCritic, Model, model_types_map, optimizers_map,
schedulers_map)
# Hparams include all hyperparameters for all methods
from .utils import BaseHParams, seed_everything
from .utils.rl_utils import sample_action
class BaseMethod(Method, target_setting=Setting):
"""BaseMethod as a base for both SL and RL Settings
"""
@dataclass
class HParams(BaseHParams):
pass
def __init__(self, hparams: BaseHParams = None) -> None:
"""initialization f the base method class
Args:
hparams (BaseHParams, optional): Hyperparameters used by the experiment. Defaults to None.
"""
self.hparams: BaseHParams = hparams or BaseHParams()
self.model: Model
self.optimizer: torch.optim.Optimizer
self._scheduler_step = False
self.scheduler = None
self.buffer = None
self.n_classes = None
self.smoothing = 0.04 # for total loss plotting
self.prev_loss = None
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
self._is_rl = False
if torch.cuda.is_available():
# cudnn auto tuner for faster inference
torch.backends.cudnn.benchmark = True
# reproducibility
seed_everything(self.hparams.seed)
def configure(self, setting: SettingType):
"""Called before the method is applied on a setting (before training).
You can use this to instantiate your model, for instance, since this is
where you get access to the observation & action spaces.
Args:
setting (SettingType): Current setting from Sequoia (ClassIncremental RL or SL)
"""
# This example is intended for classification / discrete action spaces.
setting.num_workers = self.hparams.num_workers
setting.batch_size = self.hparams.batch_size
observation_space = setting.observation_space
action_space = setting.action_space
reward_space = setting.reward_space
if isinstance(setting, RLSetting):
# Default batch size of 1 in RL
self.hparams.batch_size = 1
self.max_rl_train_steps = setting.steps_per_phase
# to enable rendering of the gym
self.render = False
self._is_rl = True
else:
# SL setting
assert isinstance(action_space, spaces.Discrete)
assert action_space == reward_space
self.classes_per_task = setting.increment
self.n_classes = action_space.n
x_space = observation_space.x
# Downgrade the encoder to a mlp if the input space is not an image.
if not isinstance(observation_space.x, Image):
self.hparams.model_type = "mlp"
# update model to actor critic for RL
model_type = model_types_map[self.hparams.model_type]
if self._is_rl:
self.model = ActorCritic(model_type, x_space, action_space.n).to(
self.device
)
else:
self.model = model_type(x_space, self.n_classes, bic=self.hparams.bic).to(self.device)
optim_type, optim_defaults = self._get_optim_defaults(
self.hparams.optimizer)
if "base_optim" in optim_defaults:
base_optim_type, base_optim_defaults = self._get_optim_defaults(
optim_defaults["base_optim"]
)
base_optim = base_optim_type(
self.model.parameters(), **base_optim_defaults,
)
optim_defaults.pop("base_optim")
self.optimizer = optim_type(base_optim, **optim_defaults)
else:
self.optimizer = optim_type(
self.model.parameters(), **optim_defaults,)
if self.hparams.use_scheduler:
scheduler_type, scheduler_defaults = schedulers_map[
self.hparams.scheduler_name
]
if "step" in scheduler_defaults:
self._scheduler_step = scheduler_defaults["step"]
scheduler_defaults.pop("step")
self.scheduler = scheduler_type(
self.optimizer, **scheduler_defaults)
self.task = 0
self.epoch = 0
# smooth loss initialization
self.prev_loss = None
self.best_smooth_loss = inf
self.task_type = "RL" if self._is_rl else "SL"
self.iteration_name = "step" if self._is_rl else "epoch"
def setup_wandb(self, run: Run) -> None:
run.config.update(self.hparams.__dict__)
def _get_optim_defaults(self, optim_name):
optim_type, optim_defaults = optimizers_map[optim_name]
optim_signature = inspect.signature(
optim_type.__init__).parameters.keys()
if "weight_decay" in optim_signature:
if "weight_decay" not in optim_defaults:
optim_defaults["weight_decay"] = self.hparams.weight_decay
else:
self.hparams.weight_decay = optim_defaults["weight_decay"]
if "lr" in optim_signature:
if "lr" not in optim_defaults:
optim_defaults["lr"] = self.hparams.learning_rate
else:
self.hparams.learning_rate = optim_defaults["lr"]
return optim_type, optim_defaults
def _method_specific_configure(self, setting: ClassIncrementalSetting):
"""Method specific initialization used for vars and settings needed per method
Args:
setting (ClassIncrementalSetting): Setting used in the configuration
"""
def fit(
self,
train_env: Environment[Observations, Actions, Rewards],
valid_env: Environment[Observations, Actions, Rewards],
):
"""fitting function that is used for both SL and RL
Args:
train_env (Environment[Observations, Actions, Rewards]): training environment can be active or passive
valid_env (Environment[Observations, Actions, Rewards]): validation environment active or passive
"""
if self._is_rl:
self.fit_rl(train_env, valid_env)
else:
self.fit_sl(train_env, valid_env)
def fit_rl(self, train_env: RLEnvironment, valid_env: RLEnvironment):
"""fitting function that is used for both RL
Args:
train_env (Environment[Observations, Actions, Rewards]): training environment
valid_env (Environment[Observations, Actions, Rewards]): validation environment
"""
nb_epochs = self.pre_fit(train_env)
all_lengths: List[int] = []
average_lengths: List[float] = []
all_rewards: List[float] = []
episode = 0
total_steps = 0
nb_steps = nb_epochs * self.max_rl_train_steps
while not train_env.is_closed() and total_steps < nb_steps:
episode += 1
print(f"Starting Episode {episode}")
log_probs: List[Tensor] = []
critic_values: List[Tensor] = []
rewards: List[Tensor] = []
entropy_term = 0
observation: RLSetting.Observations = train_env.reset()
# Convert numpy arrays in the observation into Tensors on the right device.
observation = observation.torch(device=self.device)
done = False
episode_steps = 0
while not done and total_steps < self.max_rl_train_steps:
episode_steps += 1
actor_output, critic_val = self.model.get_action_critic(
observation.x.float()
)
critic_val = critic_val.cpu().detach().numpy()
action, log_prob, entropy = sample_action(
actor_output, return_entropy_log_prob=True
)
new_observation: Observations
reward: RLSetting.Rewards
reward, new_observation, done = self.send_actions(
train_env, actor_output
)
action = RLSetting.Actions(
y_pred=action.cpu().detach().numpy())
if self.render:
train_env.render()
new_observation = new_observation.torch(device=self.device)
total_steps += 1
reward_value: float = reward.y
rewards.append(reward_value)
critic_values.append(critic_val)
log_probs.append(log_prob)
entropy_term += entropy
observation = new_observation
# TODO update buffer with new observations
Qval, _ = self.model.get_action_critic(new_observation.x.float())
Qval = Qval.detach().cpu().numpy()
all_rewards.append(np.sum(rewards))
all_lengths.append(episode_steps)
average_lengths.append(np.mean(all_lengths[-10:]))
if episode % 10 == 0:
print(
f"step {total_steps}/{nb_steps}, "
f"episode: {episode}, "
f"reward: {np.sum(rewards)}, "
f"total length: {episode_steps}, "
f"average length: {average_lengths[-1]} \n"
)
if total_steps >= nb_steps:
print(f"Reached the limit of {nb_steps} steps.")
break
# compute Q values
Q_values = np.zeros_like(critic_values)
# Use the last value from the critic as the final value estimate.
q_value = Qval
for t, reward in reversed(list(enumerate(rewards))):
q_value = reward + self.hparams.gamma * q_value
Q_values[t] = q_value
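        # Worked example (comment only; gamma value is illustrative): with
        # gamma = 0.99, rewards [1, 0, 1] and a final critic estimate Qval = 0.5,
        # the reversed loop above yields
        #   Q[2] = 1 + 0.99 * 0.5     = 1.495
        #   Q[1] = 0 + 0.99 * 1.495   = 1.48005
        #   Q[0] = 1 + 0.99 * 1.48005 ≈ 2.465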
# update actor critic
critic_values = torch.as_tensor(
critic_values, dtype=torch.float, device=self.device
)
Q_values = torch.as_tensor(
Q_values, dtype=torch.float, device=self.device)
log_probs = torch.stack(log_probs)
advantage = Q_values - critic_values
actor_loss = (-log_probs * advantage).mean()
critic_loss = 0.5 * advantage.pow(2).mean()
ac_loss = (
actor_loss
+ critic_loss
+ self.hparams.entropy_term_coefficient * entropy_term
)
# TODO use backward function in base_method
self.optimizer.zero_grad()
ac_loss.backward()
self.optimizer.step()
self.post_fit()
def fit_sl(self, train_env: SLEnvironment, valid_env: SLEnvironment):
""" Example train loop.
You can do whatever you want with train_env and valid_env here.
NOTE: In the Settings where task boundaries are known (in this case all
the supervised CL settings), this will be called once per task.
"""
nb_epochs = self.pre_fit(train_env)
for epoch in range(nb_epochs):
self.model.train()
print(f"Starting {self.iteration_name} {epoch}")
self.epoch = epoch
# Training loop:
stop = self._train_loop(train_env)
# Validation loop:
if not (self.hparams.submission) and not (self.hparams.early_stop_train):
early_stop = self._validation_loop(valid_env)
if early_stop:
print(f"Early stopping at {self.iteration_name} {epoch}. ")
break
elif self.hparams.early_stop_train:
if stop:
print(
f"Early stopping during {self.iteration_name} {epoch}. ")
break
self.post_fit()
def pre_fit(self, train_env: Environment):
"""Prefit called before fitting a new task
Args:
train_env (Environment[Observations, Actions, Rewards]): training environment
Returns:
int: number of epochs used for that task
"""
if self._is_rl:
n_batches_per_task = self.max_rl_train_steps
else:
n_batches_per_task = train_env.length()
if self.hparams.use_scheduler:
if self.hparams.scheduler_name == "lambdalr":
# used to tune cyclic learning rate
start_lr = 1e-7
end_lr = 0.2
def lambda_fn(x):
return math.exp(
x
* math.log(end_lr / start_lr)
/ (self.hparams.max_epochs_per_task * n_batches_per_task)
)
self.scheduler.__init__(self.optimizer, lambda_fn)
elif self.hparams.scheduler_name == "cyclic":
step_size_up = 4 * n_batches_per_task
step_size_down = step_size_up
self.scheduler.total_size = step_size_up + step_size_down
self.scheduler.step_ratio = step_size_up / self.scheduler.total_size
self.best_scheduler = self.scheduler.state_dict()
if not(self._is_rl):
# early stopping initialization
if self.hparams.early_stop:
self.best_val_loss = inf
self.best_iteration = 0
self.patience = self.hparams.early_stop_patience
if self.hparams.reload_best:
self.best_model = self.model.state_dict()
self.best_iteration = 0
self.best_optimizer = self.optimizer.state_dict()
self.best_buffer = None
if self.hparams.early_stop_train:
self.train_patience = self.hparams.early_stop_train_patience
self.best_smooth_loss = inf
print(f"task {self.task}")
self.prev_loss = None
nb_epochs = int(floor(self.hparams.max_epochs_per_task))
return nb_epochs
def post_fit(self):
"""Called after training the current task
"""
if self.hparams.wandb_logging and self.hparams.early_stop:
wandb.run.summary[
f"best {self.iteration_name} on {self.task_type} task {self.task}"
] = self.best_iteration
if (
self.hparams.reload_best
and not (self.hparams.submission)
and self.best_model is not None
):
            # FIXME need to think of a way to enable reload best during a submission
print(
f"Loading model from {self.iteration_name} {self.best_iteration}!")
self.model.load_state_dict(self.best_model)
if self.scheduler is not None:
self.scheduler.load_state_dict(self.best_scheduler)
self.optimizer.load_state_dict(self.best_optimizer)
# loading buffer corresponding to best epoch
# make sure that best buffer was not empty before loading (this could happen in the first task)
# Warning: the earlier the best epoch happens, the less we added examples to the buffer
if self.best_buffer is not None and "examples" in self.best_buffer:
print(f"Loading buffer from epoch {self.best_iteration}!")
self.buffer.load_state_dict(self.best_buffer)
if self.hparams.wandb_logging:
self._additional_wandb_logging()
# TODO self.task == 11 can be used on last task
if self.hparams.bic and self.task == 11:
self._fit_bic()
self.hparams.max_epochs_per_task *= self.hparams.max_epochs_decay_per_task
self.hparams.early_stop_patience *= self.hparams.early_stop_decay_per_task
self.model.eval()
def _fit_bic(self):
"""BIC based fit
"""
print("Starting bic fit")
self.model.new_task()
self.model.eval()
memory_dict = self.buffer.get_data(self.hparams.buffer_size)
memory_inputs = memory_dict["examples"]
memory_targets = memory_dict["labels"]
buffer_data = torch.utils.data.TensorDataset(
memory_inputs, memory_targets)
buffer_dataloader = torch.utils.data.DataLoader(
buffer_data, batch_size=self.hparams.batch_size)
self.bic_optimizer = torch.optim.Adam([self.model.bic_params], lr=0.1)
# self.bic_optimizer = torch.optim.Adam(
# self.model.bic_layer.parameters(), lr=0.02)
for l in range(self.hparams.bic_epochs):
if self.hparams.submission:
# disable progress bar for faster train time
bic_pbar = buffer_dataloader
else:
postfix = {}
bic_pbar = tqdm.tqdm(buffer_dataloader)
bic_pbar.set_description(f"BIC Epoch {l}")
for i, (inputs, labels) in enumerate(bic_pbar):
self.bic_optimizer.zero_grad()
with torch.no_grad():
logits = self.model(inputs, bic=False)
unbiased_logits = self.model.apply_bic(logits)
loss_bic = self.loss(unbiased_logits, labels)
loss_bic.backward()
self.bic_optimizer.step()
if not (self.hparams.submission):
postfix.update({'CrossEntropyLoss': loss_bic.item()})
bic_pbar.set_postfix(postfix)
if self.hparams.wandb_logging:
# bic_param = self.model.bic_params.detach().cpu().numpy()
wandb.log({f"BIC/Task{self.task}": loss_bic.item(),
# f"BIC/Task{self.task}_w": bic_param[1],
# f"BIC/Task{self.task}_b": bic_param[0]
}
)
def _train_loop(self, train_env):
"""Training loop for all batches inside a training environment
Args:
train_env (Environment[Observations, Actions, Rewards]): training environment
Returns:
bool: Flag used to stop the training used with early stopping
"""
stop = False
torch.set_grad_enabled(True)
if self.hparams.submission:
# disable progress bar for faster train time
train_pbar = train_env
else:
postfix = {}
train_pbar = tqdm.tqdm(train_env)
train_pbar.set_description(f"Training Epoch {self.epoch}")
for i, batch in enumerate(train_pbar):
self.optimizer.zero_grad()
loss_dict, metrics_dict = self.shared_step(
batch, environment=train_env, validation=False,
)
self._backward_loss(loss_dict)
self.optimizer.step()
if not (self.hparams.submission):
postfix.update(metrics_dict)
train_pbar.set_postfix(postfix)
if self.hparams.wandb_logging:
# FIXME disable wandb for submission
# FIXME disable metrics_dict population
wandb_metrics_dict = {}
for key, value in metrics_dict.items():
wandb_metrics_dict[f"Loss/Task{self.task}/"+key] = value
wandb.log(wandb_metrics_dict)
if self._scheduler_step:
self._call_scheduler()
if self.hparams.early_stop_train:
if self.train_patience < 0:
stop = True
break
if not (self._scheduler_step):
self._call_scheduler()
return stop
def _call_scheduler(self):
"""Calls scheduler if it is enabled
"""
if self.scheduler is not None:
self.scheduler.step()
if self.hparams.wandb_logging:
wandb.log(
{
"Train/Task {}/learning_rate".format(
self.task
): self.optimizer.state_dict()["param_groups"][0]["lr"],
}
)
def _validation_loop(self, valid_env):
"""Validation loop after training the training
Args:
valid_env (Environment[Observations, Actions, Rewards]): validation environment
Returns:
flag: used to early stop or not
"""
self.model.eval()
torch.set_grad_enabled(False)
with tqdm.tqdm(valid_env) as val_pbar:
postfix = {}
val_pbar.set_description(f"Validation Epoch {self.epoch}")
epoch_val_loss = 0.0
for i, batch in enumerate(val_pbar):
val_loss_dict, metrics_dict = self.shared_step(
batch, environment=valid_env, validation=True,
)
epoch_val_loss += val_loss_dict["CrossEntropyLoss"]
postfix.update(metrics_dict, val_loss=epoch_val_loss.item())
val_pbar.set_postfix(postfix)
self.model.train()
torch.set_grad_enabled(True)
if self.hparams.wandb_logging:
wandb.log({f"Loss/Task{self.task}/val_loss": epoch_val_loss})
if self.hparams.early_stop:
if epoch_val_loss <= self.best_val_loss:
self.best_val_loss = epoch_val_loss
self.best_iteration = self.epoch
self.patience = self.hparams.early_stop_patience
if self.hparams.reload_best:
self.best_model = self.model.state_dict()
if self.scheduler is not None:
self.best_scheduler = self.scheduler.state_dict()
if self.buffer is not None:
self.best_buffer = self.buffer.state_dict()
self.best_optimizer = self.optimizer.state_dict()
else:
self.patience -= 1
print(f"Patience is {self.patience}")
return self.patience < 0
else:
return False
def _create_metric_dict(self, loss_dict, y_pred, image_labels):
"""Creates metric dictionary automatically
Args:
loss_dict (dict): includes all losses
y_pred (tensor): predictions
image_labels (tensor): image labels
Returns:
dict: including metric summary incuding accuracy
"""
if self.hparams.submission:
return {}
accuracy = (y_pred == image_labels).sum().float() / len(image_labels)
metrics_dict = {"accuracy": accuracy.cpu().item()}
for loss_name, loss_val in loss_dict.items():
metrics_dict[loss_name] = loss_val.detach().cpu().item()
return metrics_dict
def _backward_loss(self, loss_dict, retain_graph=False):
"""back-propagation using input loss dictionary
Args:
loss_dict (dict): dictionary of losses used
retain_graph (bool, optional): flag used to retain graph if we will call backprop twice on same network. Defaults to False.
"""
# first step to do a backward on incoming loss using autograd
n_losses = len(loss_dict)
loss = 0
compute_smooth_loss = (
self.hparams.early_stop_train or self.hparams.wandb_logging
)
for loss_indx, loss_name in enumerate(loss_dict):
loss_val = loss_dict[loss_name]
loss_val.backward(retain_graph=loss_indx <
n_losses - 1 or retain_graph)
if compute_smooth_loss and loss_name == "dark":
loss += loss_val.item()
if compute_smooth_loss:
if self.prev_loss is not None:
loss = self.smoothing * loss + \
(1 - self.smoothing) * self.prev_loss
self.prev_loss = loss
if self.hparams.early_stop_train and self.epoch > 0 and self.task > 0:
if loss < self.best_smooth_loss:
self.best_smooth_loss = loss
self.train_patience = self.hparams.early_stop_train_patience
else:
self.train_patience -= 1
if self.hparams.wandb_logging:
wandb.log(
{"Loss/Task{}/smoothDarkLoss".format(self.task): loss, }
)
def _add_to_buffer(self, examples, logits, labels, task_labels, loss_scores):
"""Add data to buffer used for replay based methods
Args:
examples (tensor): batch of examples
logits (tensor): batch of predicted logits
labels (tensor): batch of labels
task_labels (list): list of task labels
loss_scores (tensor): individual losses used to prioritize sampling
"""
save_to_buffer = self.buffer is not None
if self.hparams.save_from_epoch > 0:
save_to_buffer = (
save_to_buffer and self.hparams.save_from_epoch <= self.epoch
)
if save_to_buffer:
with torch.no_grad():
self.buffer.add_data(
{
"examples": examples,
"logits": logits,
"labels": labels,
"task_labels": task_labels,
"loss_scores": loss_scores,
}
)
def _additional_wandb_logging(self):
"""Logging some extra information to wandb
"""
if self.hparams.wandb_logging_buffer and self.buffer is not None:
import plotly.express as px
import pandas as pd
data = self.buffer.get_all_data()
data.pop("examples")
df = | pd.DataFrame(data) | pandas.DataFrame |
import warnings
import numpy as np
import pandas as pd
def isNumberAndIsNaN(obj):
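    # NaN is not equal to itself, so `obj != obj` is True only for NaN-like values.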
return obj != obj
def scale_range(x, new_min=0.0, new_max=1.0, old_min=None, old_max=None, squash_outside_range=True, squash_inf=False, ):
"""
Scales a sequence to fit within a new range.
If squash_inf is set, then infinite values will take on the
extremes of the new range (as opposed to staying infinite).
Args:
x:
new_min:
new_max:
old_min:
old_max:
squash_outside_range:
squash_inf:
Note:
Infinity in the input is disregarded in the construction of the scale of the mapping.
>>> scale_range([1,3,5])
array([ 0. , 0.5, 1. ])
>>> scale_range([1,2,3,4,5])
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> scale_range([1,3,5, np.inf])
array([ 0. , 0.5, 1. , inf])
>>> scale_range([1,3,5, -np.inf])
array([ 0. , 0.5, 1. , -inf])
>>> scale_range([1,3,5, -np.inf], squash_inf=True)
array([ 0. , 0.5, 1. , 0. ])
>>> scale_range([1,3,5, np.inf], squash_inf=True)
array([ 0. , 0.5, 1. , 1. ])
>>> scale_range([1,3,5], new_min=0.5)
array([ 0.5 , 0.75, 1. ])
>>> scale_range([1,3,5], old_min=1, old_max=4)
array([ 0. , 0.66666667, 1. ])
>>> scale_range([5], old_max=4)
array([ 1.])
"""
if squash_inf and not squash_outside_range:
# TODO: "warn" is an unresolved reference
warn(ValueError(
'Makes no sense to squash infinity but not other numbers outside the source range. Will squash all outside range.'))
squash_outside_range = True
if isinstance(x, list):
x = np.array(x)
if old_min is None:
old_min = np.min(x[~np.isinf(x)])
if old_max is None:
old_max = np.max(x[~np.isinf(x)])
old_range = old_max - old_min
new_max = float(new_max)
new_min = float(new_min)
new_range = new_max - new_min
retval = (new_range * (x - old_min) / old_range) + new_min
if squash_inf:
retval[np.isinf(x) & (x > 0)] = new_max
retval[np.isinf(x) & (x < 0)] = new_min
if squash_outside_range:
retval[~np.isinf(x) & (x > old_max)] = new_max
retval[~np.isinf(x) & (x < old_min)] = new_min
return retval
def generate_random_data():
np.random.seed(42)
size = 10000
test_data_frame = pd.DataFrame()
test_data_frame['entity'] = list(range(size))
test_data_frame['variant'] = np.random.choice(['A', 'B'], size=size, p=[0.6, 0.4])
test_data_frame['normal_same'] = np.random.normal(size=size)
test_data_frame['normal_shifted'] = np.random.normal(size=size)
test_data_frame.loc[test_data_frame['variant'] == 'B', 'normal_shifted'] \
= np.random.normal(loc=1.0, size=test_data_frame['normal_shifted'][test_data_frame['variant'] == 'B'].shape[0])
test_data_frame['feature'] = np.random.choice(['has', 'non'], size=size)
test_data_frame['normal_shifted_by_feature'] = np.random.normal(size=size)
randdata = np.random.normal(loc=1.0, size=size)
ii = (test_data_frame['variant'] == 'B') & (test_data_frame['feature'] == 'has')
with warnings.catch_warnings(record=True) as w:
# ignore the 'flat[index' warning that comes out of pandas (and is
# not ours to fix)
warnings.simplefilter('ignore', DeprecationWarning)
test_data_frame.loc[ii, 'normal_shifted_by_feature'] = randdata
# provides random treatment start time in the past year
# test_data_frame['treatment_start_time'] = np.random.choice(list(range(int(time() - 1*365*24*60*60), int(time()))), size=size)
test_data_frame['treatment_start_time'] = np.random.choice(list(range(10)), size=size)
test_data_frame['normal_unequal_variance'] = np.random.normal(size=size)
test_data_frame.loc[test_data_frame['variant'] == 'B', 'normal_unequal_variance'] \
= np.random.normal(scale=10,
size=test_data_frame['normal_unequal_variance'][test_data_frame['variant'] == 'B'].shape[0])
metadata = {
'primary_KPI': 'normal_shifted',
'source': 'simulated',
'experiment': 'random_data_generation'
}
return test_data_frame, metadata
def generate_random_data_n_variants(n_variants=3):
np.random.seed(42)
size = 10000
test_data_frame = | pd.DataFrame() | pandas.DataFrame |
import json
import os
import sqlite3
import pyAesCrypt
import pandas
from os import stat
from datetime import datetime
import time
import numpy
# Global variables for use by this file
bufferSize = 64*1024
password = os.environ.get('ENCRYPTIONPASSWORD')
# py -c 'import databaseAccess; databaseAccess.reset()'
def reset():
resetActivities()
resetSplits()
# py -c 'import databaseAccess; databaseAccess.resetActivities()'
def resetActivities():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS activities;')
conn.commit()
conn.close()
encryptDatabase()
# py -c 'import databaseAccess; databaseAccess.resetSplits()'
def resetSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS splits;')
conn.commit()
conn.close()
encryptDatabase()
def getLastDate():
decryptDatabase()
lastActivityDate = '1970-01-01T00:00:00Z'
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='activities';")
result = cur.fetchone()
if result is not None:
# There is data, so let's grab the max datetime
cur.execute("SELECT MAX(start_date_local) FROM activities;")
result = cur.fetchone()
if result is not None:
# Found a max date
lastActivityDate, = result
conn.commit()
conn.close()
encryptDatabase()
return lastActivityDate
def setConfig(strava_tokens):
decryptDatabase()
print('Lets put the tokens into the database')
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS config;')
cur.execute('CREATE TABLE config (token_type VARCHAR, access_token VARCHAR, expires_at BIGINT, expires_in INT, refresh_token VARCHAR);')
cur.execute('INSERT INTO config (token_type, access_token, expires_at, expires_in, refresh_token) values (?, ?, ?, ?, ?);', (strava_tokens['token_type'], strava_tokens['access_token'], strava_tokens['expires_at'], strava_tokens['expires_in'], strava_tokens['refresh_token']))
conn.commit()
conn.close()
encryptDatabase()
def getConfig():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('SELECT * FROM config')
rows = cur.fetchall()
conn.commit()
conn.close()
encryptDatabase()
return json.loads(json.dumps( [dict(ix) for ix in rows] ))[0]
# Must be called to access the database, otherwise it can't be read
# py -c 'import databaseAccess; databaseAccess.decryptDatabase()'
def decryptDatabase():
if os.path.exists('strava_temp.sqlite'):
print('Database already decrypted! Skipping. . .')
else:
if os.path.exists('strava.sqlite'):
encFileSize = stat('strava.sqlite').st_size
with open('strava.sqlite', 'rb') as fIn:
with open('strava_temp.sqlite', 'wb') as fOut:
pyAesCrypt.decryptStream(fIn, fOut, password, bufferSize, encFileSize)
else:
print('Unable to find database to decrypt! Skipping. . .')
# Always call this after you touch the database to re-encrypt it
def encryptDatabase():
if os.path.exists('strava_temp.sqlite'):
if os.path.exists('strava.sqlite'):
os.remove('strava.sqlite')
with open('strava_temp.sqlite', 'rb') as fIn:
with open('strava.sqlite', 'wb') as fOut:
pyAesCrypt.encryptStream(fIn, fOut, password, bufferSize)
if os.path.exists('strava_temp.sqlite'):
os.remove('strava_temp.sqlite')
else:
print('Unable to find database to encrypt, skipping...')
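# Access pattern used throughout this module (sketch): decrypt to the temporary
# SQLite file, do the work, then re-encrypt and delete the plaintext copy, e.g.
# decryptDatabase()
# conn = sqlite3.connect('strava_temp.sqlite')
# ...  # queries / inserts
# conn.close()
# encryptDatabase()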
def setActvities(activities):
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS activities (id BIGINT, name NVARCHAR, upload_id BIGINT, type VARCHAR, distance NUMERIC, moving_time INT, average_speed NUMERIC, max_speed NUMERIC, total_elevation_gain NUMERIC, start_date_local DATETIME, average_cadence NUMERIC, average_watts NUMERIC, average_heartrate NUMERIC, UNIQUE(id));')
conn.commit()
for _, currentActivity in activities.iterrows():
acitivityName = currentActivity['name']
activityId = currentActivity['id']
print(f'Insert activity id [{activityId}], [{acitivityName}] to database')
cur.execute('INSERT OR IGNORE INTO activities (id, name, upload_id, type, distance, moving_time, average_speed, max_speed, total_elevation_gain, start_date_local, average_cadence, average_watts, average_heartrate) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);', (activityId, acitivityName, currentActivity['upload_id'], currentActivity['type'], currentActivity['distance'], currentActivity['moving_time'], currentActivity['average_speed'], currentActivity['max_speed'], currentActivity['total_elevation_gain'], currentActivity['start_date_local'], currentActivity['average_cadence'], currentActivity['average_watts'], currentActivity['average_heartrate']))
conn.commit()
print(f'[{acitivityName}] done. . .')
conn.close()
encryptDatabase()
def setSplits(splits):
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS splits (split_id INT, activity_id BIGINT, activity_date DATETIME, average_speed NUMERIC, distance NUMERIC, elapsed_time INT, elevation_difference NUMERIC, moving_time INT, pace_zone INT, split INT, average_grade_adjusted_speed NUMERIC, average_heartrate NUMERIC, UNIQUE(split_id, activity_id));')
conn.commit()
for index, row in splits.iterrows():
cur.execute('INSERT OR IGNORE INTO splits (split_id, activity_id, activity_date, average_speed, distance, elapsed_time, elevation_difference, moving_time, pace_zone, split, average_grade_adjusted_speed, average_heartrate) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);', (index, row['id'], row['date'], row['average_speed'], row['distance'], row['elapsed_time'], row['elevation_difference'], row['moving_time'], row['pace_zone'], row['split'], row['average_grade_adjusted_speed'], row['average_heartrate']))
conn.commit()
conn.close()
encryptDatabase()
def getActvitiesMissingSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
storedActivities = pandas.DataFrame()
if result is not None:
storedActivities = pandas.read_sql_query('SELECT * FROM activities WHERE id NOT IN (SELECT activity_id FROM splits)', conn)
else:
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
if result is not None:
storedActivities = pandas.read_sql_query('SELECT * FROM activities', conn)
conn.commit()
conn.close()
encryptDatabase()
return storedActivities
def deleteActvitiesMissingSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
if result is not None:
cur = conn.cursor()
cur.execute('DELETE FROM activities WHERE id NOT IN (SELECT activity_id FROM splits)')
conn.commit()
conn.close()
encryptDatabase()
def getSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
storedSplits = pandas.DataFrame()
if result is not None:
storedSplits = pandas.read_sql_query('SELECT s.split_id, s.activity_id, s.activity_date, s.average_speed, s.distance, s.elapsed_time, s.elevation_difference, s.moving_time, s.pace_zone, s.split, s.average_grade_adjusted_speed, s.average_heartrate, a.name, a.upload_id, a.type, a.distance AS total_distance, a.moving_time AS total_moving_time, a.average_speed AS total_average_speed, a.max_speed, a.total_elevation_gain, a.start_date_local, a.average_cadence FROM splits s INNER JOIN activities a ON a.id = s.activity_id', conn)
conn.commit()
conn.close()
encryptDatabase()
return storedSplits
def getMonthSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
storedSplits = pandas.DataFrame()
if result is not None:
storedSplits = pandas.read_sql_query('SELECT split_id, activity_id, STRFTIME("%Y-%m", activity_date) AS activity_month, activity_date, average_speed, distance, elapsed_time, elevation_difference, moving_time, pace_zone, split, average_grade_adjusted_speed, average_heartrate FROM splits', conn)
conn.commit()
conn.close()
encryptDatabase()
return storedSplits
def getActivityDistances():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='activities';")
result = cur.fetchone()
activityCount = pandas.DataFrame()
if result is not None:
activityCount = | pandas.read_sql_query("SELECT COUNT(*) AS cnt, CAST(CAST(nearest_5miles AS INT) AS VARCHAR(1000)) || ' < ' || CAST(CAST(nearest_5miles + 5 AS INT) AS VARCHAR(1000)) AS nearest_5miles FROM (SELECT id, ROUND((distance* 0.000621371)/5,0)*5 AS nearest_5miles FROM activities) a GROUP BY nearest_5miles", conn) | pandas.read_sql_query |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Exercise for practicing data preprocessing.
Builds a predictive model for diabetes and submits it to the server for
performance evaluation.
@author: <NAME> <<EMAIL>>
"""
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
import requests
print('\n - Reading the file with the diabetes dataset')
data = pd.read_csv('https://raw.githubusercontent.com/audreyemmely/mlclass/master/01_Preprocessing/diabetes_dataset.csv')
# show missing values in the dataset
print(data.isnull().sum())
# plot the histogram
plot1 = data.hist(figsize = (10,10))
plt.show()
# fill missing values with the mean (for normally distributed columns) or the median (otherwise)
data['Glucose'].fillna(data['Glucose'].mean(), inplace = True)
data['BloodPressure'].fillna(data['BloodPressure'].mean(), inplace = True)
data['SkinThickness'].fillna(data['SkinThickness'].median(), inplace = True)
data['Insulin'].fillna(data['Insulin'].median(), inplace = True)
data['BMI'].fillna(data['BMI'].median(), inplace = True)
# plot the histogram after imputation
plot2 = data.hist(figsize = (10,10))
plt.show()
# Create X and y for the machine learning algorithm.
print(' - Creating X and y for the learning algorithm from the diabetes_dataset file')
# To change which columns are considered, just edit the array below.
feature_cols = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness',
'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']
X = data[feature_cols]
y = data.Outcome
# Create the predictive model for the preprocessed dataset
print(' - Creating predictive model')
neigh = KNeighborsClassifier(n_neighbors=3)
neigh.fit(X, y)
# make predictions on the application file
print(' - Applying model and sending to the server')
data_app = pd.read_csv('https://raw.githubusercontent.com/audreyemmely/mlclass/master/01_Preprocessing/diabetes_app.csv')
data_app = data_app[feature_cols]
y_pred = neigh.predict(data_app)
# Send the predictions made with the model to the server
URL = "https://aydanomachado.com/mlclass/01_Preprocessing.php"
# TODO Replace with your key here
DEV_KEY = "shantay u stay"
# json payload to be sent to the server
data = {'dev_key':DEV_KEY,
'predictions': | pd.Series(y_pred) | pandas.Series |
""" Finnhub View """
__docformat__ = "numpy"
import os
from tabulate import tabulate
import pandas as pd
from matplotlib import pyplot as plt
from pandas.plotting import register_matplotlib_converters
from gamestonk_terminal.stocks.due_diligence import finnhub_model
from gamestonk_terminal.helper_funcs import plot_autoscale, export_data
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal import feature_flags as gtff
register_matplotlib_converters()
def plot_rating_over_time(rot: pd.DataFrame, ticker: str):
"""Plot rating over time
Parameters
----------
rot : pd.DataFrame
Rating over time
ticker : str
Ticker associated with ratings
"""
plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
rot.sort_values("period", inplace=True)
plt.plot(pd.to_datetime(rot["period"]), rot["strongBuy"], c="green", lw=3)
plt.plot(pd.to_datetime(rot["period"]), rot["buy"], c="lightgreen", lw=3)
plt.plot(pd.to_datetime(rot["period"]), rot["hold"], c="grey", lw=3)
plt.plot( | pd.to_datetime(rot["period"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.ticker import MultipleLocator
import lasio
import math
from datetime import datetime
import matplotlib.lines as mlines
# %% Load LAS File
las0 = lasio.read('WestAfricaLogs/YoyoPSI.las')
dstep = las0.well.STEP.value
znull = las0.well.STRT.value
zstop = las0.well.STOP.value
# %% Load Deviation Survey
df_Deviation = pd.read_csv('WestAfricaLogs/Yoyo_Deviation.txt', delimiter = '\t')
df_Deviation.rename(columns={"Depth": "DEPTH"}, inplace=True)
df_Deviation=df_Deviation[['DEPTH','TVD']].astype(float)
# %% Load Mudweight Data
df_MW = pd.read_csv('WestAfricaLogs/YoyoMW_md.txt', delimiter='\t')
#Check for capital letters
df_MW.rename(columns={"Depth": "DEPTH"}, inplace=True)
#Change type to float from int
df_MW=df_MW[['DEPTH','MW']].astype(float)
# %% Load LOT
df_LOT = | pd.read_csv('WestAfricaLogs/YoyoLOT_md.txt', delimiter='\t') | pandas.read_csv |
from typing import List, Text, Dict
from dataclasses import dataclass
import ssl
import urllib.request
from io import BytesIO
from zipfile import ZipFile
from urllib.parse import urljoin
from logging import exception
import os
from re import findall
from datetime import datetime, timedelta
import lxml.html as LH
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import time
from selenium.webdriver.support.ui import WebDriverWait
import warnings
import string
import re
from bs4 import BeautifulSoup
import requests
import glob
import time
import os
from fake_useragent import UserAgent
import brFinance.utils as utils
import pickle
ssl._create_default_https_context = ssl._create_unverified_context
warnings.simplefilter(action='ignore', category=FutureWarning)
@dataclass
class SearchENET:
"""
Perform webscraping on the page https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx according to the input parameters
"""
def __init__(self, cod_cvm: int = None, category: int = None, driver: utils.webdriver = None):
self.driver = driver
# self.cod_cvm_dataframe = self.cod_cvm_list()
self.cod_cvm = cod_cvm
if cod_cvm is not None:
self.check_cod_cvm_exist(self.cod_cvm)
self.category = category
if category is not None:
self.check_category_exist(self.category)
def cod_cvm_list(self) -> pd.DataFrame:
"""
        Returns a dataframe of all CVM codes and company names available at https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx
"""
if self.driver is None:
driver = utils.Browser.run_chromedriver()
else:
driver=self.driver
driver.get(f"https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx")
#wait_pageload()
for retrie in range(50):
try:
html = str(driver.find_element_by_id('hdnEmpresas').get_attribute("value"))
listCodCVM = re.findall("(?<=\_)(.*?)(?=\')", html)
listNomeEmp = re.findall("(?<=\-)(.*?)(?=\')", html)
codigos_cvm = pd.DataFrame(list(zip(listCodCVM, listNomeEmp)),
columns=['codCVM', 'nome_empresa'])
codigos_cvm['codCVM'] = pd.to_numeric(codigos_cvm['codCVM'])
if len(codigos_cvm.index) > 0:
break
else:
time.sleep(1)
except:
time.sleep(1)
if self.driver is None:
driver.quit()
return codigos_cvm
def check_cod_cvm_exist(self, cod_cvm) -> bool:
codigos_cvm_available = self.cod_cvm_list()
cod_cvm_exists = str(cod_cvm) in [str(cod_cvm_aux) for cod_cvm_aux in codigos_cvm_available['codCVM'].values]
if cod_cvm_exists:
return True
else:
raise ValueError('Código CVM informado não encontrado.')
def check_category_exist(self, category) -> bool:
search_categories_list = [21, 39]
if category in search_categories_list:
return True
else:
raise ValueError('Invalid category value. Available categories are:', search_categories_list)
@property
def search(self) -> pd.DataFrame:
"""
Returns dataframe of search results including cod_cvm, report's url, etc.
"""
dataInicial = '01012010'
dataFinal = datetime.today().strftime('%d%m%Y')
option_text = str(self.category)
if self.driver is None:
driver = utils.Browser.run_chromedriver()
else:
driver=self.driver
driver.get(f"https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx?codigoCVM={str(self.cod_cvm)}")
# Wait and click cboCategorias_chosen
for errors in range(10):
try:
driver.find_element_by_id('cboCategorias_chosen').click()
break
except:
time.sleep(1)
# Wait and click
for errors in range(10):
try:
driver.find_element_by_xpath(
f"//html/body/form[1]/div[3]/div/fieldset/div[5]/div[1]/div/div/ul/li[@data-option-array-index='{option_text}']").click()
break
except:
time.sleep(1)
# Wait and click
for errors in range(10):
try:
driver.find_element_by_xpath("//html/body/form[1]/div[3]/div/fieldset/div[4]/div[1]/label[4]").click()
break
except:
time.sleep(1)
# Wait and send keys txtDataIni
for errors in range(10):
try:
driver.find_element_by_id('txtDataIni').send_keys(dataInicial)
break
except:
time.sleep(1)
# Wait and send keys txtDataFim
for errors in range(10):
try:
driver.find_element_by_id('txtDataFim').send_keys(dataFinal)
break
except:
time.sleep(1)
# Wait and click btnConsulta
for errors in range(10):
try:
driver.find_element_by_id('btnConsulta').click()
break
except:
time.sleep(1)
# Wait html table load the results (grdDocumentos)
for errors in range(10):
try:
table_html = pd.read_html(str(driver.find_element_by_id('grdDocumentos').get_attribute("outerHTML")))[-1]
if len(table_html.index) > 0:
break
else:
time.sleep(1)
except:
time.sleep(1)
table_html = str(driver.find_element_by_id('grdDocumentos').get_attribute("outerHTML"))
table = LH.fromstring(table_html)
results = pd.read_html(table_html)
for df_result in results:
if len(df_result.index) > 0:
pattern = "OpenPopUpVer(\'(.*?)\')"
df_result['linkView'] = table.xpath('//tr/td/i[1]/@onclick')
df_result['linkDownload'] = table.xpath('//tr/td/i[2]/@onclick')
df_result['linkView'] = "https://www.rad.cvm.gov.br/ENET/" + \
df_result['linkView'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False)
df3 = df_result['linkDownload'].str.split(',', expand=True)
df3.columns = ['COD{}'.format(x+1) for x in df3.columns]
df_result = df_result.join(df3)
df_result['linkDownload'] = "https://www.rad.cvm.gov.br/ENET/frmDownloadDocumento.aspx?Tela=ext&numSequencia=" + \
df_result['COD1'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&numVersao=" + df_result['COD2'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&numProtocolo=" + df_result['COD3'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&descTipo=" + df_result['COD4'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&CodigoInstituicao=1"
df_result = df_result[['Código CVM', 'Empresa', 'Categoria', 'Tipo', 'Espécie',
'Data Referência', 'Data Entrega', 'Status', 'V', 'Modalidade',
'linkView', 'linkDownload']]
df_result['Data Referência'] = df_result['Data Referência'].str.split(
' ', 1).str[1]
df_result['Data Referência'] = pd.to_datetime(
df_result["Data Referência"], format="%d/%m/%Y", errors="coerce")
df_result = df_result[df_result["Status"] == "Ativo"]
df_result["Código CVM"] = self.cod_cvm
df_result = df_result[['Código CVM', 'Empresa', 'Categoria', 'Tipo', 'Espécie',
'Data Referência', 'Data Entrega', 'Status', 'V', 'Modalidade',
'linkView', 'linkDownload']]
df_result = df_result.reset_index(drop=True)
break
if self.driver is None:
driver.quit()
print(f"Resultados da busca ENET: {len(df_result.index)}")
return df_result
@dataclass
class FinancialReport:
def __init__(self, link: str, driver: utils.webdriver = None):
self.link = link
self.driver = driver
@property
def financial_reports(self) -> Dict:
"""
        Returns a dictionary with the financial reports (demonstrativos) available
        on the given ENET report page, keyed by report name.
"""
link = self.link
if self.driver is None:
driver = utils.Browser.run_chromedriver()
else:
driver=self.driver
erros = 0
max_retries = 10
dictDemonstrativos = None
while erros < max_retries:
try:
print("Coletando dados do link:", link)
driver.get(link)
# Wait page load the reports
for retrie in range(max_retries):
                    # Once the captcha has been solved, options_text holds the available statement (demonstrativo) options
options_text = [x.get_attribute("text") for x in driver.find_element_by_name(
"cmbQuadro").find_elements_by_tag_name("option")]
if len(options_text) > 0:
break
else:
time.sleep(1)
                # Iterate through the statements and store each dataframe in the dictionary
refDate = driver.find_element_by_id('lblDataDocumento').text
versaoDoc = driver.find_element_by_id(
'lblDescricaoCategoria').text.split(" - ")[-1].replace("V", "")
report = {"ref_date": refDate,
"versao": int(versaoDoc),
"cod_cvm": int(driver.find_element_by_id('hdnCodigoCvm').get_attribute("value"))
}
dictDemonstrativos = {}
for demonstrativo in options_text:
print(demonstrativo)
driver.find_element_by_xpath("//select[@name='cmbQuadro']/option[text()='{option_text}']".format(option_text=demonstrativo)).click()
iframe = driver.find_element_by_xpath(
"//iframe[@id='iFrameFormulariosFilho']")
driver.switch_to.frame(iframe)
html = driver.page_source
if demonstrativo == "Demonstração do Fluxo de Caixa":
index_moeda = -2
else:
index_moeda = -1
moedaUnidade = driver.find_element_by_id(
'TituloTabelaSemBorda').text.split(" - ")[index_moeda].replace("(", "").replace(")", "")
if demonstrativo == "Demonstração das Mutações do Patrimônio Líquido":
df = pd.read_html(html, header=0, decimal=',')[1]
converters = {c: lambda x: str(x) for c in df.columns}
df = pd.read_html(html, header=0, decimal=',',
converters=converters)[1]
else:
df = pd.read_html(html, header=0, decimal=',')[0]
converters = {c: lambda x: str(x) for c in df.columns}
df = pd.read_html(html, header=0, decimal=',',
converters=converters)[0]
for ind, column in enumerate(df.columns):
if column.strip() != "Conta" and column.strip() != "Descrição":
df[column] = df[column].astype(
str).str.strip().str.replace(".", "")
df[column] = pd.to_numeric(df[column], errors='coerce')
else:
df[column] = df[column].astype(
'str').str.strip().astype('str')
                    # Keep only the first value column (the most recent statement) and rename it to "Valor"
if demonstrativo != "Demonstração das Mutações do Patrimônio Líquido":
df = df.iloc[:, 0:3]
df.set_axis([*df.columns[:-1], 'Valor'],
axis=1, inplace=True)
                    # Add reference date and version to the dataframes
df["refDate"] = refDate
df["versaoDoc"] = versaoDoc
df["moedaUnidade"] = moedaUnidade
df["refDate"] = pd.to_datetime(df["refDate"], errors="coerce")
                    # Add to the statements dictionary
dictDemonstrativos[demonstrativo] = df
driver.switch_to.default_content()
print("-"*60)
                # Add reference date to the statements dict
report["reports"] = dictDemonstrativos
break
except Exception as exp:
print("Erro ao carregar demonstrativo. Tentando novamente...")
print(str(exp))
erros += 1
continue
if self.driver is None:
driver.quit()
return report
@dataclass
class Company:
def __init__(self, cod_cvm: int):
self.cod_cvm = cod_cvm
    def obtemCompCapitalSocial(self):
        self.ComposicaoCapitalSocial = composicao_capital_social([self.cod_cvm])
    def obterDadosCadastrais(self):
        listaCodCVM = obtemDadosCadastraisCVM(codCVM=self.cod_cvm)
        listaCodCVM = listaCodCVM[listaCodCVM["CD_CVM"] == self.cod_cvm]
        self.dadosCadastrais = listaCodCVM.to_dict('records')
@property
def reports(self) -> List:
driver = utils.Browser.run_chromedriver()
search_anual_reports = SearchENET(cod_cvm=self.cod_cvm, category=21, driver=driver).search
search_quarter_reports = SearchENET(cod_cvm=self.cod_cvm, category=39, driver=driver).search
search_reports_result = search_anual_reports.append(search_quarter_reports)
reports = {}
for index, report_info in search_reports_result.iterrows():
m = re.search(r"(?<=\Documento=)(.*?)(?=\&)", report_info['linkView'])
if m:
document_number = m.group(1)
# Create folder and save reports locally
path_save_reports = f'{os.getcwd()}/reports'
report_file = f'{path_save_reports}/{document_number}.plk'
utils.File.create_folder(path_save_reports)
# Check if report is available locally, otherwise scrape it.
if utils.File.check_exist(report_file):
with open(report_file, 'rb') as load_report:
report_obj = pickle.load(load_report)
print("Carregado localmente!")
else:
report_obj = FinancialReport(link=report_info["linkView"], driver=driver).financial_reports
with open(report_file, 'wb') as save_report:
pickle.dump(report_obj, save_report)
reports[report_obj["ref_date"]] = report_obj["reports"]
driver.quit()
return reports
if __name__ == '__main__':
petrobras = Company(cod_cvm=9512)
def obtemDadosCadastraisCVM(compAtivas=True, codCVM=False):
"""
Returns a dataframe of Registration data for all Companies available at http://dados.cvm.gov.br/dados/CIA_ABERTA/CAD/DADOS/cad_cia_aberta.csv
"""
url = "http://dados.cvm.gov.br/dados/CIA_ABERTA/CAD/DADOS/cad_cia_aberta.csv"
#s = requests.get(url).content
dados_cadastrais_empresas = pd.read_csv(url, sep=";", encoding="latin")
if compAtivas:
dados_cadastrais_empresas = dados_cadastrais_empresas[
dados_cadastrais_empresas["SIT"] == "ATIVO"]
if codCVM:
dados_cadastrais_empresas = dados_cadastrais_empresas[dados_cadastrais_empresas["CD_CVM"] == int(
codCVM)]
return dados_cadastrais_empresas
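def _exemplo_dados_cadastrais():
    # Illustrative usage sketch only (not part of the original module): it pulls the CVM
    # registration table and filters it by a single CVM code. Assumes network access to
    # the CVM open-data URL used above; the code 9512 is just an example value.
    dados = obtemDadosCadastraisCVM(compAtivas=True, codCVM=9512)
    print(dados[["CD_CVM", "SIT"]].head())
    return dados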
def composicao_capital_social(codCVM):
"""
    This method will be deprecated
"""
dfQuantPapeis = pd.DataFrame()
for cod in codCVM:
erro = 1
cod = str(cod)
while erro <= 3:
try:
print(cod)
url = "http://bvmf.bmfbovespa.com.br/pt-br/mercados/acoes/empresas/ExecutaAcaoConsultaInfoEmp.asp?CodCVM={codCVM}".format(
codCVM=cod)
#
html_content = requests.get(url).content.decode("utf8")
tableDados = BeautifulSoup(html_content, "lxml").find(
"div", attrs={"id": "accordionDados"})
tickers = re.findall(
"'[a-z|A-Z|0-9][a-z|A-Z|0-9][a-z|A-Z|0-9][a-z|A-Z|0-9][0-9][0-9]?'", str(tableDados))
tickers = [ticker.replace("'", "") for ticker in tickers]
tickers = list(dict.fromkeys(tickers))
tickers = pd.DataFrame(tickers, columns=['ticker'])
tickers["codCVM"] = cod
dicCapitalSocial = BeautifulSoup(html_content, "lxml").find(
"div", attrs={"id": "divComposicaoCapitalSocial"})
dfs = pd.read_html(str(dicCapitalSocial), thousands='.')[0]
dfs.columns = ["Tipo", "Quantidade"]
dfs["codCVM"] = cod
dfs["dt_load"] = datetime.now()
dfs = tickers.merge(dfs, on="codCVM")
print(dfs)
dfQuantPapeis = dfQuantPapeis.append(dfs)
break
except Exception as exp:
print("Tentando novamente:", cod)
print(str(exp))
erro += 1
print("*"*50)
return dfQuantPapeis
def obter_dados_negociacao(dateToday=datetime.now().strftime("%Y-%m-%d")):
print(dateToday)
url = f"https://arquivos.b3.com.br/api/download/requestname?fileName=InstrumentsConsolidated&date={dateToday}"
payload = {}
ua = UserAgent()
headers = {
'User-Agent': str(ua.chrome)}
response = requests.request("GET", url, headers=headers, data=payload)
if response.ok:
token = response.json().get('token')
baseURL = f"https://arquivos.b3.com.br/api/download/?token={token}"
data = pd.read_csv(baseURL,
sep=";",
encoding='latin-1',
error_bad_lines=True)
data["data_load"] = datetime.now()
print("Baixando arquivo!")
r = urllib.request.urlopen(
"https://sistemaswebb3-listados.b3.com.br/isinProxy/IsinCall/GetFileDownload/NDY0ODk=").read()
print("Descompactando arquivo!")
file = ZipFile(BytesIO(r))
dfEmissor = file.open("EMISSOR.TXT")
print("Abrindo arquivo CSV!")
dfEmissor = pd.read_csv(dfEmissor, header=None, names=[
"CODIGO DO EMISSOR", "NOME DO EMISSOR", "CNPJ", "DATA CRIAÇÃO EMISSOR"])
data = data.merge(dfEmissor, left_on="AsstDesc",
right_on="CODIGO DO EMISSOR", how="left")
data.reset_index(drop=True, inplace=True)
else:
data = None
return data
def obtemCodCVM():
url = "https://cvmweb.cvm.gov.br/SWB/Sistemas/SCW/CPublica/CiaAb/ResultBuscaParticCiaAb.aspx?CNPJNome=&TipoConsult=C"
print(url)
tableDados = pd.read_html(url, header=0)[0]
tableDados = tableDados[~tableDados['SITUAÇÃO REGISTRO'].str.contains(
"Cancelado")]
tableDados["CÓDIGO CVM"] = pd.to_numeric(
tableDados["CÓDIGO CVM"], errors="coerce")
tableDados = tableDados.drop_duplicates()
tableDados = tableDados.reset_index(drop=True)
return tableDados
def obter_indices_anbima(dataIni, dataFim):
link = "https://www.anbima.com.br/informacoes/ima/ima-sh.asp"
driver = utils.Browser.run_chromedriver(system="Windows")
#dataIni = datetime.now().strftime("%d%m%Y")
dataIni = datetime.strptime(dataIni, '%d/%m/%Y')
dataFim = datetime.strptime(dataFim, '%d/%m/%Y')
root = os.getcwd() + "/downloads"
try:
os.makedirs(root)
except FileExistsError:
# directory already exists
pass
    # Remove files from the download folder before starting a new scrape
files = glob.glob(root)
f = []
for (dirpath, dirnames, filenames) in os.walk(root):
for file in filenames:
if file.endswith(".csv"):
os.remove(root + "/" + file)
while dataFim >= dataIni:
try:
dateAux = dataFim.strftime("%d%m%Y")
driver.get(link)
driver.find_element_by_xpath(
"//input[@name='escolha'][@value='2']").click()
driver.find_element_by_xpath(
"//input[@name='saida'][@value='csv']").click()
dateInput = driver.find_element_by_xpath("//input[@name='Dt_Ref']")
dateInput.click()
dateInput.clear()
dateInput.send_keys(dateAux)
driver.find_element_by_xpath("//img[@name='Consultar']").click()
dataFim -= timedelta(days=1)
except Exception as excep:
print(str(excep))
f = []
dfAmbima = pd.DataFrame()
for (dirpath, dirnames, filenames) in os.walk(root):
for file in filenames:
try:
df = pd.read_csv(root + "/" + file, header=1,
sep=";", encoding="latin", thousands=".")
os.remove(root + "/" + file)
dfAmbima = dfAmbima.append(df)
except:
continue
    # Data types
dfAmbima = dfAmbima.replace("--", "")
dfAmbima["Data de Referência"] = pd.to_datetime(
dfAmbima["Data de Referência"], format='%d/%m/%Y', errors='coerce')
for column in dfAmbima.columns:
if column != "Data de Referência" and column != "Índice":
print(column)
dfAmbima[column] = dfAmbima[column].astype(
str).str.replace('.', '')
dfAmbima[column] = dfAmbima[column].astype(
str).str.replace(',', '.')
            dfAmbima[column] = pd.to_numeric(dfAmbima[column])
import json
import os
from math import ceil, isnan
import openpyxl
import pandas as pd
from Entities.SigfoxProfile import Sigfox
def extract_data(file, output):
    pd.set_option('display.max_columns', None)
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
    Load & merge the messages and categories datasets
    messages_filepath: path to the CSV file with messages
    categories_filepath: path to the CSV file with categories
    Returns:
    df: merged dataframe
"""
# load messages dataset
messages = pd.read_csv(messages_filepath)
# load categories dataset
categories = pd.read_csv(categories_filepath)
# merge datasets
df =pd.merge(messages,categories,how='inner',on='id')
return df
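def _example_load(messages_csv="messages.csv", categories_csv="categories.csv"):
    # Minimal usage sketch; the CSV file names here are placeholders, not paths taken
    # from the original project.
    df = load_data(messages_csv, categories_csv)
    print(df.shape)
    return df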
def clean_data(df):
"""
Loads df and cleans
Input: df with joined messages and categories data
Return: Clean dataframe free from duplicates
"""
categories = df.categories.str.split(';',expand=True)
row = categories.loc[0]
category_colnames = row.apply(lambda x: x[:-2])
categories.columns = category_colnames
for column in categories:
categories[column] = categories[column].apply(lambda x: x[-1:])
categories[column] = categories[column].astype('int')
categories.drop('child_alone', axis = 1, inplace = True)
df.drop('categories',axis=1,inplace=True)
    df = pd.concat([df,categories],axis=1)
    # drop duplicate rows, as promised in the docstring
    df = df.drop_duplicates()
    return df
#############################################################
# ActivitySim verification against TM1
# <NAME>, <EMAIL>, 02/22/19
# C:\projects\activitysim\verification>python compare_results.py
#############################################################
import pandas as pd
import openmatrix as omx
#############################################################
# INPUTS
#############################################################
pipeline_filename = 'asim/pipeline.h5'
distance_matrix_filename = "asim/skims.omx"
asim_nmtf_alts_filename = "asim/non_mandatory_tour_frequency_alternatives.csv"
process_sp = True # False skip work/sch shadow pricing comparisons, True do them
process_tm1 = True # False only processes asim, True processes tm1 as well
asim_sp_work_filename = "asim/shadow_price_workplace_modeled_size_10.csv"
asim_sp_school_filename = "asim/shadow_price_school_modeled_size_10.csv"
asim_sp_school_no_sp_filename = "asim/shadow_price_school_modeled_size_1.csv"
tm1_access_filename = "tm1/accessibility.csv"
tm1_sp_filename = "tm1/ShadowPricing_9.csv"
tm1_work_filename = "tm1/wsLocResults_1.csv"
tm1_ao_filename = "tm1/aoResults.csv"
tm1_hh_filename = "tm1/householdData_1.csv"
tm1_cdap_filename = "tm1/cdapResults.csv"
tm1_per_filename = "tm1/personData_1.csv"
tm1_tour_filename = "tm1/indivTourData_1.csv"
tm1_jtour_filename = "tm1/jointTourData_1.csv"
tm1_trips_filename = "tm1/indivTripData_1.csv"
tm1_jtrips_filename = "tm1/jointTripData_1.csv"
#############################################################
# OUTPUT FILES FOR DEBUGGING
#############################################################
asim_zones_filename = "asim/asim_zones.csv"
asim_access_filename = "asim/asim_access.csv"
asim_per_filename = "asim/asim_per.csv"
asim_hh_filename = "asim/asim_hh.csv"
asim_tour_filename = "asim/asim_tours.csv"
asim_trips_filename = "asim/asim_trips.csv"
#############################################################
# COMMON LABELS
#############################################################
ptypes = ["", "Full-time worker", "Part-time worker", "University student", "Non-worker",
"Retired", "Student of driving age", "Student of non-driving age",
"Child too young for school"]
mode_labels = ["", "DRIVEALONEFREE", "DRIVEALONEPAY", "SHARED2FREE", "SHARED2PAY", "SHARED3FREE",
"SHARED3PAY", "WALK", "BIKE", "WALK_LOC", "WALK_LRF", "WALK_EXP", "WALK_HVY",
"WALK_COM", "DRIVE_LOC", "DRIVE_LRF", "DRIVE_EXP", "DRIVE_HVY", "DRIVE_COM"]
#############################################################
# DISTANCE SKIM
#############################################################
# read distance matrix (DIST)
distmat = omx.open_file(distance_matrix_filename)["DIST"][:]
#############################################################
# EXPORT TABLES
#############################################################
# write tables for verification
tazs = pd.read_hdf(pipeline_filename, "land_use/initialize_landuse")
tazs["zone"] = tazs.index
tazs.to_csv(asim_zones_filename, index=False)
access = pd.read_hdf(pipeline_filename, "accessibility/compute_accessibility")
access.to_csv(asim_access_filename, index=False)
hh = pd.read_hdf(pipeline_filename, "households/joint_tour_frequency")
hh["household_id"] = hh.index
hh.to_csv(asim_hh_filename, index=False)
per = pd.read_hdf(pipeline_filename, "persons/non_mandatory_tour_frequency")
per["person_id"] = per.index
per.to_csv(asim_per_filename, index=False)
tours = pd.read_hdf(pipeline_filename, "tours/stop_frequency")
tours["tour_id"] = tours.index
tours.to_csv(asim_tour_filename, index=False)
trips = pd.read_hdf(pipeline_filename, "trips/trip_mode_choice")
trips["trip_id"] = trips.index
trips.to_csv(asim_trips_filename, index=False)
#############################################################
# AGGREGATE
#############################################################
# accessibilities
if process_tm1:
tm1_access = pd.read_csv(tm1_access_filename)
tm1_access.to_csv("outputs/tm1_access.csv", na_rep=0)
asim_access = pd.read_csv(asim_access_filename)
asim_access.to_csv("outputs/asim_access.csv", na_rep=0)
#############################################################
# HOUSEHOLD AND PERSON
#############################################################
# work and school location
if process_sp:
if process_tm1:
tm1_markets = ["work_low", "work_med", "work_high", "work_high", "work_very high", "university",
"school_high", "school_grade"]
tm1 = pd.read_csv(tm1_sp_filename)
tm1 = tm1.groupby(tm1["zone"]).sum()
tm1["zone"] = tm1.index
tm1 = tm1.loc[tm1["zone"] > 0]
ws_size = tm1[["zone"]]
for i in range(len(tm1_markets)):
ws_size[tm1_markets[i] + "_modeledDests"] = tm1[tm1_markets[i] + "_modeledDests"]
ws_size.to_csv("outputs/tm1_work_school_location.csv", na_rep=0)
asim_markets = ["work_low", "work_med", "work_high", "work_high", "work_veryhigh", "university",
"highschool", "gradeschool"]
asim = pd.read_csv(asim_sp_work_filename)
asim_sch = pd.read_csv(asim_sp_school_filename)
asim_sch_no_sp = pd.read_csv(asim_sp_school_no_sp_filename)
asim_sch["gradeschool"] = asim_sch_no_sp["gradeschool"] # grade school not shadow priced
asim = asim.set_index("TAZ", drop=False)
asim_sch = asim_sch.set_index("TAZ", drop=False)
asim["gradeschool"] = asim_sch["gradeschool"].loc[asim["TAZ"]].tolist()
asim["highschool"] = asim_sch["highschool"].loc[asim["TAZ"]].tolist()
asim["university"] = asim_sch["university"].loc[asim["TAZ"]].tolist()
ws_size = asim[["TAZ"]]
for i in range(len(asim_markets)):
ws_size[asim_markets[i] + "_asim"] = asim[asim_markets[i]]
ws_size.to_csv("outputs/asim_work_school_location.csv", na_rep=0)
# work county to county flows
tazs = pd.read_csv(asim_zones_filename)
counties = ["", "SF", "SM", "SC", "ALA", "CC", "SOL", "NAP", "SON", "MAR"]
tazs["COUNTYNAME"] = pd.Series(counties)[tazs["county_id"].tolist()].tolist()
tazs = tazs.set_index("zone", drop=False)
if process_tm1:
tm1_work = pd.read_csv(tm1_work_filename)
tm1_work["HomeCounty"] = tazs["COUNTYNAME"].loc[tm1_work["HomeTAZ"]].tolist()
tm1_work["WorkCounty"] = tazs["COUNTYNAME"].loc[tm1_work["WorkLocation"]].tolist()
tm1_work_counties = tm1_work.groupby(["HomeCounty", "WorkCounty"]).count()["HHID"]
tm1_work_counties = tm1_work_counties.reset_index()
tm1_work_counties = tm1_work_counties.pivot(index="HomeCounty", columns="WorkCounty")
tm1_work_counties.to_csv("outputs/tm1_work_counties.csv", na_rep=0)
asim_cdap = pd.read_csv(asim_per_filename)
# ActivitySim
# See full license in LICENSE.txt.
import logging
import os
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def undupe_column_names(df, template="{} ({})"):
"""
rename df column names so there are no duplicates (in place)
e.g. if there are two columns named "dog", the second column will be reformatted to "dog (2)"
Parameters
----------
df : pandas.DataFrame
dataframe whose column names should be de-duplicated
template : template taking two arguments (old_name, int) to use to rename columns
Returns
-------
df : pandas.DataFrame
dataframe that was renamed in place, for convenience in chaining
"""
new_names = []
seen = set()
for name in df.columns:
n = 1
new_name = name
while new_name in seen:
n += 1
new_name = template.format(name, n)
new_names.append(new_name)
seen.add(new_name)
df.columns = new_names
return df
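def _undupe_example():
    # Quick illustrative check (made-up frame, not from the original test suite):
    # two columns named "dog" become "dog" and "dog (2)", as described in the docstring.
    df = pd.DataFrame([[1, 2]], columns=["dog", "dog"])
    return undupe_column_names(df).columns.tolist()  # ['dog', 'dog (2)']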
def read_assignment_spec(fname,
description_name="Description",
target_name="Target",
expression_name="Expression"):
"""
Read a CSV model specification into a Pandas DataFrame or Series.
The CSV is expected to have columns for component descriptions
targets, and expressions,
The CSV is required to have a header with column names. For example:
Description,Target,Expression
Parameters
----------
fname : str
Name of a CSV spec file.
description_name : str, optional
Name of the column in `fname` that contains the component description.
target_name : str, optional
Name of the column in `fname` that contains the component target.
expression_name : str, optional
Name of the column in `fname` that contains the component expression.
Returns
-------
spec : pandas.DataFrame
dataframe with three columns: ['description' 'target' 'expression']
"""
cfg = pd.read_csv(fname, comment='#')
# drop null expressions
# cfg = cfg.dropna(subset=[expression_name])
cfg.rename(columns={target_name: 'target',
expression_name: 'expression',
description_name: 'description'},
inplace=True)
# backfill description
if 'description' not in cfg.columns:
cfg.description = ''
cfg.target = cfg.target.str.strip()
cfg.expression = cfg.expression.str.strip()
return cfg
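def _example_spec(spec_csv="example_spec.csv"):
    # Hypothetical illustration: the file name and the single spec row are invented.
    # It writes a tiny spec CSV and reads it back with the loader above.
    with open(spec_csv, "w") as f:
        f.write("Description,Target,Expression\n"
                "double the count,count2,df.count_col * 2\n")
    return read_assignment_spec(spec_csv)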
class NumpyLogger(object):
def __init__(self, logger):
self.logger = logger
self.target = ''
self.expression = ''
def write(self, msg):
self.logger.error("numpy warning: %s" % (msg.rstrip()))
self.logger.error("expression: %s = %s" % (str(self.target), str(self.expression)))
def assign_variables(assignment_expressions, df, locals_dict, df_alias=None, trace_rows=None):
"""
Evaluate a set of variable expressions from a spec in the context
of a given data table.
Expressions are evaluated using Python's eval function.
Python expressions have access to variables in locals_d (and df being
accessible as variable df.) They also have access to previously assigned
targets as the assigned target name.
lowercase variables starting with underscore are temp variables (e.g. _local_var)
    and not returned except in trace_results
uppercase variables starting with underscore are temp variables (e.g. _LOCAL_SCALAR)
and not returned except in trace_assigned_locals
This is useful for defining general purpose local constants in expression file
    Users should take care that expressions result in
    a Pandas Series (scalars will be automatically promoted to series).
Parameters
----------
assignment_expressions : pandas.DataFrame of target assignment expressions
target: target column names
expression: pandas or python expression to evaluate
df : pandas.DataFrame
    locals_dict : Dict
This is a dictionary of local variables that will be the environment
for an evaluation of "python" expression.
trace_rows: series or array of bools to use as mask to select target rows to trace
Returns
-------
variables : pandas.DataFrame
Will have the index of `df` and columns named by target and containing
the result of evaluating expression
trace_df : pandas.DataFrame or None
a dataframe containing the eval result values for each assignment expression
"""
np_logger = NumpyLogger(logger)
def is_local(target):
return target.startswith('_') and target.isupper()
def is_temp(target):
return target.startswith('_')
def to_series(x, target=None):
if x is None or np.isscalar(x):
if target:
logger.warn("WARNING: assign_variables promoting scalar %s to series" % target)
return pd.Series([x] * len(df.index), index=df.index)
return x
trace_assigned_locals = trace_results = None
if trace_rows is not None:
# convert to numpy array so we can slice ndarrays as well as series
trace_rows = np.asanyarray(trace_rows)
if trace_rows.any():
trace_results = []
trace_assigned_locals = {}
# avoid touching caller's passed-in locals_d parameter (they may be looping)
locals_dict = locals_dict.copy() if locals_dict is not None else {}
if df_alias:
locals_dict[df_alias] = df
else:
locals_dict['df'] = df
local_keys = locals_dict.keys()
l = []
# need to be able to identify which variables causes an error, which keeps
# this from being expressed more parsimoniously
for e in zip(assignment_expressions.target, assignment_expressions.expression):
target, expression = e
if target in local_keys:
logger.warn("assign_variables target obscures local_d name '%s'" % str(target))
if is_local(target):
x = eval(expression, globals(), locals_dict)
locals_dict[target] = x
if trace_assigned_locals is not None:
trace_assigned_locals[target] = x
continue
try:
# FIXME - log any numpy warnings/errors but don't raise
np_logger.target = str(target)
np_logger.expression = str(expression)
saved_handler = np.seterrcall(np_logger)
save_err = np.seterr(all='log')
values = to_series(eval(expression, globals(), locals_dict), target=target)
np.seterr(**save_err)
np.seterrcall(saved_handler)
except Exception as err:
logger.error("assign_variables error: %s: %s" % (type(err).__name__, str(err)))
logger.error("assign_variables expression: %s = %s"
% (str(target), str(expression)))
# values = to_series(None, target=target)
raise err
l.append((target, values))
if trace_results is not None:
trace_results.append((target, values[trace_rows]))
# update locals to allows us to ref previously assigned targets
locals_dict[target] = values
# build a dataframe of eval results for non-temp targets
# since we allow targets to be recycled, we want to only keep the last usage
# we scan through targets in reverse order and add them to the front of the list
# the first time we see them so they end up in execution order
variables = []
seen = set()
for statement in reversed(l):
# statement is a tuple (<target_name>, <eval results in pandas.Series>)
target_name = statement[0]
if not is_temp(target_name) and target_name not in seen:
variables.insert(0, statement)
seen.add(target_name)
# DataFrame from list of tuples [<target_name>, <eval results>), ...]
variables = pd.DataFrame.from_items(variables)
if trace_results is not None:
trace_results = pd.DataFrame.from_items(trace_results)
trace_results.index = df[trace_rows].index
trace_results = undupe_column_names(trace_results)
# add df columns to trace_results
        trace_results = pd.concat([df[trace_rows], trace_results], axis=1)
    return variables, trace_results
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
This file contains training and testing settings to be used in this benchmark,
mainly:
TRAIN_BASE_END: Base training end date common across all rounds
TRAIN_ROUNDS_ENDS: a set of dates denoting end of training period for each
of the 6 rounds of the benchmark
TEST_STARTS_ENDS: a set of dates denoting start and end of testing period
for each of the 6 rounds of the benchmark
"""
import pandas as pd
TRAIN_BASE_END = pd.to_datetime("2016-11-01")
TRAIN_ROUNDS_ENDS = pd.to_datetime(
["2016-12-01", "2016-12-01", "2017-01-01", "2017-01-01", "2017-02-01", "2017-02-01",]
)
TEST_STARTS_ENDS = [
pd.to_datetime(("2017-01-01", "2017-02-01")),
pd.to_datetime(("2017-02-01", "2017-03-01")),
    pd.to_datetime(("2017-02-01", "2017-03-01")),
# flu prediction
import os
import pandas as pd
import feather
from utils.fastai.structured import *
from utils.fastai.column_data import *
from sklearn import preprocessing
from sklearn.metrics import classification_report, confusion_matrix
import keras
from keras.layers import Input, Embedding, Dense, Dropout
from keras.models import Model
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras import metrics
pd.set_option('display.width', 250)
data_path = os.environ['DATA_DIR'] + 'epidata_flu/'
def drop_columns(df, cols):
"""drop columns form dataframe"""
df = df.drop(cols, axis=1)
return df
def show_prediction(model, raw_df, epiyear):
"""
    compare predictions with actual values for a given epiyear
"""
def proc_df(df, max_n_cat=None, mapper=None):
""" standardizes continuous columns and numericalizes categorical columns
Parameters:
-----------
df: The data frame you wish to process.
max_n_cat: The maximum number of categories to break into dummy values, instead
of integer codes.
    mapper: calculates the values used for scaling of variables during training time (mean
and standard deviation).
Returns:
--------
x: x is the transformed version of df. x will not have the response variable
and is entirely numeric.
mapper: A DataFrameMapper which stores the mean and standard deviation of the
    corresponding continuous variables, which is then used for scaling during test time.
"""
df = df.copy()
mapper = scale_vars(df, mapper)
for n, c in df.items():
numericalize(df, c, n, max_n_cat)
return pd.get_dummies(df, dummy_na=True), mapper
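def _proc_df_example():
    # Small fabricated frame to illustrate proc_df: 'color' is categorical and gets
    # numericalized, 'size' is continuous and gets standardized; the returned mapper
    # can be reused on validation/test data.
    demo = pd.DataFrame({"color": pd.Categorical(["red", "blue"]),
                         "size": [1.0, 2.0]})
    x, mapper = proc_df(demo)
    return x, mapper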
def concat_prior(df, cols, shift_num=4):
"""
shift dataframe forward to compute prior epiweek features
returns a dataframe with concatenated prior features
cols is a list of columns to shift
shift_num is how many prior weeks to shift
"""
# add concatenated features
df_grp = df.groupby(['region'])
df = []
for name, grp in df_grp:
grp = grp.sort_values(['epiyear', 'epiweeknum'], ascending=True).reset_index(drop=True)
grp_ = [grp]
for idx in range(1, shift_num+1):
grp_.append(grp[cols].shift(idx))
grp_[idx].columns = [c + '_prior_{}'.format(idx) for c, idx in zip(cols, len(cols)*[idx])]
grp = pd.concat(grp_, axis=1).loc[shift_num:]
df.append(grp)
return pd.concat(df).fillna(0)
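def _concat_prior_example():
    # Toy example with fabricated numbers showing how prior-week features are built:
    # each remaining row gains wili_prior_1 and wili_prior_2 columns taken from the
    # one- and two-week-earlier epiweeks of the same region.
    toy = pd.DataFrame({"region": ["nat"] * 4,
                        "epiyear": [2015] * 4,
                        "epiweeknum": [1, 2, 3, 4],
                        "wili": [1.0, 1.5, 2.0, 2.5]})
    return concat_prior(toy, cols=["wili"], shift_num=2)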
def get_ml_data(recompute=False):
"""
preprocess data for ml training
    num_shift is the number of prior epiweeks to concatenate for feature columns
"""
ml_data_path = data_path + 'ml_data/'
feather_names = ['train_x', 'train_y', 'val_x', 'val_y', 'test_x', 'test_y', 'train_xp']
mapper_names = ['mapper', 'le_wili', 'le_epi']
var_names = ['cat_vars', 'contin_vars', 'wiki_cols', 'pred_wili', 'pred_epi', 'pred_vars']
if not os.path.exists(ml_data_path):
os.makedirs(ml_data_path)
# read flu dataframe and shuffle
df = pd.read_feather(data_path + 'joined_df.feather')
#df = df.reindex(np.random.permutation(df.index))
# set categorial and continous data (prediction variables are also categorial)
wiki_cols = [c for c in df.columns.tolist() if 'wiki' in c]
cat_vars = ['region', 'epiweeknum', 'region_type']
contin_vars = ['epiyear', 'lag', 'ili', 'num_age_0', 'num_age_1', 'num_age_2', 'num_age_3',
'num_age_4', 'num_age_5', 'num_ili', 'num_patients', 'num_providers', 'wili',
'std_nc', 'value_nc'] + wiki_cols
pred_wili = ['week_ahead_wili_1', 'week_ahead_wili_2', 'week_ahead_wili_3',
'week_ahead_wili_4', 'peak_intensity']
pred_epi = ['peak_week']
pred_vars = pred_wili + pred_epi
# use data from 1997 to 2016 for train and 2017 for validation
    # use 2018 for weekly forecast testing; we can't test season onset
    # and peak because data is not yet available for 2018
val_year = 2017
test_year = 2018
#df_ = df.copy()
#n = 50000 # len(df) # training data sample
#idxs = get_cv_idxs(n, val_pct=150000/n)
#df = df.iloc[idxs].reset_index(drop=True)
train_df = df[(df['epiyear'] != val_year) &
(df['epiyear'] != test_year)].reset_index(drop=True)
val_df = df[df['epiyear'] == val_year].reset_index(drop=True)
test_df = df[df['epiyear'] == test_year].reset_index(drop=True)
del df
if not os.listdir(ml_data_path) or recompute:
# split data into features and prediction variables
print ('Splitting data into features and prediction variables ...')
train_x = train_df[cat_vars + contin_vars].copy()
train_y = train_df[pred_vars].copy()
val_x = val_df[cat_vars + contin_vars].copy()
val_y = val_df[pred_vars].copy()
test_x = test_df[cat_vars + contin_vars].copy()
test_y = test_df[pred_vars].copy()
# numericalize and standardize training features / fit prediction values
# train_xp contains pre transformation values
print ('Numericalizing and Standardizing training data ...')
train_xp = train_x.copy()
for v in cat_vars:
train_xp[v] = train_x[v].astype('category').cat.as_ordered()
for v in contin_vars:
train_xp[v] = train_xp[v].astype('float32')
train_x, mapper = proc_df(train_xp)
print ('Applying label transformation ...')
bin_start_incl = np.round(np.r_[np.arange(0, 13.1, .1), 100], decimals=1)
le_wili = preprocessing.LabelEncoder() # wili forecast percentages
le_wili.fit(bin_start_incl)
le_epi = preprocessing.LabelEncoder() # wili peak weak
le_epi.fit(range(1,54))
for v in pred_wili:
train_y[v] = train_y[v].transform(le_wili.transform)
for v in pred_epi:
train_y[v] = train_y[v].transform(pd.to_numeric)
train_y[v] = train_y[v].transform(le_epi.transform)
# apply training transformation to validation and test data
print ('Numericalizing and Standardizing validatan and test data ...')
apply_cats(val_x, train_xp)
for v in contin_vars:
val_x[v] = val_x[v].astype('float32')
val_x, _ = proc_df(val_x, mapper=mapper)
for v in pred_wili:
val_y[v] = val_y[v].transform(le_wili.transform)
for v in pred_epi:
val_y[v] = val_y[v].transform(pd.to_numeric)
val_y[v] = val_y[v].transform(le_epi.transform)
if not test_df.empty:
apply_cats(test_x, train_xp)
for v in contin_vars:
test_x[v] = test_x[v].astype('float32')
test_x, _ = proc_df(test_x, mapper=mapper)
for v in pred_wili:
test_y[v] = test_y[v].transform(le_wili.transform)
# generate dictionary from processed data
ml_data = {}
for fname in feather_names + mapper_names:
ml_data[fname] = eval(fname)
# save processed data to disk
for fname in feather_names:
feather.write_dataframe(eval(fname), ml_data_path + fname + '.feather')
for fname in mapper_names:
pd.to_pickle(eval(fname), ml_data_path + fname + '.pkl')
for fname in var_names:
pd.to_pickle(eval(fname), ml_data_path + fname + '.pkl')
else:
# read data from desk
ml_data = {}
for fname in feather_names:
ml_data[fname] = pd.read_feather(ml_data_path + fname + '.feather')
for fname in mapper_names:
ml_data[fname] = pd.read_pickle(ml_data_path + fname + '.pkl')
for fname in var_names:
ml_data[fname] = pd.read_pickle(ml_data_path + fname + '.pkl')
return ml_data, cat_vars + contin_vars, pred_vars
# Embedding learning scripts
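# NOTE: the helpers below appear to target the old Keras 1 API -- `initializations`,
# `Flatten` and `Embedding0` are not imported/defined in this file, so they need the
# matching imports (or a port to keras.initializers / keras.layers.Flatten) to run.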
def cat_map_info(feat):
return feat[0], len(feat[1].classes_)
def my_init(scale):
return lambda shape, name=None: initializations.uniform(shape, scale=scale, name=name)
def emb_init(shape, name=None):
return initializations.uniform(shape, scale=2/(shape[1]+1), name=name)
def get_emb(feat):
name, c = cat_map_info(feat)
#c2 = cat_var_dict[name]
c2 = (c+1)//2
if c2>50: c2=50
inp = Input((1,), dtype='int64', name=name+'_in')
# , W_regularizer=l2(1e-6)
u = Flatten(name=name+'_flt')(Embedding0(c, c2, input_length=1, init=emb_init)(inp))
# u = Flatten(name=name+'_flt')(Embedding(c, c2, input_length=1)(inp))
return inp,u
def get_contin(feat):
name = feat[0][0]
inp = Input((1,), name=name+'_in')
return inp, Dense(1, name=name+'_d', init=my_init(1.))(inp)
def top1(y_true, y_pred):
return metrics.top_k_categorical_accuracy(y_true, y_pred, k=1)
def get_model(dim, num_classes_wili, num_classes_epi):
input_ = Input(shape=(dim, ), dtype='float32')
x = Dropout(0.02)(input_)
x = Dense(2000, activation='relu', init='uniform')(x)
x = Dense(1000, activation='relu', init='uniform')(x)
x = Dense(500, activation='relu', init='uniform')(x)
x = Dropout(0.2)(x)
y_wili_1 = Dense(num_classes_wili, activation='softmax', name='y_wili_1')(x) # 1 week ahead prediction
y_wili_2 = Dense(num_classes_wili, activation='softmax', name='y_wili_2')(x) # 2 week ahead prediction
y_wili_3 = Dense(num_classes_wili, activation='softmax', name='y_wili_3')(x) # 3 week ahead prediction
y_wili_4 = Dense(num_classes_wili, activation='softmax', name='y_wili_4')(x) # 4 week ahead prediction
y_peak_wili = Dense(num_classes_wili, activation='softmax', name='y_peak_wili')(x) # peak wili prediction
y_peak_week = Dense(num_classes_epi, activation='softmax', name='y_peak_week')(x) # peak wili predictio
model = Model(inputs=[input_], outputs=[y_wili_1, y_wili_2, y_wili_3, y_wili_4, y_peak_wili, y_peak_week])
return model
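def _compile_example(feature_dim=300, n_wili_classes=132, n_week_classes=53):
    # Hedged sketch of how the six-output model could be compiled; the feature
    # dimension, optimizer and loss choice are assumptions, not values recorded from
    # the original training run (132 wILI bins and 53 weeks match the label encoders
    # built inside get_ml_data).
    model = get_model(feature_dim, n_wili_classes, n_week_classes)
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
    return model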
if __name__ == '__main__':
# generate ml data2
print ("Generating ML Data ...")
ml_data, x_vars, y_vars = get_ml_data()
train_xp = ml_data['train_xp']
mapper = ml_data['mapper']
le_wili = ml_data['le_wili']
le_epi = ml_data['le_epi']
train_x = ml_data['train_x']
val_x = ml_data['val_x']
test_x = ml_data['test_x']
train_y = ml_data['train_y']
val_y = ml_data['val_y']
test_y = ml_data['test_y']
# add time series variables
prior = 4
    train_x = pd.concat([train_x, train_y], axis=1)
from drain.aggregation import SimpleAggregation, SpacetimeAggregation, AggregationJoin, SpacetimeAggregationJoin
from drain.aggregate import Count
from drain import step
from datetime import date
import pandas as pd
import numpy as np
class SimpleCrimeAggregation(SimpleAggregation):
@property
def aggregates(self):
return [
Count(),
Count('Arrest'),
Count(lambda c: c['Primary Type'] == 'THEFT',
'theft', prop=True),
]
def test_simple_aggregation_parallel(drain_setup, crime_step):
s = SimpleCrimeAggregation(inputs=[crime_step],
indexes=['District', 'Community Area'], parallel=True)
s.execute()
print(s.result)
def test_simple_aggregation(drain_setup, crime_step):
s = SimpleCrimeAggregation(inputs=[crime_step],
indexes=['District', 'Community Area'], parallel=False)
s.execute()
print(s.result)
def test_simple_join(drain_setup, crime_step):
s = SimpleCrimeAggregation(inputs=[crime_step],
indexes=['District', 'Community Area'], parallel=True)
s.execute()
left = pd.DataFrame({'District':[1,2], 'Community Area':[1,2]})
print(s.join(left))
def test_simple_join_fillna(drain_setup, crime_step):
s = SimpleCrimeAggregation(inputs=[crime_step],
indexes=['District', 'Community Area'], parallel=True)
s.execute()
    left = pd.DataFrame({'District':[1,2], 'Community Area':[1,100]})
"""Bloch wave solver"""
import importlib as imp
import numpy as np,pandas as pd,pickle5,os,glob,tifffile
from typing import TYPE_CHECKING, Dict, Iterable, Optional, Sequence, Union
from subprocess import check_output#Popen,PIPE
from crystals import Crystal
from utils import glob_colors as colors,handler3D as h3d
from utils import displayStandards as dsp #;imp.reload(dsp)
from utils import physicsConstants as cst #;imp.reload(cst)
from EDutils import postprocess as pp #;imp.reload(pp)
from EDutils import structure_factor as sf #;imp.reload(sf)
from EDutils import scattering_factors as scatf #;imp.reload(scatf)
from EDutils import viewers #;imp.reload(viewers)
from EDutils import utilities as ut #;imp.reload(ut)
from EDutils import display as EDdisp #;imp.reload(EDdisp)
from . import util as bloch_util #;imp.reload(bloch_util)
class Bloch:
"""
Bloch wave simulation class
Parameters
---------
    cif_file
        structure file (3D: cif file, or wallpaper info in 2D)
name
optional name (used to save object see set_name)
path
A path for the simulation folder
beam
dictionary passed to :meth:`~Bloch.set_beam`
keV
electron wavelength (see :meth:`~Bloch.set_beam`)
u
beam direction [ux,uy,uz] in reciprocal space (see :meth:`~Bloch.set_beam`)
Nmax
max order of reflections/resolution (see :meth:`~Bloch.update_Nmax`)
Smax
maximum excitation error (see :meth:`~Bloch.solve`)
solve
solve the system by diagonalizing the Bloch matrix
kwargs
arguments to be passed to :meth:`~Bloch.solve`
"""
def __init__(self,
cif_file:str,name:str='',path:str='',
beam:Optional[dict]={},keV:float=200,u:Sequence[float]=[0,0,1],
Nmax:int=1,
Smax:float=0.2,
solve:bool=True,
eps:float=1,
**kwargs,
):
self.solved = False
self.Nmax = 0
self.thick = 100
self.thicks = self._set_thicks((0,1000,1000))
self.eps=eps
self._set_structure(cif_file)
self.update_Nmax(Nmax)
beam_args={'keV':keV,'u':u}
beam_args.update(beam)
self.set_beam(**beam_args)
self.set_name(name,path)
self._set_excitation_errors(Smax)
self._set_Vg()
if solve:
self.solve(Smax=Smax,**kwargs)
# if show_thicks or 't' in opts:
# self.show_beams_vs_thickness(strong=['I'])
# if 's' in opts:
# self.save()
def set_name(self,name='',path=''):
"""
Set the basename for saving a Bloch obj as path+name+'.pkl'
Parameters
----------
name
The name of the simulation(can include the full path)
path
The path to the simulation folder
.. NOTE::
By default the simulation name is '<cif><zone axis>_<keV>keV_bloch'
"""
basefile = self.cif_file.replace('.cif','')
if not name:
u_str = ''.join(['%d' %np.round(u) for u in self.Kabc0])
name='%s%s_%dkeV_bloch' %(os.path.basename(basefile),u_str,np.round(self.keV))
if not path:path=os.path.dirname(basefile)
self.path = path #; print(self.path)
self.name = name #;print('name:',self.name)
def update_Nmax(self,Nmax:int):
"""
Update resolution/max order (lattice and Fhkl)
Parameters
----------
Nmax
maximum h,k,l order
"""
if type(Nmax) in [int,np.int64] :
if not Nmax==self.Nmax:
self.Nmax=Nmax
(h,k,l),(qx,qy,qz) = ut.get_lattice(self.lat_vec,self.Nmax)
self.lattice = [(h,k,l),(qx,qy,qz)]
self.hklF,self.Fhkl = sf.structure_factor3D(self.pattern, 2*np.pi*self.lat_vec,hklMax=2*self.Nmax)
def set_beam(self,
keV:float=200,
# lam:float=None,
u:Sequence[float]=[0,0,1],
K:Optional[Sequence[float]]=None,
u0:Optional[Sequence[int]]=None,
):
""" Set the beam direction and wavelength
Parameters
----------
keV
            beam energy in keV
u
beam direction
K
update beam direction and wavelength (overwrites keV)
u0
beam direction zone axis notation
.. note::
The order of priority for setting up the beam is K,u0,u
"""
if isinstance(K,list) or isinstance(K,np.ndarray):
self.k0 = np.linalg.norm(K)
self.u = K/self.k0
else:
if isinstance(u0,list) or isinstance(u0,np.ndarray):
u = np.array(u0).dot(self.lat_vec)
self.u = np.array(u)/np.linalg.norm(u)
self.k0 = 1/cst.keV2lam(keV)
self.lam = 1/self.k0
self.keV = cst.lam2keV(self.lam)
self.sig = cst.keV2sigma(self.keV)
# self.u = self.K/self.k0
self.K = self.k0*self.u
self.Kuvw = self.lat_vec.dot(self.K) #projection in reciprocal crystal basis
self.Kabc = self.lat_vec0.dot(self.K) #projection in crystal basis
self.Kabc0 = self.Kabc/(abs(self.Kabc)[abs(self.Kabc)>0.01].min()) #;print(Kabc)
self.Kuvw0 = self.Kuvw/(abs(self.Kuvw)[abs(self.Kuvw)>0.01].min()) #;print(Kabc)
def set_thickness(self,thick:float=100):
"""set thickness and update beams
Parameters
----------
thick : thickness in A
"""
if type(thick) in [int,float] :self.thick=thick
self._set_kinematic()
if self.solved:self._set_intensities()
def solve(self,
Smax:Optional[float]=None,hkl:Optional[Iterable[int]]=None,
Nmax:Optional[int]=None,
beam:Optional[dict]={},
thick:float=None,thicks:Sequence[float]=None,
opts:str='sv0',
):
""" Diagonalize the Blochwave matrix
Parameters
----------
Smax
maximum excitation error to be included
hkl
Beams to include
Nmax
max order of reflections/resolution (see :meth:`~Bloch.update_Nmax` )
beam
dictionary passed to :meth:`~Bloch.set_beam`
thick
thickness of crystal (can be modified without resolving)
thicks
range of thickness [z_min,z_max,z_step]
opts
s(save) t(show intensities) z(show beam vs thickness) 0(Vopt0) v(verbose) H(show H)
.. note ::
If hkl is specified, Smax is not taken into account
"""
if Nmax : self.update_Nmax(Nmax)
if beam : self.set_beam(**beam)
if Smax or isinstance(hkl,np.ndarray):
self._set_excitation_errors(Smax,hkl)
self._set_Vg()
self._solve_Bloch(show_H='H' in opts,Vopt0='0' in opts,v='v' in opts)
##### postprocess
if thick or 't' in opts:
self.set_thickness(thick)
if not type(thicks)==type(None) or 'z' in opts:
self._set_beams_vs_thickness(thicks)#;print('thicks solved')
if 's' in opts:
self.save()
################################################################################
#### private
################################################################################
def _solve_Felix(self,felix_cif,npx=20,nbeams=200,thicks=(10,250,10),show_log=False):
felix='%s/bin/felix' %os.path.dirname(__file__)
if not os.path.exists(felix):
            print('Running with Felix not available. You need to install felix in:')
print(felix)
inp = bloch_util.get_inp(npx,nbeams,self.u,self.keV,thicks)
with open("%s/felix.inp" %self.path,'w') as f:f.write(inp)
print(colors.blue+"preparing simulation"+colors.black)
cmd="""cd %s;
if [ ! -d felix ];then mkdir felix;fi;
cp %s felix/felix.cif;
mv felix.inp felix;
""" %(self.path,os.path.realpath(felix_cif))
# p=Popen(cmd,shell=True)#;p.wait();o,e = p.communicate();if e:print(e)
p=check_output(cmd,shell=True).decode();print(p)
print(colors.blue+"... running felix ..."+colors.black)
cmd="""cd %s;
cd felix;
%s > felix.log 2>&1;
""" %(self.path,felix)#os.path.dirname(__file__))
p=check_output(cmd,shell=True).decode() #;print(p)
if show_log:
print(colors.blue+"felix output"+colors.black)
with open('%s/felix/felix.log' %self.path,'r') as f:print('\n'.join(f.readlines()))
g = np.loadtxt(os.path.join(self.path,'felix/eigenvals.txt'))
C = np.loadtxt(os.path.join(self.path,'felix/eigenvec.txt'))
self.gammaj = g[:,3::2]+1J*g[:,4::2];g=g[:,0]
self.CjG = C[:,3::2]+1J*C[:,4::2]
self.invCjG=np.conj(C.T)
def _solve_Bloch(self,show_H=False,Vopt0=True,v=False):
''' Diagonalize the Hamiltonian'''
# Ug is a (2*Nmax+1)^3 tensor :
# Ug[l] = [U(-N,-N,l) .. U(-N,0,l) .. U(-N,N,l)
# U( 0,-N,l) .. U( 0,0,l) .. U( 0,N,l)
# U(-N,-N,l) .. U( N,0,l) .. U( N,N,l)]
hkl = self.df_G[['h','k','l']].values
Sg = self.df_G.Sw.values
pre = 1/np.sqrt(1-cst.keV2v(self.keV)**2)
Ug = pre*self.Fhkl/(self.crys.volume*np.pi)*self.eps #/3
#####################
# setting average potential to 0
# Ug[U0_idx] = U0
# and Ug(iG,jG) are obtained from Ug[h,k,l] where h,k,l = hlk_iG-hkl_jG
#####################
U0_idx = [2*self.Nmax]*3
Ug[tuple(U0_idx)] = 0
# if Vopt0 :Ug[tuple(U0_idx)] = 0 #setting average potential to 0
if v:print(colors.blue+'...assembling %dx%d matrix...' %((Sg.shape[0],)*2)+colors.black)
H = np.diag(Sg+0J)
for iG,hkl_G in enumerate(hkl) :
U_iG = np.array([Ug[tuple(hkl_J+U0_idx)] for hkl_J in hkl_G-hkl]) #;print(V_iG.shape)
# print('G : ',hkl_G)
# print('U_G:',Ug[tuple(U0_idx+hkl_G)])
# print('idx iG:',[tuple(hkl_J+U0_idx) for hkl_J in hkl_G-hkl])
# print(U_iG)
H[iG,:] += U_iG/(2*self.k0) #off diagonal terms as potential
self.H = H*2*np.pi #to get same as felix
if show_H:self.show_H()
if v:print(colors.blue+'...diagonalization...'+colors.black)
self.gammaj,self.CjG = np.linalg.eigh(self.H) #;print(red+'Ek',lk,black);print(wk)
self.invCjG = np.linalg.inv(self.CjG)
self.solved = True
def _set_structure(self,cif_file):
self.cif_file = cif_file
self.crys = ut.import_crys(cif_file)
self.lat_vec0 = np.array(self.crys.lattice_vectors)
self.lat_vec = np.array(self.crys.reciprocal_vectors)/(2*np.pi)
self.pattern = np.array([np.hstack([a.coords_fractional,a.atomic_number]) for a in self.crys.atoms] )
def _set_excitation_errors(self,Smax=0.02,hkl=None):
""" get excitation errors for Sg<Smax
- Smax : maximum excitation error to be included
- hkl : list of tuple or nbeams x 3 ndarray - beams to be included (for comparison with other programs)
"""
K,K0 = self.K,self.k0
if isinstance(hkl,list) or isinstance(hkl,np.ndarray):
hkl = np.array(hkl)
h,k,l = hkl.T
qx,qy,qz = hkl.dot(self.lat_vec).T
else:
(h,k,l),(qx,qy,qz) = self.lattice
Kx,Ky,Kz = K
Sw = K0-np.sqrt((Kx-qx)**2+(Ky-qy)**2+(Kz-qz)**2)
Swa = (K0**2-((Kx+qx)**2+(Ky+qy)**2+(Kz+qz)**2))/(2*K0)
if Smax:
idx = abs(Sw)<Smax
h,k,l = np.array([h[idx],k[idx],l[idx]],dtype=int)
qx,qy,qz,Sw,Swa = qx[idx],qy[idx],qz[idx],Sw[idx],Swa[idx]
q = np.linalg.norm(np.array([qx,qy,qz]).T,axis=1)
d = dict(zip(['h','k','l','qx','qy','qz','q','Sw','Swa'],[h,k,l,qx,qy,qz,q,Sw,abs(Sw)]))
self.Smax = Smax
self.nbeams = Sw.size
        self.df_G = pd.DataFrame.from_dict(d)
'''
'''
import spacy
import numpy as np
import pandas as pd
from pprint import pprint
import scipy.spatial.distance
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
import json
import re
import os
def normal(token):
# Should the token be kept? (=is normal)
# Spacy treats 'To' (title case) as *not a stop word*, but
# gensim will not compute tf-idf for 'To'. To remove 'To' as a stop word here, I
# do an extra test to see if the lower case token is a stop word.
return not token.is_stop and not token.is_punct and not nlp.vocab[token.lower_].is_stop
def tokenizer(input_string):
doc = nlp(input_string)
tokens = [token for token in doc if normal(token)]
return tokens
def lemmatizer(tokens):
lemmas = [t.lemma_ for t in tokens]
return lemmas
def vectorizer(tokens):
vectors = [t.vector for t in tokens]
return vectors
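def _pipeline_example(text="Lawmakers passed the new infrastructure bill today."):
    # Hedged end-to-end sketch of the helpers above (the sentence is invented):
    # tokenize, drop stop words/punctuation, then lemmatize and average the vectors.
    tokens = tokenizer(text)
    return lemmatizer(tokens), np.mean(vectorizer(tokens), axis=0)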
nlp = spacy.load('en_core_web_md', entity = False, parser = False)
# Connect to local PostgreSQL
user = 'ubuntu'
password = ''
dbname = 'congress'
host = 'localhost'
local_port = '5432'
es = "postgresql+psycopg2://"+user+":"+password+"@/"+dbname+"?host="+host+"&port="+local_port
engine = create_engine(es)
print(engine)
Session = sessionmaker(bind=engine)
session = Session()
print('Session created')
socialTagVectors = pd.read_csv('socialTagVectors.csv')
congressRareTags = session.execute("SELECT bill_id, social_tags FROM congress_tagging;")#pd.read_csv('congress_rare_tags.csv', header = 0)
congressRareTags = congressRareTags.fetchall()
congressbillid = [i[0] for i in congressRareTags]
congresstag = [i[1] for i in congressRareTags]
congressRareTags = pd.DataFrame({'bill_id': congressbillid, 'social_tags': congresstag})
billVectors = pd.read_csv('bill_document_vectors.csv')
print('read 3')
words_column = 'official_title'
tag_column = 'bill_id'
dfm = pd.read_csv('bill_metadata_andrew.csv')
dfm = dfm[['bill_id',words_column,'short_title']]
dfv = pd.read_csv('bill_document_vectors.csv',header=None,index_col=None)
bill_ids = dfm.bill_id.values.tolist()
vectors = dfv.values.tolist()
df = pd.DataFrame({'bill_id':bill_ids,'vector':vectors})
socialTagOrder = pd.read_csv('socialTagOrder.csv', encoding = 'cp1252')
socialtagsorder = socialTagOrder.values.tolist()
socialvectors = socialTagVectors.values.tolist()
socialtagsorder = [i[0] for i in socialtagsorder]
socialTagOrder = pd.DataFrame({'social_tag': socialtagsorder, 'vector': socialvectors})
import os
import math
import warnings
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import shutil as sh
from glob import glob
from PIL import Image
from copy import copy
from tqdm import tqdm
from pathlib import Path
from datetime import datetime
import libs.dirs as dirs
import libs.commons as commons
import libs.utils as utils
from libs.index import IndexManager
from libs.get_frames_class import GetFramesFull
# User Input
def get_input_network_type(net_type_dict, message="network"):
'''
    Select between reference or semiauto network/dataset
'''
dictLen = len(net_type_dict)
print("\nEnter {} type code from list:\n".format(message))
print("Code\tName")
for i in range(dictLen):
print("{}:\t{}".format(i, net_type_dict[i]))
input_code = int(input())
if input_code < dictLen:
target_net = net_type_dict[input_code]
else:
target_net = "UNKNOWN"
while target_net not in net_type_dict.values():
input_code = input("Unknown network. Please select a network from the list.\n")
try:
input_code = int(input_code)
except ValueError:
continue
if input_code < dictLen:
target_net = net_type_dict[input_code]
return target_net
def get_input_target_class(net_class_dict):
'''
Get user input of net target class. Applicable to rede3 only.
'''
classLen = len(net_class_dict)
print("Enter the target class code from list:\n")
print("Code\tClass name")
for i in range(classLen):
print("{}:\t{}".format(i, net_class_dict[i]))
input_class_code = int(input())
if input_class_code < classLen:
event_class = net_class_dict[input_class_code]
else:
event_class = "UNKNOWN"
while event_class not in net_class_dict.values():
input_class_code = input("Unknown class. Please select a class from the list.\n")
try:
input_class_code = int(input_class_code)
except ValueError:
continue
if input_class_code < classLen:
event_class = net_class_dict[input_class_code]
return event_class
# Reports and logging
def get_class_counts(index, class_column, pos_label, neg_label):
'''
Returns index class counts according to input labels.
index: pandas DataFrame
DataFrame with the elements to be counted.
pos_label, neg_label: any object or list
labels to be compared to elements of index[class_column]. If any neg_label
is None, the count of negative elements will be <Total index size> - <positive count>.
'''
if isinstance(pos_label, str) or not(hasattr(pos_label, "__iter__")):
pos_label = [pos_label]
if isinstance(neg_label, str) or not(hasattr(neg_label, "__iter__")):
neg_label = [neg_label]
posCount = 0
for label in pos_label:
posCount += index.groupby(class_column).get_group(label).count()[0]
negCount = 0
for label in neg_label:
if label is None:
negCount = index.shape[0] - posCount
break
# Else, count normally
negCount += index.groupby(class_column).get_group(label).count()[0]
return posCount, negCount
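def _class_count_example():
    # Tiny fabricated index illustrating the counting rules: the label strings are
    # invented, and with neg_label=None the negative count is simply the remainder.
    idx = pd.DataFrame({"rede1": ["Evento", "Evento", "NaoEvento", "Confuso"]})
    return get_class_counts(idx, "rede1", pos_label="Evento", neg_label=None)  # (2, 2)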
def get_net_class_counts(index_path, net, target_class=None):
'''
Chooses correct class labels to use in a get_class_counts function call
according to input net and target_class.
'''
assert Path(index_path).is_file(), "Index path does not exist."
index = remove_duplicates(pd.read_csv(index_path, low_memory=False), "FrameHash")
if (net == 3) and (target_class not in commons.rede3_classes.values()):
raise ValueError("Net 3 requires a valid target_class.")
if net == 1:
classColumn = "rede1"
posLabel = commons.rede1_positive
negLabel = commons.rede1_negative
mask = None
elif net ==2:
classColumn = "rede2"
posLabel = commons.rede2_positive
negLabel = commons.rede2_negative
mask = (index["rede1"] == commons.rede1_positive)
elif net == 3:
classColumn = "rede3"
posLabel = target_class
negLabel = None
mask = (index["rede2"] == commons.rede2_positive)
if mask is not None:
# Pass only relevant fraction of index to get_class_counts
index = index.loc[mask, :]
# Translate to binary classes
index[classColumn] = translate_labels(index[classColumn], classColumn)
return get_class_counts(index, classColumn, posLabel, negLabel)
def save_seed_log(log_path, seed, id_string):
# Save sample seed
if Path(log_path).is_file():
f = open(log_path, 'a')
else:
f = open(log_path, 'w')
f.write("{}\n{}\n".format(id_string, seed))
f.close()
def get_loop_stats(loop_folder): # TODO: Finish function
statsDf = pd.DataFrame()
return statsDf
def make_report(report_path, sampled_path, manual_path, automatic_path, prev_unlabeled_path,
train_info, rede=1, target_class=None, show=False):
sampledIndex = pd.read_csv(sampled_path)
manualIndex = pd.read_csv(manual_path)
autoIndex = pd.read_csv(automatic_path)
#distance to com plots
#first run dataprocessing.m
#then run this file
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import seaborn as sns
# %matplotlib inline
import scipy.io as sio
sns.set(style="white")
from pylab import rcParams
from matplotlib import rc
# rcParams.update({'figure.figsize': (6.5,3)})
storage = sio.loadmat('for_python_FigSI4.mat')
d = storage['storage']
df = pd.DataFrame(data=d,columns=['a','b','c','T_1','T_2','Stability','Branch'])
top = df[df['Branch']==3.0]
top = top.reset_index()
top.head()
middle = df[df['Branch']==2.0]
middle = middle.reset_index()
middle.head()
bottom = df[df['Branch']==1.0]
bottom = bottom.reset_index()
bottom.head()
topbar = top.mean()
middlebar = middle.mean()
bottombar = bottom.mean()
print(topbar,middlebar,bottombar)
# Euclidean distance of each point to its branch centre of mass; each coordinate is
# measured against its own branch mean (T_1 against the T_1 mean, T_2 against the T_2 mean).
top['Distance to COM'] = np.sqrt((top['a']-topbar['a'])**2 + (top['b']-topbar['b'])**2 \
+ (top['c']-topbar['c'])**2 + (top['T_1']-topbar['T_1'])**2 \
+ (top['T_2']-topbar['T_2'])**2)
middle['Distance to COM'] = np.sqrt((middle['a']-middlebar['a'])**2 + (middle['b']-middlebar['b'])**2 \
+ (middle['c']-middlebar['c'])**2 + (middle['T_1']-middlebar['T_1'])**2 \
+ (middle['T_2']-middlebar['T_2'])**2)
bottom['Distance to COM'] = np.sqrt((bottom['a']-bottombar['a'])**2 + (bottom['b']-bottombar['b'])**2 \
+ (bottom['c']-bottombar['c'])**2 + (bottom['T_1']-bottombar['T_1'])**2 \
+ (bottom['T_2']-bottombar['T_2'])**2)
frames = [top,middle,bottom]
dist_to_com = pd.concat(frames)
dist_to_com.loc[dist_to_com['Branch']==3.0, 'Branch'] = 'Rac Dominated'
dist_to_com.loc[dist_to_com['Branch']==2.0, 'Branch'] = 'Coexistence'
dist_to_com.loc[dist_to_com['Branch']==1.0, 'Branch'] = 'Rho Dominated'
dist_to_com.loc[dist_to_com['Stability']==-1, 'Stability'] = 'LPA Unstable'
dist_to_com.loc[dist_to_com['Stability']==1, 'Stability'] = 'LPA Stable'
g = sns.catplot(palette={"LPA Stable": [0.3,0.3,0.3], "LPA Unstable": [1,0,0]},x="Branch",y="Distance to COM",hue="Stability",kind="box",data=dist_to_com, height=3,aspect=1.3,legend=False)
plt.legend(loc='upper right')
plt.savefig('Distance to COM.eps')
n = 50000
inside = []
annulus = []
outside = []
for i in range(0,n):
x = 1.25*(2*np.random.rand(1,5)-1)
r = np.linalg.norm(x)
if r <= 1:
inside.append(x)
elif r <= 1.25:
annulus.append(x)
else:
outside.append(x)
inside = np.array(inside)
inside = inside[:,0,:]
insidedf = pd.DataFrame(data=inside,columns=['x1','x2','x3','x4','x5'])
insidedf['Location'] = 'Sphere'
insidedf.head()
annulus = np.array(annulus)
annulus = annulus[:,0,:]
annulusdf = pd.DataFrame(data=annulus,columns=['x1','x2','x3','x4','x5'])
import pathlib
import shutil
from loguru import logger
import types
import pathlib
import importlib
import pandas as pd
import datamol
def _introspect(module, parent_lib):
data = []
for attr_str in dir(module):
if attr_str.startswith("_"):
continue
# Get the attribute
attr = getattr(module, attr_str)
# Attribute type
attr_type = type(attr)
attr_type_str = attr_type.__name__
# Get the attribute's parent module (the first level module after `datamol`)
if isinstance(attr, types.ModuleType):
parent_module_str = [module.__name__]
is_module = True
else:
parent_module_str = attr.__module__.split(".")
is_module = False
datum = {}
datum["attr"] = attr_str
datum["attr_type"] = attr_type_str
datum["parent_list"] = parent_module_str
datum["parent"] = parent_module_str[-1]
datum["is_module"] = is_module
data.append(datum)
# Build dataframe
data = pd.DataFrame(data)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import polling2
import requests
import json
from web3 import Web3
import pandas as pd
from decouple import config
from datetime import datetime
import logging
from collections import defaultdict
import time
from sqlalchemy import create_engine, desc, inspect  # inspect is used by query_to_dict
from sqlalchemy.orm import sessionmaker
from models import EdenBlock, Epoch, Base, Distribution, DistributionBalance
from apscheduler.schedulers.background import BackgroundScheduler
INFURA_ENDPOINT = config('INFURA_ENDPOINT')
PSQL_ENDPOINT = config('PSQL_ENDPOINT')
engine = create_engine(PSQL_ENDPOINT)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query_dict = {
'block': 'block.graphql',
'distribution': 'distribution.graphql',
'block_lookup': 'block_lookup.graphql',
'epoch_latest': 'epoch_latest.graphql',
'epoch': 'epoch.graphql'
}
eden_governance_api = 'https://api.thegraph.com/subgraphs/name/eden-network/governance'
eden_distribution_api = 'https://api.thegraph.com/subgraphs/name/eden-network/distribution'
eden_network_api = 'https://api.thegraph.com/subgraphs/name/eden-network/network'
def query_to_dict(rset):
result = defaultdict(list)
for obj in rset:
instance = inspect(obj)
for key, x in instance.attrs.items():
result[key].append(x.value)
return result
def get_web3_provider():
infura_endpoint = INFURA_ENDPOINT
my_provider = Web3.HTTPProvider(infura_endpoint)
w3 = Web3(my_provider)
return w3
def get_latest_eth_block():
eden_db_last_block = get_latest_eden_block_db()
w3 = get_web3_provider()
latest_eth_block = w3.eth.get_block('latest')['number']
if latest_eth_block > eden_db_last_block:
return latest_eth_block
else:
return None
def get_latest_eden_block_db():
eden_db_last_block = session.query(EdenBlock).order_by(desc(EdenBlock.block_number)).limit(1).all()
if eden_db_last_block != []:
eden_db_last_block = eden_db_last_block[0].block_number
else:
eden_db_last_block = 0
return eden_db_last_block
def clean_epoch_entry(epoch_string):
epoch_number = int(epoch_string.split('+')[1].replace('epoch', ''))
return int(epoch_number)
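# Illustrative only (the id format is inferred from the parsing above): epoch ids from
# the subgraph are assumed to look like "<prefix>+epoch<N>".
def _example_clean_epoch_entry():
    return clean_epoch_entry("governance+epoch42")  # -> 42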
def get_latest_distribution_number():
eden_db_last_number_query = session.query(Distribution).order_by(desc(Distribution.distribution_number)).limit(1).all()
if eden_db_last_number_query != []:
eden_last_number = eden_db_last_number_query[0].distribution_number
return eden_last_number
else:
return 0
def ipfs_link_cleanup(raw_uri):
final_ipfs_link = "https://ipfs.io/ipfs/" + raw_uri.split('//')[1]
return final_ipfs_link
def graph_query_call(api, query, variables=None):
request = requests.post(api, json={'query': query, 'variables': variables})
if request.status_code == 200:
return request.json()
else:
raise Exception('Query failed. return code is {}. {}'.format(request.status_code, query))
def fetch_query(query):
query_file = query_dict.get(query)
with open(query_file, 'r') as file:
data = file.read()
return data
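# Illustrative usage sketch (added for clarity): combining fetch_query and graph_query_call
# with variables. The block_id argument is a placeholder supplied by the caller, and
# 'block_lookup' must be registered in query_dict with its .graphql file on disk.
def _example_block_lookup(block_id):
    query = fetch_query('block_lookup')
    return graph_query_call(eden_governance_api, query, variables={'block_id': block_id})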
def get_epoch_number(block_number):
epoch_number_query = session.query(Epoch).filter(block_number >= Epoch.start_block_number, block_number <= Epoch.end_block_number).limit(1).all()
if epoch_number_query != []:
epoch_number = epoch_number_query[0].epoch_number
return epoch_number
else:
latest_epoch = get_latest_epoch()
return latest_epoch
def get_latest_epoch():
query = fetch_query('epoch_latest')
latest_epoch_result = graph_query_call(eden_governance_api, query)
latest_epoch_id = latest_epoch_result['data']['epoches'][0]['id']
latest_epoch_number = clean_epoch_entry(latest_epoch_id)
return latest_epoch_number
def get_block_number_from_id(block_id):
query = fetch_query('block_lookup')
variables = {'block_id': block_id}
block_result = graph_query_call(eden_governance_api, query, variables)
eden_block_number = int(block_result['data']['block']['number'])
return eden_block_number
def eden_block_call():
last_block = 0
last_block_current = get_latest_eth_block()
eden_blocks_df = pd.DataFrame()
while True:
query = fetch_query('block')
variables = {'number_gt': last_block}
block_result = graph_query_call(eden_governance_api, query, variables)
eden_blocks_df_temp = pd.DataFrame.from_dict(block_result['data']['blocks'])
# utils for plotting behavior
# this should be renamed to plotting/figures
from scipy.stats import norm, sem
from scipy.optimize import minimize
from scipy import interpolate
from statsmodels.stats.proportion import proportion_confint
from utilsJ.regularimports import groupby_binom_ci
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
import types
import swifter
import tqdm
from concurrent.futures import as_completed, ThreadPoolExecutor
from scipy.interpolate import interp1d
# from utilsJ.Models import alex_bayes as ab
# useless, use dir(plotting) instead
# def help():
# """should print at least available functions"""
# print("available methods:")
# print("distbysubj: grid of distributions")
# print("psych_curve")
# print("correcting_kiani")
# print("com_heatmap")
# print("binned curve: curve of means and err of y-var binning by x-var")
# class cplot(): # cplot stands for custom plot
# def __init__():
def distbysubj(
df,
data, # x?
by, #
grid_kwargs={},
dist_kwargs={},
override_defaults=False
):
"""
returns facet (g) so user can use extra methods
ie: .set_axis_labels('x','y')
. add_legend
data: what to plot (bin on) ~ str(df.col header)
by: sorter (ie defines #subplots) ~ str(df.col header)
returns sns.FacetGrid obj
"""
if override_defaults:
def_grid_kw = grid_kwargs
def_dist_kw = dist_kwargs
else:
def_grid_kw = dict(col_wrap=2, hue="CoM_sugg", aspect=2)
def_grid_kw.update(grid_kwargs)
def_dist_kw = dict(kde=False, norm_hist=True, bins=np.linspace(0, 400, 50))
def_dist_kw.update(dist_kwargs)
g = sns.FacetGrid(df, col=by, **def_grid_kw)
g = g.map(sns.distplot, data, **def_dist_kw)
return g
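# Illustrative usage sketch (added for clarity): the column names below ('sound_len',
# 'subjid') come from the commented-out example further down; 'CoM_sugg' is the default
# hue, so the passed DataFrame is assumed to contain all three columns.
def _example_distbysubj(df):
    g = distbysubj(df, data='sound_len', by='subjid')
    g.set_axis_labels('sound length (ms)', 'density')
    return g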
# def raw_rt(df)
# f, ax = plt.subplots(ncols=2, nrows=3,sharex=True, figsize=(16,9))
# ax = ax.flatten()
# for i, subj in enumerate([f'LE{x}' for x in range(82,88)]):
# vec_toplot = np.concatenate([df.loc[df.subjid==subj, 'sound_len'].values+300, 1000*np.concatenate(df.loc[df.subjid==subj,'fb'].values)])
# sns.distplot(vec_toplot[(vec_toplot<=700)], kde=False, ax=ax[i])
# ax[i].set_title(subj)
# ax[i].axvline(300, c='k', ls=':')
# plt.xlabel('Fixation + RT')
# plt.show()
def sigmoid(fit_params, x_data, y_data):
"""sigmoid functions for psychometric curves
    fit_params is a tuple: (sensitivity, bias, Right Lapse, Left Lapse);
    x_data: evidence/coherence;
    y_data: hit or miss, right responses, etc."""
s, b, RL, LL = fit_params
ypred = RL + (1 - RL - LL) / (
1 + np.exp(-(s * x_data + b))
) # this is harder to interpret
# replacing function so it is meaningful
# ypred = RL + (1-RL-LL)/(1+np.exp(-s(x_data-b)))
return -np.sum(norm.logpdf(y_data, loc=ypred))
def sigmoid_no_lapses(fit_params, x_data, y_data):
"""same without lapses"""
s, b = fit_params
ypred = 1 / (1 + np.exp(-(s * x_data + b))) # this is harder to interpret
# replacing function so it is meaningful
# ypred = RL + (1-RL-LL)/(1+np.exp(-s(x_data-b)))
return -np.sum(norm.logpdf(y_data, loc=ypred))
def raw_psych(y, x, lapses=True):
"""returns params"""
if lapses:
loglike = minimize(sigmoid, [1, 1, 0, 0], (x, y))
else:
loglike = minimize(sigmoid_no_lapses, [0, 0], (x, y))
params = loglike["x"]
return params
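# Illustrative sketch on synthetic data (not from the original experiments): recover the
# sigmoid parameters (sensitivity, bias, right lapse, left lapse) with raw_psych.
def _example_fit_psychometric(n_trials=500, seed=0):
    rng = np.random.RandomState(seed)
    coherence = rng.uniform(-1, 1, n_trials)
    # simulate choices from a sigmoid with s=4, b=0.5 and 5% lapses on each side
    p_right = 0.05 + 0.9 / (1 + np.exp(-(4 * coherence + 0.5)))
    right_choice = rng.binomial(1, p_right)
    return raw_psych(right_choice, coherence)  # -> array([s, b, RL, LL])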
def psych_curve(
target,
coherence,
ret_ax=None, # if none returns data
annot=False,
xspace=np.linspace(-1, 1, 50),
kwargs_plot={},
kwargs_error={},
):
"""
function to plot psych curves
target: binomial target (y), aka R_response, or Repeat
coherence: coherence (x)
ret_ax:
None: returns tupple of vectors: points (2-D, x&y) and confidence intervals (2D, lower and upper), fitted line[2D, x, and y]
True: returns matplotlib ax object
xspace: x points to retrieve y_pred from curve fitting
kwargs: aesthethics for each of the plotting
"""
# convert vectors to arrays if they are pd.Series (rebinding the loop variable
# would not update the originals), then check for NaNs
if isinstance(target, pd.Series):
target = target.values
if isinstance(coherence, pd.Series):
coherence = coherence.values
for item in [target, coherence]:
if np.isnan(item).sum():
raise ValueError("Nans detected in provided vector")
if np.unique(target).size > 2:
raise ValueError("target has >2 unique values (invalids?!)")
""" not required
if not (coherence<0).sum(): # 0 in the sum means there are no values under 0
coherence = coherence * 2 - 1 # (from 01 to -11)
r_resp = np.full(hit.size, np.nan)
r_resp[np.where(np.logical_or(hit==0, hit==1)==True)[0]] = 0
r_resp[np.where(np.logical_and(rewside==1, hit==1)==True)[0]] = 1 # right and correct
r_resp[np.where(np.logical_and(rewside==0, hit==0)==True)[0]] = 1 # left and incorrect
"""
kwargs_plot_default = {"color": "tab:blue"}
kwargs_error_default = dict(
ls="none", marker="o", markersize=3, capsize=4, color="maroon"
)
kwargs_plot_default.update(kwargs_plot)
kwargs_error_default.update(kwargs_error)
tmp = pd.DataFrame({"target": target, "coh": coherence})
import logging
import pickle
import os
import time
import threading
import urllib
from base64 import b64encode
from collections import defaultdict
from io import BytesIO
import matplotlib as mpl
mpl.use('Agg') # noqa
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import pyinotify
from jinja2 import Environment, PackageLoader
import numpy as np
import pytz
from numpy import rot90
from scipy.misc import imread, imsave
from datetime import datetime
from scipy.interpolate import interp1d
from bb_binary import parse_image_fname_beesbook, get_timezone
from pipeline import Pipeline
from pipeline.pipeline import get_auto_config
from pipeline.objects import Image, FinalResultOverlay, IDs, LocalizerInputImage, \
CrownOverlay, SaliencyOverlay
from pipeline.stages import ResultCrownVisualizer
sns.set_style('whitegrid')
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
logging.debug('{0} finished in {1:.2f} seconds'.format(method.__name__, te - ts))
return result
return timed
def get_b64_uri(bytes, format):
src = 'data:image/{0};base64,{1}'
return src.format(format, b64encode(bytes.getvalue()).decode('utf-8'))
def get_image_bytes(image, format='jpeg'):
b = BytesIO()
imsave(b, image, format)
return b
def get_fig_bytes(format='png', **kwargs):
b = BytesIO()
plt.savefig(b, bbox_inches='tight', format=format, **kwargs)
return b
def localize(dt):
return pytz.timezone('UTC').localize(dt).astimezone(get_timezone())
def get_localtime():
return localize(datetime.now())
class ImgTemplate:
def __init__(self, id=None, src=None, alt=None, header=None, reload_interval=30000):
self.id = id
self.src = src
self.alt = alt
self.header = header
self.reload_interval = reload_interval
class RowTemplate:
def __init__(self, num_cols):
self.images = [ImgTemplate() for _ in range(num_cols)]
class TabTemplate:
def __init__(self, name, num_rows, num_cols, active_on_load=False):
self.name = name
self.rows = [RowTemplate(num_cols) for _ in range(num_rows)]
self.set_active_on_load(active_on_load)
def set_active_on_load(self, active_flag=False):
self.div_class_extras = 'in active' if active_flag else ''
self.li_class_extras = 'active' if active_flag else ''
class SectionTemplate:
def __init__(self, name, header=None, tabs=None,
grid_class='col-xs-12 col-sm-12 col-md-6 col-lg-6'):
self.name = name
self.header = header
self.grid_class = grid_class
if tabs is not None:
self.tabs = tabs
else:
self.tabs = []
class SiteBuilder:
def __init__(self, output_path, template='index.html', min_interval=30):
env = Environment(loader=PackageLoader('bb_live', 'templates'), trim_blocks=True)
self.template = env.get_template(template)
self.min_interval = min_interval
self.output_path = output_path
self.output_fname = os.path.join(output_path, template)
self.uris = defaultdict(str)
image_tabs = [TabTemplate(name, 2, 2) for name in ('detections', 'decodings',
'inputs', 'saliencies')]
image_tabs[0].set_active_on_load(True)
image_section = SectionTemplate('images', header=None, tabs=image_tabs)
hive_tabs = [TabTemplate('bees', 1, 1, active_on_load=True),
TabTemplate('population', 1, 1),
TabTemplate('age', 1, 1)]
hive_section = SectionTemplate('hive', header='Hive statistics over time',
tabs=hive_tabs,
grid_class='col-xs-12 col-sm-12 col-md-12 col-lg-12')
metrics_tabs = [TabTemplate(name, 1, 1) for name in ('smd', 'variance',
'noise', 'contrast',
'cratio', 'cratioMinMax')]
metrics_tabs[0].set_active_on_load(True)
metrics_section = SectionTemplate('metrics', header='Image statistics over time',
tabs=metrics_tabs,
grid_class='col-xs-12 col-sm-12 col-md-12 col-lg-12')
self.sections = [image_section, hive_section, metrics_section]
def get_image_template(self, section_name, tab_name, idx):
section = [s for s in self.sections if s.name == section_name]
assert(len(section) == 1)
section = section[0]
tab = [t for t in section.tabs if t.name == tab_name]
assert(len(tab) == 1)
tab = tab[0]
row_idx, col_idx = divmod(int(idx), len(tab.rows))
return tab.rows[row_idx].images[col_idx]
def update_uri(self, key, value):
self.uris[key] = value
def save_image(self, name, im, format):
fname = name + '.' + format
imsave(os.path.join(self.output_path, fname), im)
def save_figure(self, name, format):
fname = name + '.' + format
plt.savefig(os.path.join(self.output_path, fname), dpi=300,
bbox_inches='tight', format=format)
def save_tab_image(self, name, tab, section, idx, format, header=None):
id = '{}_{}'.format(tab, name)
fname = '{}.{}'.format(id, format)
self.update_uri(name, '{}?{}'.format(fname, get_localtime().timestamp()))
image_template = self.get_image_template(section, tab, idx)
image_template.id = id
image_template.src = fname
image_template.alt = '{} live preview'.format(id)
image_template.header = header
def build(self):
html = self.template.render(last_updated=get_localtime(),
sections=self.sections,
**self.uris)
open(self.output_fname, 'w').write(html)
class ImageHandler:
def __init__(self, site_builder,
camera_rotations={0: 1, 1: -1, 2: 1, 3: -1},
detections_path='detections.pkl'):
self.builder = site_builder
self.rotations = camera_rotations
self.pipeline = Pipeline([Image], [LocalizerInputImage, FinalResultOverlay, CrownOverlay,
IDs, SaliencyOverlay], **get_auto_config())
self.crown = ResultCrownVisualizer()
self.detections_path = detections_path
if os.path.isfile(self.detections_path):
self.detections = pickle.load(open(self.detections_path, 'rb'))
else:
self.detections = []
@timeit
def run_pipeline(self, image):
results = self.pipeline([image])
return results
@timeit
def plot_detections(self, num_plot_samples=1000, std_window_size=25):
detections = pd.DataFrame(self.detections, columns=('datetime', 'camIdx',
'id', 'confidence'))
minTs = detections.datetime.min().timestamp()
maxTs = detections.datetime.max().timestamp()
fig, ax = plt.subplots(1, figsize=(16, 4), facecolor='white')
sumValues = None
for camIdx in (0, 1, 2, 3):
x = np.array([ts.timestamp() for ts in
detections[detections.camIdx == str(camIdx)]
.groupby('datetime').id.agg(len).keys()])
y = detections[detections.camIdx == str(camIdx)].groupby('datetime').id.agg(len).values
if len(x) < 2:
continue
f = interp1d(x, y, fill_value='extrapolate')
dts = np.arange(minTs, maxTs, step=((maxTs - minTs) / num_plot_samples))
dts = dts.astype(np.float64)
values = f(dts)
df = pd.DataFrame(
[(localize(datetime.fromtimestamp(dt)), val) for dt, val in zip(dts, values)],
columns=('datetime', 'cam{}'.format(camIdx)))
df.plot(ax=ax)
if sumValues is None:
sumValues = values
else:
sumValues += values
if sumValues is not None:
df = pd.DataFrame(
[(datetime.fromtimestamp(dt), val) for dt, val in zip(dts, sumValues)],
columns=('datetime', 'combined'))
df.plot(ax=ax, yerr=df.combined.rolling(window=std_window_size, min_periods=0).std(),
ecolor=tuple(list(sns.color_palette()[4]) + [0.3]))
ax.legend(loc='upper left')
ax.set_xlabel('time')
ax.locator_params(nbins=12)
ax.set_xticklabels([dt.strftime('%a %H:%M:%S') for dt in df.datetime[ax.get_xticks()[:-1]]])
ax.set_title('Number of visible tagged bees in colony')
locs, labels = plt.xticks()
plt.setp(labels, rotation=45)
self.builder.update_uri('detections', get_b64_uri(get_fig_bytes(), format='png'))
self.builder.save_figure('bees_detections', format='png')
self.builder.save_tab_image('detections', 'bees', 'hive', idx=0, format='png')
plt.close('all')
@timeit
def process_image(self, path, fname):
im = imread(path)
camIdx = fname.split('.')[0][-1]
im = rot90(im, self.rotations[int(camIdx)]).astype(np.uint8).copy()
orig_im = np.copy(im)
results = self.run_pipeline(im)
dt = get_localtime()
for id in results[IDs]:
confidence = np.min(np.abs(0.5 - id)) * 2
self.detections.append((dt, camIdx, int(''.join([str(c) for c in
np.round(id).astype(np.int)])),
confidence))
pickle.dump(self.detections, open(self.detections_path, 'wb'))
self.plot_detections()
img_with_overlay = self.crown.add_overlay(im.astype(np.float64) / 255,
results[CrownOverlay])
saliency_overlay = results[SaliencyOverlay]
self.builder.save_image('detections_cam{}'.format(camIdx),
results[FinalResultOverlay],
format='jpeg')
self.builder.save_image('decodings_cam{}'.format(camIdx),
img_with_overlay,
format='jpeg')
self.builder.save_image('saliencies_cam{}'.format(camIdx),
saliency_overlay,
format='jpeg')
self.builder.save_image('inputs_cam{}'.format(camIdx),
orig_im,
format='jpeg')
self.builder.save_tab_image('cam{}'.format(camIdx), 'detections', 'images',
camIdx, format='jpeg', header='Cam{}'.format(camIdx))
self.builder.save_tab_image('cam{}'.format(camIdx), 'decodings', 'images',
camIdx, format='jpeg', header='Cam{}'.format(camIdx))
self.builder.save_tab_image('cam{}'.format(camIdx), 'saliencies', 'images',
camIdx, format='jpeg', header='Cam{}'.format(camIdx))
self.builder.save_tab_image('cam{}'.format(camIdx), 'inputs', 'images',
camIdx, format='jpeg', header='Cam{}'.format(camIdx))
class AnalysisHandler:
def __init__(self, source_dir, site_builder,
analysis_metrics=('filename', 'smd', 'variance', 'noise',
'contrast', 'cratio', 'cratioMinMax')):
self.builder = site_builder
self.analysis_metrics = analysis_metrics
self.analysis_paths = [os.path.join(source_dir, f) for f in
os.listdir(source_dir) if f.startswith('analysis')]
@timeit
def parse_analysis(self):
dfs = []
for fn in self.analysis_paths:
analysis = pd.read_csv(fn, sep='\t', names=self.analysis_metrics)
analysis['camIdx'] = [parse_image_fname_beesbook(s)[0] for s in analysis['filename']]
analysis['datetime'] = [localize(pd.datetime.fromtimestamp(
parse_image_fname_beesbook(s)[1])) for s in analysis['filename']]
dfs.append(analysis)
analysis = pd.concat(dfs)
analysis.sort_values('datetime', inplace=True)
return analysis
@timeit
def plot_analysis(self, analysis):
for column in self.analysis_metrics[1:]:
fig, ax = plt.subplots(1, figsize=(16, 4), facecolor='white')
for camIdx in (0, 1, 2, 3):
analysis[analysis.camIdx == camIdx].plot('datetime', column,
label='cam{}'.format(camIdx),
title=column, ax=ax)
ax.legend(loc='upper left')
ax.set_xlabel('time')
self.builder.update_uri(column, get_b64_uri(get_fig_bytes(), format='png'))
self.builder.save_figure('{}_metrics'.format(column), format='png')
self.builder.save_tab_image('metrics', column, 'metrics', idx=0, format='png')
plt.close('all')
def update(self):
self.plot_analysis(self.parse_analysis())
class PeriodicHiveAnalysis:
def __init__(self, builder, interval=3600,
detections_path='detections.pkl'):
self.builder = builder
self.interval = interval
self.detections_path = detections_path
def get_detected_ids(self, detections, confidence_treshhold=0.99):
# only use detections with very high confidence
detected_ids = [list([int(c) for c in str(id).rjust(12, '0')]) for id in
detections[detections.confidence > confidence_treshhold].id]
# convert ids from pipeline order to 'ferwar' order
adjusted_ids = np.roll(detected_ids, 3, axis=1)
# convert to decimal id using 11 least significant bits
decimal_ids = [int(''.join([str(c) for c in id[:11]]), 2) for id in adjusted_ids]
# determine what kind of parity bit was used and add 2^11 to decimal id
# uneven parity bit was used
decimal_ids = np.array(decimal_ids)
decimal_ids[(np.sum(adjusted_ids, axis=1) % 2) == 1] += 2048
return decimal_ids
def get_unique_ids(self, decimal_ids, min_detections=None, max_id=3000):
unique, counts = np.unique(decimal_ids, return_counts=True)
if min_detections is None:
min_detections = np.ceil(np.mean([count for id, count in
dict(zip(unique, counts)).items() if id > max_id]))
# determine approximate number of unique ids seen in last 24 hours
filtered = [(u, c) for u, c in zip(unique, counts) if u < max_id and c > min_detections]
unique, counts = zip(*filtered)
return unique, counts
@timeit
def plot_analysis(self, unique_detections_hourly, time_delta):
fig, ax = plt.subplots(1, 1, figsize=(16, 4), facecolor='white')
median_detections = unique_detections_hourly.rolling(
center=False, min_periods=2, window=10).median()
std_detections = unique_detections_hourly.rolling(
center=False, window=10, min_periods=2).std()
medians_flat = median_detections.Uniques.as_matrix().flatten()
stds_flat = std_detections.as_matrix().flatten()
ax.fill_between(
median_detections.index,
medians_flat - 2 * stds_flat,
medians_flat + 2 * stds_flat,
alpha=0.2)
median_detections.plot(ax=ax, legend=False,
title='Number of bees in colony'.format(
int(time_delta.total_seconds() // 3600)))
nticks = 12
xticks = pd.date_range(start=unique_detections_hourly.index[0],
end=unique_detections_hourly.index[-1],
freq=(unique_detections_hourly.index[-1] -
unique_detections_hourly.index[0]) / nticks)
ax.set_xticks(xticks)
ax.set_xticklabels([dt.strftime('%a %H:%M:%S') for dt in xticks])
ax.set_ylim((ax.get_ylim()[0] - 10, ax.get_ylim()[-1] + 10))
ax.set_xlim((ax.get_xticks()[1], ax.get_xlim()[-1]))
self.builder.update_uri('count', get_b64_uri(get_fig_bytes(), format='png'))
self.builder.save_figure('population_count', format='png')
self.builder.save_tab_image('count', 'population', 'hive', idx=0, format='png')
plt.close('all')
@timeit
def plot_age_distribution(self, ages):
fig, ax = plt.subplots(1, 1, figsize=(16, 4), facecolor='white')
sns.distplot(ages, bins=8, hist=True,
kde_kws={'bw': 0.4, 'shade': True, 'color': 'b', 'alpha': 0.3},
hist_kws={'alpha': 0.3, 'color': 'gray'},
ax=ax, label='age distribution')
ax.axvline(np.median(ages), color='gray', linestyle='dashed', linewidth=2,
label='median ({} days)'.format(int(np.median(ages))))
ax.set_xlabel('Age in days')
ax.set_ylabel('Proportion of detected bees')
ax.set_title('Age distribution in hive')
ax.set_xlim((np.min(ages), np.max(ages)))
ax.legend()
self.builder.update_uri('distribution', get_b64_uri(get_fig_bytes(), format='png'))
self.builder.save_figure('age_distribution', format='png')
self.builder.save_tab_image('distribution', 'age', 'hive', idx=0, format='png')
plt.close('all')
@timeit
def analyse_age_distribution(self, unique, counts):
urllib.request.urlretrieve(
'https://www.dropbox.com/s/ze3chu5mvetjwv2/TagsControl2016.xlsx?dl=1',
'TagsControl2016.xlsx')
age_data = pd.read_excel('TagsControl2016.xlsx')
age_data.drop('Unnamed: 0', axis=1, inplace=True)
age_data.Date = pd.to_datetime(age_data.Date)
import datetime as dt
import glob
import os
import shutil
import unittest
import numpy as np
import pandas as pd
import devicely
class EverionTestCase(unittest.TestCase):
READ_PATH = 'tests/Everion_test_data'
BROKEN_READ_PATH = 'tests/Everion_test_data_broken' #for testing with missing files
WRITE_PATH = 'tests/Everion_test_data_write'
def setUp(self):
self.reader = devicely.EverionReader(self.READ_PATH)
def test_basic_read(self):
self._test_read_individual_dataframes(self.reader)
expected_signal_tags = ['heart_rate', 'respiration_rate', 'heart_rate_variability',
'oxygen_saturation', 'gsr_electrode', 'temperature_object',
'barometer_pressure', 'temperature_local', 'ctemp',
'temperature_barometer']
expected_signal_quality_tags = ['heart_rate_quality', 'respiration_rate_quality',
'heart_rate_variability_quality', 'oxygen_saturation_quality',
'ctemp_quality']
expected_sensor_tags = ['accz_data', 'led2_data', 'led1_data', 'led4_data',
'accy_data', 'accx_data', 'led3_data', 'acc_mag']
expected_feature_tags = ['inter_pulse_interval', 'inter_pulse_interval_deviation']
expected_columns = set(expected_signal_tags + expected_signal_quality_tags +
expected_sensor_tags + expected_feature_tags)
self.assertEqual(set(self.reader.data.columns), expected_columns)
def test_read_with_non_default_tags(self):
signal_tags = [12, 15, 19, 119, 134]
sensor_tags = [80, 83, 84, 85, 92]
feature_tags = [17]
reader = devicely.EverionReader(self.READ_PATH,
signal_tags=signal_tags,
sensor_tags=sensor_tags,
feature_tags=feature_tags)
# The individual dataframes should contain all tags, regardless of the initialization parameters.
self._test_read_individual_dataframes(reader)
expected_singal_columns = ['respiration_rate', 'temperature_local',
'ctemp', 'temperature_barometer']
expected_signal_quality_columns = ['respiration_rate_quality', 'ctemp_quality']
# no acc_mag because 86 (accz_data) is missing
expected_sensor_columns = ['led1_data', 'led4_data', 'accy_data', 'accx_data']
#17 is a valid feature column, but it is not present in the testing csv
expected_feature_columns = []
expected_columns = set(expected_singal_columns + expected_signal_quality_columns +
expected_sensor_columns + expected_feature_columns)
self.assertEqual(set(reader.data.columns), expected_columns)
def test_read_with_invalid_tags(self):
signal_tags = [12, 15, 19, 119, 134, 80] #80 is not a signal tag
sensor_tags = [80, 83, 84, 85, 92, 70] #70 is not a sensor tag
feature_tags = [17, 86] #86 is not a sensor tag
call = lambda: devicely.EverionReader(self.READ_PATH,
signal_tags=signal_tags,
sensor_tags=sensor_tags,
feature_tags=feature_tags)
self.assertRaises(KeyError, call)
def test_read_with_missing_files(self):
print(os.listdir())
shutil.copytree(self.READ_PATH, self.BROKEN_READ_PATH)
signals_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*signals*")).pop()
attributes_dailys_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*attributes_dailys*")).pop()
os.remove(signals_path)
os.remove(attributes_dailys_path)
reader = devicely.EverionReader(self.BROKEN_READ_PATH)
self.assertIsNone(reader.signals)
self.assertIsNone(reader.attributes_dailys)
expected_sensor_tags = ['accz_data', 'led2_data', 'led1_data', 'led4_data',
'accy_data', 'accx_data', 'led3_data', 'acc_mag']
expected_feature_tags = ['inter_pulse_interval', 'inter_pulse_interval_deviation']
expected_columns = set(expected_sensor_tags + expected_feature_tags)
self.assertEqual(set(reader.data.columns), expected_columns)
shutil.rmtree(self.BROKEN_READ_PATH)
def test_read_with_all_join_files_missing(self):
#The signals-, sensors-, and features files are the three join files.
shutil.copytree(self.READ_PATH, self.BROKEN_READ_PATH)
signals_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*signals*")).pop()
sensors_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*sensor_data*")).pop()
features_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*features*")).pop()
os.remove(signals_path)
os.remove(sensors_path)
os.remove(features_path)
reader = devicely.EverionReader(self.BROKEN_READ_PATH)
self.assertIsNone(reader.signals)
self.assertIsNone(reader.sensors)
self.assertIsNone(reader.features)
pd.testing.assert_frame_equal(reader.data, pd.DataFrame())
shutil.rmtree(self.BROKEN_READ_PATH)
def test_timeshift_to_timestamp(self):
expected_aggregates_head = pd.DataFrame({
'count': 5 * [4468],
'streamType': 5 * [5],
'tag': [40, 18, 21, 7, 100],
'time': pd.to_datetime(5 * [1525200281], unit='s'),
'values': [-2.0, 0.76, 21.0, 60.0, 0.0],
'quality': [np.nan, 13.0, np.nan, 0.0, np.nan]
})
expected_analytics_events_head = pd.DataFrame({
"count": [5622, 5621, 5620, 5619, 5618],
"streamType": 5 * [7],
"tag": 5 * [1],
"time": pd.to_datetime([1525204397, 1525204397, 1525204148, 1525204131, 1525203790], unit='s'),
"values": [22.0, 2.0, 22.0, 22.0, 2.0]
})
expected_attributes_dailys_head = pd.DataFrame({
"count": [14577, 14576, 14575, 14574, 14573],
"streamType": 5 * [8],
"tag": 5 * [67],
"time": pd.to_datetime(5 * [1525207721], unit='s'),
"values": [2.0, 4.0, 3.0, 11.0, 12.0],
"quality": [15.0, 9.0, 8.0, 6.0, 5.0]
})
expected_everion_events_head = pd.DataFrame({
"count": 5 * [46912],
"streamType": 5 * [6],
"tag": [128, 131, 129, 132, 126],
"time": pd.to_datetime(5 * [1525192729], unit='s'),
"values": [65295.0, 900.0, 44310.0, 4096.0, 0.0]
})
expected_features_head = pd.DataFrame({
"count": [787000, 787001, 787002, 787003, 787004],
"streamType": 5 * [4],
"tag": 5 * [14],
"time": pd.to_datetime([1525192675, 1525192675, 1525192676, 1525192677, 1525192678], unit='s'),
"values": [950.0, 1085.0, 1074.0, 1021.0, 1056.0],
"quality": [12.0, 11.0, 12.0, 10.0, 11.0]
})
expected_sensors_head = pd.DataFrame({
"count": 5 * [22917264],
"streamType": 5 * [16],
"tag": [86, 81, 80, 83, 85],
"time": pd.to_datetime(5 * [1525192361], unit='s'),
"values": [2176.0, 51612.0, 668.0, 26377.0, 1232.0]
})
expected_signals_head = pd.DataFrame({
'count': 5 * [806132],
'streamType': 5 * [2],
'tag': [71, 13, 6, 66, 12],
'time': pd.to_datetime(5 * [1525192381], unit='s'),
'values': [0.0, 21.86422, 65.0, 1.5686275, 18.0],
'quality': [np.nan, 100.0, 85.0, np.nan, 93.0]
})
timestamp = pd.Timestamp('1 May 2018 16:32:41')
self.reader.timeshift(timestamp)
pd.testing.assert_frame_equal(self.reader.aggregates.head(), expected_aggregates_head)
pd.testing.assert_frame_equal(self.reader.analytics_events.head(), expected_analytics_events_head)
pd.testing.assert_frame_equal(self.reader.attributes_dailys.head(), expected_attributes_dailys_head)
pd.testing.assert_frame_equal(self.reader.everion_events.head(), expected_everion_events_head)
pd.testing.assert_frame_equal(self.reader.features.head(), expected_features_head)
pd.testing.assert_frame_equal(self.reader.signals.head(), expected_signals_head)
pd.testing.assert_frame_equal(self.reader.sensors.head(), expected_sensors_head)
new_joined_data_time_col = self.reader.data.index
self.assertTrue((new_joined_data_time_col.date == dt.date(2018, 5, 1)).all())
def test_timeshift_by_timedelta(self):
expected_aggregates_head = pd.DataFrame({
'count': 5 * [4468],
'streamType': 5 * [5],
'tag': [40, 18, 21, 7, 100],
'time': pd.to_datetime(5 * [1532216847], unit='s'),
'values': [-2.0, 0.76, 21.0, 60.0, 0.0],
'quality': [np.nan, 13.0, np.nan, 0.0, np.nan]
})
expected_analytics_events_head = pd.DataFrame({
"count": [5622, 5621, 5620, 5619, 5618],
"streamType": 5 * [7],
"tag": 5 * [1],
"time": pd.to_datetime([1532220716, 1532220716, 1532220467, 1532220450, 1532220109], unit='s'),
"values": [22.0, 2.0, 22.0, 22.0, 2.0]
})
expected_attributes_dailys_head = pd.DataFrame({
"count": [14577, 14576, 14575, 14574, 14573],
"streamType": 5 * [8],
"tag": 5 * [67],
"time": pd.to_datetime(5 * [1532220709], unit='s'),
"values": [2.0, 4.0, 3.0, 11.0, 12.0],
"quality": [15.0, 9.0, 8.0, 6.0, 5.0]
})
expected_everion_events_head = pd.DataFrame({
"count": 5 * [46912],
"streamType": 5 * [6],
"tag": [128, 131, 129, 132, 126],
"time": pd.to_datetime(5 * [1532220837], unit='s'),
"values": [65295.0, 900.0, 44310.0, 4096.0, 0.0]
})
expected_features_head = pd.DataFrame({
"count": [787000, 787001, 787002, 787003, 787004],
"streamType": 5 * [4],
"tag": 5 * [14],
"time": pd.to_datetime([1532216891, 1532216891, 1532216892, 1532216893, 1532216894], unit='s'),
"values": [950.0, 1085.0, 1074.0, 1021.0, 1056.0],
"quality": [12.0, 11.0, 12.0, 10.0, 11.0]
})
expected_sensors_head = pd.DataFrame({
"count": 5 * [22917264],
"streamType": 5 * [16],
"tag": [86, 81, 80, 83, 85],
"time": pd.to_datetime(5 * [1532220108], unit='s'),
"values": [2176.0, 51612.0, 668.0, 26377.0, 1232.0]
})
expected_signals_head = pd.DataFrame({
'count': 5 * [806132],
'streamType': 5 * [2],
'tag': [71, 13, 6, 66, 12],
'time': pd.to_datetime(5 * [1532216905], unit='s'),
'values': [0.0, 21.86422, 65.0, 1.5686275, 18.0],
'quality': [np.nan, 100.0, 85.0, np.nan, 93.0]
})
timedelta = - pd.Timedelta('222 days, 15 hours, 51 minutes, 33 seconds')
old_joined_data_time_col = self.reader.data.index.copy()
self.reader.timeshift(timedelta)
pd.testing.assert_frame_equal(self.reader.aggregates.head(), expected_aggregates_head)
pd.testing.assert_frame_equal(self.reader.analytics_events.head(), expected_analytics_events_head)
pd.testing.assert_frame_equal(self.reader.attributes_dailys.head(), expected_attributes_dailys_head)
pd.testing.assert_frame_equal(self.reader.everion_events.head(), expected_everion_events_head)
pd.testing.assert_frame_equal(self.reader.features.head(), expected_features_head)
pd.testing.assert_frame_equal(self.reader.signals.head(), expected_signals_head)
pd.testing.assert_frame_equal(self.reader.sensors.head(), expected_sensors_head)
new_joined_data_time_col = self.reader.data.index.copy()
pd.testing.assert_index_equal(old_joined_data_time_col + timedelta, new_joined_data_time_col)
"""Live and historical flood monitoring data from the Environment Agency API"""
import requests
import pandas as pd
import flood_tool.geo as geo
import flood_tool.tool as tool
import numpy as np
import folium
__all__ = []
LIVE_URL = "http://environment.data.gov.uk/flood-monitoring/id/stations"
ARCHIVE_URL = "http://environment.data.gov.uk/flood-monitoring/archive/"
class get_map(object):
'''
class to retrieve rainfall data from the website, design an alert system and find historical data for a specific date
'''
def __init__(self, date='2019-10-08'):
self.station_url = 'http://environment.data.gov.uk/flood-monitoring/id/stations.csv?parameter=rainfall'
self.value_url = 'http://environment.data.gov.uk/flood-monitoring/id/measures.csv?parameter=rainfall'
self.spe_date_url = 'http://environment.data.gov.uk/flood-monitoring/data/readings.csv?parameter=rainfall¶meter=rainfall&_view=full&date=' + str(date)
# for requirement 3
# read the latest values from website
# read the corresponding values of stations
# and it is updated once the new file is uploaded
self.date = date
DF_value = pd.read_csv(self.value_url)
self.value = DF_value.loc[:, ['stationReference', 'value', 'dateTime']]
# read the correspongding latitude and longitude of stations
DF_location = pd.read_csv(self.station_url)
self.location = DF_location.loc[:, ['stationReference', 'lat', 'long']]
# merge the two DataFrame to be used later.
# the purpose is to combine the location with value
self.DF3 = self.value.merge(self.location, left_on='stationReference', right_on='stationReference')
DF_readings = pd.read_csv(self.spe_date_url)
readings = DF_readings.loc[:, ['stationReference', 'value', 'dateTime']]
DF4 = self.location.merge(readings, left_on='stationReference', right_on='stationReference')
# turns all elements in value column into floats
DF4['value'] = pd.to_numeric(DF4['value'], errors='coerce')
# converts np.nan into 0
DF4['value'] = DF4['value'].fillna(0)
self.DF4 =DF4
def get_all_station(self, latitude, longitude, distance):
'''
get the corresponding rainfall value around each postcode from the latitude and longitude of the postcode
Parameters
----------
latitude: sequence of float
Ordered collection of latitude
longitude: sequence of float
Ordered collection of longitude
Returns
-------
list of float
rainfall values (the maximum station reading around each postcode)
'''
DF3 = self.DF3
DF3['lat_max'] = DF3['lat'] + distance
DF3['lat_min'] = DF3['lat'] - distance
DF3['long_max'] = DF3['long'] + distance
DF3['long_min'] = DF3['long'] - distance
rainfall = []
for point in range(len(latitude)):
stations_tem = DF3.loc[(DF3.lat_max >= latitude[point]) & (DF3.lat_min <= latitude[point])]
stations = stations_tem.loc[
(stations_tem.long_max >= longitude[point]) & (stations_tem.long_min <= longitude[point])]
rainfall_value = np.array(stations.value)
rainfall_value = np.nan_to_num(rainfall_value)
# if there is no station in the range of postcode, process the rainfall as 0
if len(rainfall_value) == 0:
rainfall_value = np.array([0])
# mean = rainfall_value.sum() / len(rainfall_value)
output = rainfall_value.max()
rainfall.append(output)
return rainfall
def get_Risk_Level(self, postcode, distance):
"""
Get an array of corresponding latitude, longitude and risk level from a sequence of postcodes
and distance range around every postcode
Parameters
----------
postcodes: sequence of strs
Ordered collection of postcodes
distance: a float number
Returns
-------
tuple of numpy.ndarray
arrays of floats for the latitude, longitude and risk level
"""
# create class Tool to use functions in tool.py
# for apply function in tool.py later
files = tool.Tool(postcode_file='flood_tool/resources/postcodes.csv', risk_file='flood_tool/resources/flood_probability.csv',
values_file='flood_tool/resources/property_value.csv')
#set the range of postcode to define stations and get latitude, longitude
lat_long = files.get_lat_long(postcode)
latitude = lat_long[:, 0]
longitude = lat_long[:, 1]
#get rainfall value for input Postcode
rainfall = np.array(self.get_all_station(latitude, longitude, distance))
#change NaN values to 0, which is convenient to process later
rainfall = np.nan_to_num(rainfall)
EOS_NOS = geo.get_easting_northing_from_lat_long(latitude, longitude)
prob_band = files.get_easting_northing_flood_probability(EOS_NOS[0], EOS_NOS[1])
dataset = pd.DataFrame({'Postcode': postcode, 'Latitude': latitude, 'Longitude': longitude, 'Value': rainfall, 'Probability Band': prob_band})
#replace the probability band with its threshold value
threshold_DF= dataset.replace({'High': 1.00, 'Medium': 1.25, 'Low': 1.5, 'Very Low': 1.75, 'Zero': 2.00, 'No Risk': 2.00})
first_level = 0.2
second_level = 0.4
third_level = 0.6
def to_mark(diff):
'''
get the corresponding risk level as str from the difference between threshold and rainfall value
Parameters
----------
diff: a float number recording difference between threshold and rainfall value
Returns
-------
str: risk level
'''
if diff < 0:
result = 'No Risk'
else:
if diff < first_level:
result = 'Low Risk'
elif (diff >= first_level) & (diff < second_level):
result = 'Medium Risk'
elif (diff >= second_level) & (diff < third_level):
result = 'High Risk'
else:
result = 'Dangerous'
return result
#compute the difference between the rainfall value and the threshold
diff_DF = threshold_DF.assign(diff=threshold_DF['Value'] - threshold_DF['Probability Band'])
#change the difference to the corresponding risk level
Risk_Ser = diff_DF['diff'].apply(to_mark)
output = np.array(Risk_Ser)
return latitude, longitude, output
# work for Question 4
def get_spe_date(self):
"""
Get an array of corresponding latitude, longitude and risk level from a specific date
Parameters
----------
data: str as specific format
farmat: 'yyyy-mm-dd' such as '2019-10-08'
Returns
-------
numpy.ndarray
array of floats for the latitude, longitude and risk level
"""
files = tool.Tool(postcode_file='resources/postcodes.csv', risk_file='resources/flood_probability.csv',
values_file='resources/property_value.csv')
DF4 = self.DF4
daily_rainfall = DF4.groupby('stationReference').value.sum()
daily_rainfall = pd.DataFrame(daily_rainfall)
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
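# Illustrative sketch (added for clarity, not part of the pandas API): the factory above
# is how reductions such as GroupBy.sum are generated further down in this module. The
# alias string ('add' here) is assumed to name the cython aggregation kernel.
def _example_build_sum_reduction():
    # returns an unbound function expecting a GroupBy instance as `self`
    return _groupby_function('sum', 'add', np.sum)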
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
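# Illustrative sketch (added for clarity): the *_compat helpers above skip missing values,
# so 'first'/'last' return the first/last non-null entry.
def _example_first_last_compat():
    values = np.array([np.nan, 3.0, 7.0, np.nan])
    return _first_compat(values), _last_compat(values)  # -> (3.0, 7.0)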
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will group by the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
>>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to set up the selection
# as these are not passed directly but via the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so we need to raise ValueError
# if we don't have this method, to indicate to the aggregation
# that this column should be marked as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
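Examples
--------
Illustrative sketch (toy data; output abbreviated):
>>> df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 3]})
>>> df.groupby('A').get_group('x')
A B
0 x 1
1 x 2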
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function (f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
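Examples
--------
Illustrative sketch (toy data; output abbreviated). A scalar-returning
func reduces each group to a single value:
>>> df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 3]})
>>> df.groupby('A')['B'].apply(lambda s: s.max() - s.min())
A
x 1
y 0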
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
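Examples
--------
Illustrative sketch (toy data; output formatting abbreviated):
>>> df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1., 3., 5.]})
>>> df.groupby('A').mean()
B
A
x 2
y 5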
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
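Parameters
----------
ddof : int, default 1
Delta degrees of freedom; the divisor used in the calculation is
``N - ddof``, where ``N`` is the group size.
Examples
--------
Illustrative sketch (toy data; output formatting abbreviated):
>>> df = DataFrame({'A': ['x', 'x', 'y', 'y'], 'B': [1., 3., 5., 9.]})
>>> df.groupby('A')['B'].var()
A
x 2
y 8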
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
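Parameters
----------
ddof : int, default 1
Delta degrees of freedom passed through to ``std``; the standard
error is computed as ``std(ddof=ddof) / sqrt(count())``.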
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
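Examples
--------
Illustrative sketch (toy data; output abbreviated):
>>> df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 3]})
>>> df.groupby('A').size()
A
x 2
y 1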
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute open, high, low and close values of a group, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will take the nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
1 1 4
2 5 6
>>> df.groupby('A').tail(1)
A B
1 1 4
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj's original type;
we may have roundtripped through object in the meantime
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dtype_str), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
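Examples
--------
Illustrative sketch (toy data):
>>> values = np.array([1, 2, 3, 6, 7])
>>> binner = np.array([0, 3, 8])
>>> generate_bins_generic(values, binner, closed='left')
array([2, 5])
>>> generate_bins_generic(values, binner, closed='right')
array([3, 5])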
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
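Illustrative sketch (internal use; toy data):
>>> df = DataFrame({'A': ['x', 'x', 'y']})
>>> ping = Grouping(df.index, df['A'], obj=df, name='A')
>>> ping.ngroups
2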
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passed in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not call this method when grouping by level')
else:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.groupby(self.grouper)
return self._groups
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
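Illustrative sketch of the returned (grouper, exclusions, obj) triple
(toy data):
>>> df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 3]})
>>> grouper, exclusions, obj = _get_grouper(df, key='A')
>>> exclusions
['A']
>>> grouper.ngroups
2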
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.get_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.append(gpr)
name = gpr
gpr = obj[gpr]
if isinstance(gpr, Categorical) and len(gpr) != len(obj):
errmsg = "Categorical grouper must have len(grouper) == len(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindex(axis).values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
class SeriesGroupBy(GroupBy):
_apply_whitelist = _series_apply_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply aggregation function or functions to groups, yielding most likely
Series but in some cases DataFrame depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
# if string function
if isinstance(func, compat.string_types):
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
# the values/counts are repeated according to the group index
indices = self.indices
# shortcut if we have an already ordered grouper
if Index(self.grouper.group_info[0]).is_monotonic:
result = Series(values, index=self.obj.index)
else:
index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
result = Series(values, index=index).sort_index()
result.index = self.obj.index
return result
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if isinstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if isinstance(subset, DataFrame):
raise NotImplementedError
for fname, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.append(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.append(col)
if isinstance(list(result.values())[0], DataFrame):
from pandas.tools.merge import concat
result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
result = DataFrame(result)
elif isinstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
if isinstance(result.index, MultiIndex):
zipped = zip(result.index.levels, result.index.labels,
result.index.names)
for i, (lev, lab, name) in enumerate(zipped):
result.insert(i, name,
com.take_nd(lev.values, lab,
allow_fill=False))
result = result.consolidate()
else:
values = result.index.values
name = self.grouper.groupings[0].name
result.insert(0, name, values)
result.index = np.arange(len(result))
return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concat(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
# for name in self.indices:
# data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
# XXX
return DataFrame({})
key_names = self.grouper.names
if isinstance(values[0], DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != len(values):
v = next(v for v in values if v is not None)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single value
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as it's faster than concat
# and if we have mi-columns
if not _np_version_under1p7 or isinstance(v.index,MultiIndex) or key_index is None:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values,index=key_index,columns=index)
else:
# GH5788 instead of stacking; concat gets the dtypes correct
from pandas.tools.merge import concat
result = concat(values,keys=key_index,names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values.T,index=v.index,columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is a list of arrays of unequal lengths; fall
# through to the outer else clause
return Series(values, index=key_index)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
if (self._selected_obj.ndim == 2
and self._selected_obj.dtypes.isin(_DATELIKE_DTYPES).any()):
cd = 'coerce'
else:
cd = True
return result.convert_objects(convert_dates=cd)
else:
# only coerce dates if we find at least 1 datetime
cd = 'coerce' if any([ isinstance(v,Timestamp) for v in values ]) else False
return Series(values, index=key_index).convert_objects(convert_dates=cd)
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.tools.merge import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
else:
res = path(group)
# broadcasting
if isinstance(res, Series):
if res.index.is_(obj.index):
group.T.values[:] = res
else:
group.values[:] = res
applied.append(group)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
concatenated.sort_index(inplace=True)
return concatenated
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed DataFrame on each group and
return a DataFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each subframe
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
"""
# try to do a fast transform via merge if possible
try:
obj = self._obj_with_exclusions
if isinstance(func, compat.string_types):
result = getattr(self, func)(*args, **kwargs)
else:
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
result = getattr(self, cyfunc)()
else:
return self._transform_general(func, *args, **kwargs)
except:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
# nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
# a groupby that doesn't preserve the index; remap the index based on the grouper
# and broadcast it
if ((not isinstance(obj.index,MultiIndex) and
type(result.index) != type(obj.index)) or
len(result.index) != len(obj.index)):
results = obj.values.copy()
for (name, group), (i, row) in zip(self, result.iterrows()):
indexer = self._get_index(name)
results[indexer] = np.tile(row.values,len(indexer)).reshape(len(indexer),-1)
return DataFrame(results,columns=result.columns,index=obj.index).convert_objects()
# we can merge the result in
# GH 7383
names = result.columns
result = obj.merge(result, how='outer', left_index=True, right_index=True).iloc[:,-result.shape[1]:]
result.columns = names
return result
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notnull(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Example
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if (isinstance(res, (bool, np.bool_)) or
np.isscalar(res) and isnull(res)):
if res and notnull(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError("filter function returned a %s, "
"but expected a scalar bool" %
type(res).__name__)
return self._apply_filter(indices, dropna)
class DataFrameGroupBy(NDFrameGroupBy):
_apply_whitelist = _dataframe_apply_whitelist
_block_agg_axis = 1
def __getitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) %s already selected' % self._selection)
if isinstance(key, (list, tuple, Series, Index, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: %s"
% str(bad_keys)[1:-1])
return DataFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
elif not self.as_index:
if key not in self.obj.columns:
raise KeyError("Column not found: %s" % key)
return DataFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
else:
if key not in self.obj:
raise KeyError("Column not found: %s" % key)
# kind of a kludge
return SeriesGroupBy(self.obj[key], selection=key,
grouper=self.grouper,
exclusions=self.exclusions)
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if result:
if self.axis == 0:
result = DataFrame(result, index=obj.columns,
columns=result_index).T
else:
result = DataFrame(result, index=obj.index,
columns=result_index)
else:
result = DataFrame(result)
return result
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = DataFrame(output, columns=output_keys)
group_levels = self.grouper.get_group_levels()
zipped = zip(self.grouper.names, group_levels)
for i, (name, labels) in enumerate(zipped):
result.insert(i, name, labels)
result = result.consolidate()
else:
index = self.grouper.result_index
result = DataFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindex_output(result).convert_objects()
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[1])
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
group_levels = self.grouper.get_group_levels()
zipped = zip(self.grouper.names, group_levels)
for i, (name, labels) in enumerate(zipped):
result.insert(i, name, labels)
result = result.consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result).convert_objects()
def _reindex_output(self, result):
"""
if we have categorical groupers, then we want to make sure that
        we have a fully reindexed output covering all of the levels. Some levels may
        not have participated in the groupings (e.g. they may have been all-nan groups).
        This can re-expand the output space
"""
groupings = self.grouper.groupings
if groupings is None:
return result
elif len(groupings) == 1:
return result
elif not any([ping._was_factor for ping in groupings]):
return result
levels_list = [ ping._group_index for ping in groupings ]
index = MultiIndex.from_product(levels_list, names=self.grouper.names)
return result.reindex(**{ self.obj._get_axis_name(self.axis) : index, 'copy' : False }).sortlevel()
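    # Sketch of the effect (hypothetical frame): grouping on a categorical key
    # together with another key yields one output row per combination of levels,
    # and combinations never observed in the data come back NaN-filled.
    #
    # >>> df = DataFrame({'c': Categorical(['a', 'a'], categories=['a', 'b']),
    # ...                 'x': [1, 2], 'v': [10., 20.]})
    # >>> df.groupby(['c', 'x']).sum()  # also contains ('b', 1) and ('b', 2) rows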
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _apply_to_column_groupbys(self, func):
from pandas.tools.merge import concat
return concat(
(func(col_groupby) for _, col_groupby
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
from pandas.tools.plotting import boxplot_frame_groupby
DataFrameGroupBy.boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self._selected_obj.items
else:
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
raise NotImplementedError
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def aggregate(self, arg, *args, **kwargs):
"""
Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a Panel or when passed to Panel.apply. If
pass a dict, the keys must be DataFrame column names
Returns
-------
aggregated : Panel
"""
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
return self._aggregate_generic(arg, *args, **kwargs)
def _wrap_generic_output(self, result, obj):
if self.axis == 0:
new_axes = list(obj.axes)
new_axes[0] = self.grouper.result_index
elif self.axis == 1:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, z, x]
else:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, y, x]
result = Panel._from_axes(result, new_axes)
if self.axis == 1:
result = result.swapaxes(0, 1).swapaxes(0, 2)
elif self.axis == 2:
result = result.swapaxes(0, 2)
return result
def _aggregate_item_by_item(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
result = {}
if self.axis > 0:
for item in obj:
try:
itemg = DataFrameGroupBy(obj[item],
axis=self.axis - 1,
grouper=self.grouper)
result[item] = itemg.aggregate(func, *args, **kwargs)
except (ValueError, TypeError):
raise
new_axes = list(obj.axes)
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
raise NotImplementedError
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
class NDArrayGroupBy(GroupBy):
pass
#----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
self.labels = com._ensure_int64(labels)
self.ngroups = ngroups
self.axis = axis
@cache_readonly
def slabels(self):
# Sorted labels
return com.take_nd(self.labels, self.sort_idx, allow_fill=False)
@cache_readonly
def sort_idx(self):
# Counting sort indexer
return _algos.groupsort_indexer(self.labels, self.ngroups)[0]
def __iter__(self):
sdata = self._get_sorted_data()
if self.ngroups == 0:
raise StopIteration
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for i, (start, end) in enumerate(zip(starts, ends)):
# Since I'm now compressing the group ids, it's now not "possible"
# to produce empty slices because such groups would not be observed
# in the data
# if start >= end:
# raise AssertionError('Start %s must be less than end %s'
# % (str(start), str(end)))
yield i, self._chop(sdata, slice(start, end))
def _get_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def apply(self, f):
raise NotImplementedError
class ArraySplitter(DataSplitter):
pass
class SeriesSplitter(DataSplitter):
def _chop(self, sdata, slice_obj):
return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
def fast_apply(self, f, names):
# must return keys::list, values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except:
# fails when all -1
return [], True
sdata = self._get_sorted_data()
results, mutated = lib.apply_frame_axis0(sdata, f, names, starts, ends)
return results, mutated
def _chop(self, sdata, slice_obj):
if self.axis == 0:
return sdata.iloc[slice_obj]
else:
return sdata._slice(slice_obj, axis=1) # ix[:, slice_obj]
class NDFrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
self.factory = data._constructor
def _get_sorted_data(self):
# this is the BlockManager
data = self.data._data
# this is sort of wasteful but...
sorted_axis = data.axes[self.axis].take(self.sort_idx)
sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)
return sorted_data
def _chop(self, sdata, slice_obj):
return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
if isinstance(data, Series):
klass = SeriesSplitter
elif isinstance(data, DataFrame):
klass = FrameSplitter
else:
klass = NDFrameSplitter
return klass(data, *args, **kwargs)
#----------------------------------------------------------------------
# Misc utilities
def get_group_index(label_list, shape):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations.
"""
if len(label_list) == 1:
return label_list[0]
n = len(label_list[0])
group_index = np.zeros(n, dtype=np.int64)
mask = np.zeros(n, dtype=bool)
for i in range(len(shape)):
stride = np.prod([x for x in shape[i + 1:]], dtype=np.int64)
group_index += com._ensure_int64(label_list[i]) * stride
mask |= label_list[i] < 0
np.putmask(group_index, mask, -1)
return group_index
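# Worked example (hypothetical values): with shape (2, 3) the group index is
# label0 * 3 + label1, i.e. the row-major offset into the cartesian product of
# the level combinations.
#
# >>> import numpy as np
# >>> get_group_index([np.array([0, 1, 1]), np.array([1, 0, 2])], (2, 3))
# array([1, 3, 5])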
_INT64_MAX = np.iinfo(np.int64).max
def _int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *= long(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
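# Worked example (hypothetical values): decons_group_index inverts
# get_group_index for non-negative labels.
#
# >>> decons_group_index(np.array([1, 3, 5]), (2, 3))
# [array([0, 1, 1]), array([1, 0, 2])]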
def _indexer_from_factorized(labels, shape, compress=True):
if _int64_overflow_possible(shape):
indexer = np.lexsort(np.array(labels[::-1]))
return indexer
group_index = get_group_index(labels, shape)
if compress:
comp_ids, obs_ids = _compress_group_index(group_index)
max_group = len(obs_ids)
else:
comp_ids = group_index
max_group = com._long_prod(shape)
if max_group > 1e6:
# Use mergesort to avoid memory errors in counting sort
indexer = comp_ids.argsort(kind='mergesort')
else:
indexer, _ = _algos.groupsort_indexer(comp_ids.astype(np.int64),
max_group)
return indexer
def _lexsort_indexer(keys, orders=None, na_position='last'):
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
for key, order in zip(keys, orders):
# we are already a Categorical
if is_categorical_dtype(key):
c = key
# create the Categorical
else:
c = Categorical(key,ordered=True)
if na_position not in ['last','first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
n = len(c.levels)
codes = c.codes.copy()
mask = (c.codes == -1)
if order: # ascending
if na_position == 'last':
codes = np.where(mask, n, codes)
elif na_position == 'first':
codes += 1
else: # not order means descending
if na_position == 'last':
codes = np.where(mask, n, n-codes-1)
elif na_position == 'first':
codes = np.where(mask, 0, n-codes)
if mask.any():
n += 1
shape.append(n)
labels.append(codes)
return _indexer_from_factorized(labels, shape)
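# Illustrative sketch (hypothetical values): the indexer orders rows primarily by
# the first key and then by later keys, matching np.lexsort with the key order
# reversed.
#
# >>> _lexsort_indexer([np.array([2, 1, 1]), np.array([0, 5, 3])])
# array([2, 1, 0])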
def _nargsort(items, kind='quicksort', ascending=True, na_position='last'):
"""
This is intended to be a drop-in replacement for np.argsort which handles NaNs
It adds ascending and na_position parameters.
GH #6399, #5231
"""
# specially handle Categorical
if | is_categorical_dtype(items) | pandas.core.common.is_categorical_dtype |
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
dirpath = datapath("io", "json", "data")
v12_json = os.path.join(dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
tm.assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
tm.assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
tm.assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
        # GH14256: a failing column caused segfaults if it is not the last one
class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
self.binary = bytes.fromhex(hexed)
def __str__(self) -> str:
return self.hexed
hexed = "574b4454ba8c5eb4f98a8f45"
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
result = df_nonprintable.to_json(default_handler=str)
expected = f'{{"A":{{"0":"{hexed}"}}}}'
assert result == expected
assert (
df_mixed.to_json(default_handler=str)
== f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
assert result == expected
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="index")
tm.assert_series_equal(
s, read_json(s.to_json(orient="split"), orient="split", typ="series")
)
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_default_orient(self, string_series):
assert string_series.to_json() == string_series.to_json(orient="index")
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_simple(self, orient, numpy, string_series):
data = string_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = string_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [False, None])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_object(self, orient, numpy, dtype, object_series):
data = object_series.to_json(orient=orient)
result = pd.read_json(
data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
expected = object_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_empty(self, orient, numpy, empty_series):
data = empty_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = empty_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
else:
expected.index = expected.index.astype(float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_timeseries(self, orient, numpy, datetime_series):
data = datetime_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = datetime_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.float64, np.int])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_numeric(self, orient, numpy, dtype):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
data = s.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = s.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
tm.assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(True, Series(["2000-01-01"], dtype="datetime64[ns]")),
(False, Series([946684800000])),
],
)
def test_series_with_dtype_datetime(self, dtype, expected):
s = Series(["2000-01-01"], dtype="datetime64[ns]")
data = s.to_json()
result = pd.read_json(data, typ="series", dtype=dtype)
tm.assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
tm.assert_frame_equal(
result, df, check_index_type=False, check_column_type=False
)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
tm.assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
def test_path(self, float_frame):
with tm.ensure_clean("test.json") as path:
for df in [
float_frame,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self, datetime_series):
# frame
json = self.tsframe.to_json()
result = read_json(json)
tm.assert_frame_equal(result, self.tsframe)
# series
json = datetime_series.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, datetime_series, check_names=False)
assert result.name is None
def test_convert_dates(self, datetime_series):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
tm.assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
tm.assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=datetime_series.index)
json = ts.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, ts)
@pytest.mark.parametrize("date_format", ["epoch", "iso"])
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize(
"date_typ", [datetime.date, datetime.datetime, pd.Timestamp]
)
def test_date_index_and_values(self, date_format, as_object, date_typ):
data = [date_typ(year=2020, month=1, day=1), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
result = ser.to_json(date_format=date_format)
if date_format == "epoch":
expected = '{"1577836800000":1577836800000,"null":null}'
else:
expected = (
'{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}'
)
if as_object:
expected = expected.replace("}", ',"a":"a"}')
assert result == expected
@pytest.mark.parametrize(
"infer_word",
[
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
],
)
def test_convert_dates_infer(self, infer_word):
# GH10747
from pandas.io.json import dumps
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_frame(self, date, date_unit):
df = self.tsframe.copy()
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
tm.assert_frame_equal(result, expected)
def test_date_format_frame_raises(self):
df = self.tsframe.copy()
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_series(self, date, date_unit, datetime_series):
ts = Series(Timestamp(date), index=datetime_series.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_date_format_series_raises(self, datetime_series):
ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
def test_date_unit(self, unit):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
json = df.to_json(date_format="epoch", date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
tm.assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
tm.assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r"""{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}"""
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
dfj2.index = pd.date_range("20130101", periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
tm.assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\['a', 'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with pytest.raises(AssertionError, match=error_msg):
tm.assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@tm.network
@pytest.mark.single
def test_round_trip_exception_(self):
# GH 3867
csv = "https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv"
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
tm.assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df)
@tm.network
@pytest.mark.single
@pytest.mark.parametrize(
"field,dtype",
[
["created_at", pd.DatetimeTZDtype(tz="UTC")],
["closed_at", "datetime64[ns]"],
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
],
)
def test_url(self, field, dtype):
url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5" # noqa
result = read_json(url, convert_dates=True)
assert result[field].dtype == dtype
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit="ms")
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == "timedelta64[ns]"
tm.assert_frame_equal(frame, pd.read_json(frame.to_json()).apply(converter))
frame = DataFrame(
{
"a": [timedelta(days=23), timedelta(seconds=5)],
"b": [1, 2],
"c": pd.date_range(start="20130101", periods=2),
}
)
result = pd.read_json(frame.to_json(date_unit="ns"))
result["a"] = pd.to_timedelta(result.a, unit="ns")
result["c"] = pd.to_datetime(result.c)
tm.assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame(
{"a": [timedelta(23), pd.Timestamp("20130101")]}, dtype=object
)
expected = DataFrame(
{"a": [pd.Timedelta(frame.a[0]).value, pd.Timestamp(frame.a[1]).value]}
)
result = pd.read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"})
tm.assert_frame_equal(result, expected, check_index_type=False)
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize("date_format", ["iso", "epoch"])
@pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
def test_timedelta_to_json(self, as_object, date_format, timedelta_typ):
# GH28156: to_json not correctly formatting Timedelta
data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
if date_format == "iso":
expected = (
'{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}'
)
else:
expected = '{"86400000":86400000,"172800000":172800000,"null":null}'
if as_object:
expected = expected.replace("}", ',"a":"a"}')
result = ser.to_json(date_format=date_format)
assert result == expected
def test_default_handler(self):
value = object()
frame = DataFrame({"a": [7, value]})
expected = DataFrame({"a": [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
tm.assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
return str(obj)
df_list = [
9,
DataFrame(
{"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
columns=["a", "b"],
),
]
expected = (
'[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]'
)
assert dumps(df_list, default_handler=default, orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame(
{"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
columns=["a", "b"],
)
expected = (
'[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]'
)
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
msg = "raisin"
def my_handler_raises(obj):
raise TypeError(msg)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, object()]}).to_json(
default_handler=my_handler_raises
)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
default_handler=my_handler_raises
)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype("category")
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern")
tz_naive = tz_range.tz_convert("utc").tz_localize(None)
df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)})
df_naive = df.copy()
df_naive["A"] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.astype("Sparse")
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.astype("Sparse")
expected = s.to_json()
assert expected == ss.to_json()
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-10 05:00:00Z"),
Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),
Timestamp("2013-01-10 00:00:00-0500"),
],
)
def test_tz_is_utc(self, ts):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
@pytest.mark.parametrize(
"tz_range",
[
pd.date_range("2013-01-01 05:00:00Z", periods=2),
pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
pd.date_range("2013-01-01 00:00:00-0500", periods=2),
],
)
def test_tz_range_is_utc(self, tz_range):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = (
'{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}'
)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
result = dumps(df, iso_dates=True)
assert result == dfexp
def test_read_inline_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@td.skip_if_not_us_locale
def test_read_s3_jsonl(self, s3_resource):
# GH17200
result = read_json("s3n://pandas-test/items.jsonl", lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_local_jsonl(self):
# GH17200
with tm.ensure_clean("tmp_items.json") as path:
with open(path, "w") as infile:
infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
result = read_json(path, lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars(self):
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_json_large_numbers(self):
# GH18842
json = '{"articleId": "1404366058080022500245"}'
json = StringIO(json)
result = read_json(json, typ="series")
expected = Series(1.404366e21, index=["articleId"])
tm.assert_series_equal(result, expected)
json = '{"0": {"articleId": "1404366058080022500245"}}'
json = StringIO(json)
result = read_json(json)
expected = DataFrame(1.404366e21, index=["articleId"], columns=[0])
tm.assert_frame_equal(result, expected)
def test_to_jsonl(self):
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(pd.read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(pd.read_json(result, lines=True), df)
# TODO: there is a near-identical test for pytables; can we share?
def test_latin_encoding(self):
# GH 13774
pytest.skip("encoding not implemented in .to_json(), xref #13774")
values = [
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
]
values = [
[x.decode("latin-1") if isinstance(x, bytes) else x for x in y]
for y in values
]
examples = []
for dtype in ["category", object]:
for val in values:
examples.append(Series(val, dtype=dtype))
def roundtrip(s, encoding="latin-1"):
with tm.ensure_clean("test.json") as path:
s.to_json(path, encoding=encoding)
retr = read_json(path, encoding=encoding)
tm.assert_series_equal(s, retr, check_categorical=False)
for s in examples:
roundtrip(s)
def test_data_frame_size_after_to_json(self):
# GH15344
df = DataFrame({"a": [str(1)]})
size_before = df.memory_usage(index=True, deep=True).sum()
df.to_json()
size_after = df.memory_usage(index=True, deep=True).sum()
assert size_before == size_after
@pytest.mark.parametrize(
"index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]]
)
@pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]])
def test_from_json_to_json_table_index_and_columns(self, index, columns):
# GH25433 GH25435
expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)
dfjson = expected.to_json(orient="table")
result = pd.read_json(dfjson, orient="table")
tm.assert_frame_equal(result, expected)
def test_from_json_to_json_table_dtypes(self):
# GH21345
expected = | pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]}) | pandas.DataFrame |
from urllib.request import urlopen
import requests
import datetime
import pandas as pd
import pandas.io.sql as pd_sql
import json
import telepot
import sqlite3
# Save the OpenDART corporation codes
from io import BytesIO
from zipfile import ZipFile
import xml.etree.ElementTree as ET
crtfc_key = 'YOUR_OPENDART_API_KEY' # Insert the API key issued to you.
# Telegram settings
token = '<KEY>' # Telegram bot token
bot = telepot.Bot(token)
Bot_ID = 'XXXXXXX' # Recipient chat ID
# Send a Telegram message
def Sendmsg(msg):
bot.sendMessage(Bot_ID, msg)
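# Usage sketch, assuming the token and Bot_ID above have been filled in:
# >>> Sendmsg('New DART disclosure detected')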
# Save the per-company corporation codes registered with OpenDART and return them as a DataFrame
def Get_Stocklist_by_DART():
url = 'https://opendart.fss.or.kr/api/corpCode.xml?crtfc_key={}'.format(crtfc_key)
with urlopen(url) as zipresp:
with ZipFile(BytesIO(zipresp.read())) as zfile:
zfile.extractall('corpcode')
tree = ET.parse('corpcode/CORPCODE.xml')
root = tree.getroot()
df_corp = pd.DataFrame(columns=['종목명', '고유번호', '종목코드'])
for company in root.iter('list'):
stock_code = company.findtext('stock_code')
stock_code = stock_code.strip()
if stock_code:
data = {
'종목명': company.findtext('corp_name'),
'고유번호': company.findtext('corp_code'),
'종목코드': company.findtext('stock_code')
}
df_corp = df_corp.append(data, ignore_index=True)
df_corp = df_corp.sort_values(by='종목명')
con = sqlite3.connect("DB.db")
df_corp.to_sql('LIST', con, if_exists='append', index=False)
con.close()
return df_corp
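# Usage sketch: downloads and unpacks CORPCODE.xml, appends the listed companies
# to the LIST table of DB.db, and returns them as a DataFrame.
# >>> df_corp = Get_Stocklist_by_DART()
# >>> df_corp.head()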
# Read the disclosure data already stored in the DB
def Check_Dart(stock):
try:
con = sqlite3.connect("DB.db")
df = | pd.read_sql("SELECT 보고서번호 from DART WHERE 종목명='%s'"%(stock), con=con) | pandas.read_sql |
import streamlit as st
from PIL import Image
import pandas as pd
import subprocess
import os
import base64
import pickle
# Molecular descriptor calculator
def desc_calc():
# Performs the descriptor calculation
bashCommand = "java -Xms2G -Xmx2G -Djava.awt.headless=true -jar ./PaDEL-Descriptor/PaDEL-Descriptor.jar -removesalt -standardizenitro -fingerprints -descriptortypes ./PaDEL-Descriptor/PubchemFingerprinter.xml -dir ./ -file descriptors_output.csv"
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
os.remove('molecule.smi')
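# Usage sketch: desc_calc() expects a tab-separated SMILES file named
# 'molecule.smi' in the working directory (written further below from the
# uploaded data) and produces 'descriptors_output.csv'; both file names are fixed
# by the PaDEL command above. Hypothetical one-row input:
# >>> pd.DataFrame([['CCO', 'ethanol']]).to_csv('molecule.smi', sep='\t', header=False, index=False)
# >>> desc_calc()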
# File download
def filedownload(df):
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # strings <-> bytes conversions
href = f'<a href="data:file/csv;base64,{b64}" download="prediction.csv">Download Predictions</a>'
return href
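# Usage sketch: render the generated download link inside the Streamlit page.
# >>> st.markdown(filedownload(pd.DataFrame({'pIC50': [5.2]})), unsafe_allow_html=True)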
# Model building
def build_model(input_data):
# Reads in saved regression model
    with open('acetylcholinesterase_model.pkl', 'rb') as model_file:
        load_model = pickle.load(model_file)
# Apply model to make predictions
prediction = load_model.predict(input_data)
st.header('**Prediction output**')
prediction_output = pd.Series(prediction, name='pIC50')
molecule_name = pd.Series(load_data[1], name='molecule_name')
df = pd.concat([molecule_name, prediction_output], axis=1)
st.write(df)
st.markdown(filedownload(df), unsafe_allow_html=True)
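# Usage sketch: 'desc_subset' is a hypothetical DataFrame holding only the
# descriptor columns the pickled model was trained on.
# >>> build_model(desc_subset)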
# Logo image
image = Image.open('logo.png')
st.image(image, use_column_width=True)
# Page title
st.markdown("""
# Bioactivity Prediction App (Acetylcholinesterase)
This app allows you to predict the bioactivity towards inhibiting the `Acetylcholinesterase` enzyme. `Acetylcholinesterase` is a drug target for Alzheimer's disease.
---
""")
# Sidebar
with st.sidebar.header('Upload your CSV data'):
uploaded_file = st.sidebar.file_uploader("Upload your input file", type=['txt'])
st.sidebar.markdown("""
[Example input file](https://raw.githubusercontent.com/aminbenmansour/bioinformatics-computational-drug-discovery/main/bioactivity-prediction-app/example_acetylcholinesterase.txt)
""")
if st.sidebar.button('Predict'):
load_data = pd.read_table(uploaded_file, sep=' ', header=None)
load_data.to_csv('molecule.smi', sep = '\t', header = False, index = False)
st.header('**Original input data**')
st.write(load_data)
with st.spinner("Calculating descriptors..."):
desc_calc()
# Read in calculated descriptors and display the dataframe
st.header('**Calculated molecular descriptors**')
desc = pd.read_csv('descriptors_output.csv')
st.write(desc)
st.write(desc.shape)
# Read descriptor list used in previously built model
st.header('**Subset of descriptors that most contribute to acetylcholinesterase inhibition**')
Xlist = list( | pd.read_csv('descriptor_list.csv') | pandas.read_csv |
#aggregation script
from distributed import wait
import pandas as pd
import geopandas as gpd
from panoptes_client import Panoptes
from shapely.geometry import box, Point
import json
import numpy as np
import os
from datetime import datetime
import utils
import extract
import start_cluster
def download_data(everglades_watch, min_version, generate=False):
#see https://panoptes-python-client.readthedocs.io/en/v1.1/panoptes_client.html#module-panoptes_client.classification
classification_export = everglades_watch.get_export('classifications', generate=generate)
rows = []
for row in classification_export.csv_dictreader():
rows.append(row)
df = pd.DataFrame(rows)
df["workflow_version"] = df.workflow_version.astype(float)
df = df[df.workflow_version > min_version]
df = df[df.workflow_name =="Counts and Behavior"]
return df
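# Illustrative usage sketch (not in the original script): connect to Zooniverse and pull
# the filtered classification frame. The credentials, project slug and minimum workflow
# version below are placeholders/assumptions, not values taken from this project.
def _example_download(username, password, min_version=300.0):
    from panoptes_client import Project  # Project is not imported at the top of this script
    Panoptes.connect(username=username, password=password)
    everglades_watch = Project.find(slug="zooniverse/everglades-watch")
    return download_data(everglades_watch, min_version=min_version)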
def download_subject_data(everglades_watch, savedir, generate=False):
#see https://panoptes-python-client.readthedocs.io/en/v1.1/panoptes_client.html#module-panoptes_client.classification
classification_export = everglades_watch.get_export('subjects', generate=generate)
rows = []
for row in classification_export.csv_dictreader():
rows.append(row)
df = | pd.DataFrame(rows) | pandas.DataFrame |
#!/usr/bin/env python3
"""
https://mor.nlm.nih.gov/download/rxnav/RxNormAPIs.html
https://www.nlm.nih.gov/research/umls/rxnorm/docs/
"""
###
import sys,os,re,json,logging,urllib.parse,tqdm
import pandas as pd
from ..util import rest
#
API_HOST='rxnav.nlm.nih.gov'
API_BASE_PATH='/REST'
BASE_URL='https://'+API_HOST+API_BASE_PATH
#
NDFRT_TYPES=('DISEASE','INGREDIENT','MOA','PE','PK') ## NDFRT drug class types
#
#############################################################################
def List_IDTypes(base_url=BASE_URL, fout=None):
rval = rest.Utils.GetURL(base_url+'/idtypes.json', parse_json=True)
logging.debug(json.dumps(rval, indent=2))
df = pd.DataFrame({"idTypeList":rval['idTypeList']['idName']})
logging.info(f"n_out: {df.shape[0]}")
if fout is not None: df.to_csv(fout, "\t", index=False)
else: return df
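# Illustrative usage (not part of the original module): each listing helper either
# returns a DataFrame or, when `fout` is given, writes TSV to that file handle.
def _example_list_idtypes():
    df_idtypes = List_IDTypes()  # DataFrame with one row per RxNorm idType name
    logging.info(f"idTypes found: {df_idtypes.shape[0]}")
    return df_idtypes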
#############################################################################
def List_SourceTypes(base_url=BASE_URL, fout=None):
rval = rest.Utils.GetURL(base_url+'/sourcetypes.json', parse_json=True)
df = pd.DataFrame({"sourceType":rval['sourceTypeList']['sourceName']})
logging.info(f"n_out: {df.shape[0]}")
if fout is not None: df.to_csv(fout, "\t", index=False)
else: return df
#############################################################################
def List_RelationTypes(base_url=BASE_URL, fout=None):
rval = rest.Utils.GetURL(base_url+'/relatypes.json', parse_json=True)
df = pd.DataFrame({"relationType":rval['relationTypeList']['relationType']})
logging.info(f"n_out: {df.shape[0]}")
if fout is not None: df.to_csv(fout, "\t", index=False)
else: return df
#############################################################################
def List_TermTypes(base_url=BASE_URL, fout=None):
rval = rest.Utils.GetURL(base_url+'/termtypes.json', parse_json=True)
df = pd.DataFrame({"termType":rval['termTypeList']['termType']})
logging.info(f"n_out: {df.shape[0]}")
if fout is not None: df.to_csv(fout, "\t", index=False)
else: return df
#############################################################################
def List_PropNames(base_url=BASE_URL, fout=None):
rval = rest.Utils.GetURL(base_url+'/propnames.json', parse_json=True)
df = pd.DataFrame({"propName":rval['propNameList']['propName']})
logging.info(f"n_out: {df.shape[0]}")
if fout is not None: df.to_csv(fout, "\t", index=False)
else: return df
#############################################################################
def List_PropCategories(base_url=BASE_URL, fout=None):
rval = rest.Utils.GetURL(base_url+'/propCategories.json', parse_json=True)
df = pd.DataFrame({"propCategory":rval['propCategoryList']['propCategory']})
logging.info(f"n_out: {df.shape[0]}")
if fout is not None: df.to_csv(fout, "\t", index=False)
else: return df
#############################################################################
def List_ClassTypes(base_url=BASE_URL, fout=None):
rval = rest.Utils.GetURL(base_url+'/rxclass/classTypes.json', parse_json=True)
df = pd.DataFrame({"classType":rval['classTypeList']['classTypeName']})
logging.info(f"n_out: {df.shape[0]}")
if fout is not None: df.to_csv(fout, "\t", index=False)
else: return df
#############################################################################
def List_Classes(class_types, base_url=BASE_URL, fout=None):
n_out=0; tags=None; df=pd.DataFrame(); tq=None;
url = (f'{base_url}/rxclass/allClasses.json')
if class_types: url+=("?classTypes="+urllib.parse.quote(' '.join(class_types)))
rval = rest.Utils.GetURL(url, parse_json=True)
logging.debug(json.dumps(rval, indent=4))
clss = rval["rxclassMinConceptList"]["rxclassMinConcept"] if "rxclassMinConceptList" in rval and "rxclassMinConcept" in rval["rxclassMinConceptList"] else []
for cls in clss:
if not tq: tq = tqdm.tqdm(total=len(clss), unit="classes")
tq.update()
if not tags: tags = list(cls.keys())
df_this = pd.DataFrame({tags[j]:[cls[tags[j]]] for j in range(len(tags))})
if fout is None: df = pd.concat([df, df_this])
else: df_this.to_csv(fout, "\t", index=False, header=bool(n_out==0))
n_out += df_this.shape[0]
logging.info(f"n_out: {n_out}")
if fout is None: return df
#############################################################################
def Get_Name2RxCUI(names, base_url=BASE_URL, fout=None):
n_out=0; df= | pd.DataFrame() | pandas.DataFrame |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| :class:`pandas.Series` function and operator implementations in HPAT
| Also, it contains Numba internal operators which are required for Series type handling
"""
import numpy
import operator
import pandas
from numba.errors import TypingError
from numba.extending import (types, overload, overload_method, overload_attribute)
from numba import types
import hpat
from hpat.hiframes.pd_series_ext import SeriesType
from hpat.str_arr_ext import StringArrayType
from hpat.utils import to_array
@overload(operator.getitem)
def hpat_pandas_series_getitem(self, idx):
"""
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(idx, types.Integer):
def hpat_pandas_series_getitem_idx_integer_impl(self, idx):
"""
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_iloc1
"""
result = self._data[idx]
return result
return hpat_pandas_series_getitem_idx_integer_impl
if isinstance(idx, types.SliceType):
def hpat_pandas_series_getitem_idx_slice_impl(self, idx):
"""
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_iloc2
"""
result = pandas.Series(self._data[idx])
return result
return hpat_pandas_series_getitem_idx_slice_impl
if isinstance(idx, SeriesType):
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
"""
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_setitem_series_bool2
"""
super_index = idx._data
result = self._data[super_index]
return result
return hpat_pandas_series_getitem_idx_series_impl
raise TypingError('{} The index must be an Integer, Slice or a pandas.series. Given: {}'.format(_func_name, idx))
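# Plain-pandas reference for the three index kinds handled above (illustrative only;
# this helper is not used by the numba typing pipeline).
def _getitem_reference_example():
    series = pandas.Series([10, 20, 30, 40])
    by_integer = series[2]                                        # scalar element
    by_slice = pandas.Series(series.values[1:3])                  # new Series from a slice
    by_mask = series[pandas.Series([True, False, True, False])]   # boolean-Series filtering
    return by_integer, by_slice, by_mask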
@overload_attribute(SeriesType, 'at')
@overload_attribute(SeriesType, 'iat')
@overload_attribute(SeriesType, 'iloc')
@overload_attribute(SeriesType, 'loc')
def hpat_pandas_series_iloc(self):
"""
Pandas Series operators :attr:`pandas.Series.at`, :attr:`pandas.Series.iat`, :attr:`pandas.Series.iloc`, :attr:`pandas.Series.loc` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_iloc2
Parameters
----------
series: :class:`pandas.Series`
input series
Returns
-------
:obj:`pandas.Series`
returns an object of :obj:`pandas.Series`
"""
_func_name = 'Operator at/iat/iloc/loc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iloc_impl(self):
return self
return hpat_pandas_series_iloc_impl
@overload_attribute(SeriesType, 'shape')
def hpat_pandas_series_shape(self):
"""
Pandas Series attribute :attr:`pandas.Series.shape` implementation
**Algorithm**: result = series.shape
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_shape1
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:obj:`tuple`
a tuple of the shape of the underlying data
"""
_func_name = 'Attribute shape.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_shape_impl(self):
return self._data.shape
return hpat_pandas_series_shape_impl
@overload_attribute(SeriesType, 'values')
def hpat_pandas_series_values(self):
"""
Pandas Series attribute 'values' implementation.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.values.html#pandas.Series.values
Algorithm: result = series.values
Where:
series: pandas.series
result: pandas.series as ndarray
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_values
"""
_func_name = 'Attribute values.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_values_impl(self):
return self._data
return hpat_pandas_series_values_impl
@overload_attribute(SeriesType, 'index')
def hpat_pandas_series_index(self):
"""
Pandas Series attribute :attr:`pandas.Series.index` implementation
**Algorithm**: result = series.index
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_index1
python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_index2
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
the index of the Series
"""
_func_name = 'Attribute index.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_index_impl(self):
return self._index
return hpat_pandas_series_index_impl
@overload_attribute(SeriesType, 'size')
def hpat_pandas_series_size(self):
"""
Pandas Series attribute :attr:`pandas.Series.size` implementation
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_size
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
Return the number of elements in the underlying data.
"""
_func_name = 'Attribute size.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_size_impl(self):
return len(self._data)
return hpat_pandas_series_size_impl
@overload(len)
def hpat_pandas_series_len(self):
"""
Pandas Series operator :func:`len` implementation
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_len
Parameters
----------
series: :class:`pandas.Series`
Returns
-------
:obj:`int`
number of items in the object
"""
_func_name = 'Operator len().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_len_impl(self):
return len(self._data)
return hpat_pandas_series_len_impl
@overload_method(SeriesType, 'isin')
def hpat_pandas_series_isin(self, values):
"""
Pandas Series method :meth:`pandas.Series.isin` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_isin_list1
Parameters
-----------
values : :obj:`list` or :obj:`set` object
specifies values to look for in the series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object indicating if each element of self is in values
"""
_func_name = 'Method isin().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(values, (types.Set, types.List)):
raise TypingError(
'{} The argument must be set or list-like object. Given values: {}'.format(_func_name, values))
def hpat_pandas_series_isin_impl(self, values):
# TODO: replace with below line when Numba supports np.isin in nopython mode
# return pandas.Series(np.isin(self._data, values))
return pandas.Series([(x in values) for x in self._data])
return hpat_pandas_series_isin_impl
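# Plain-pandas reference for the semantics implemented above (illustrative only).
def _isin_reference_example():
    series = pandas.Series([2, 4, 8, 16])
    return series.isin([4, 16])  # -> False, True, False, True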
@overload_method(SeriesType, 'append')
def hpat_pandas_series_append(self, to_append):
"""
Pandas Series method :meth:`pandas.Series.append` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_append1
Parameters
-----------
to_append : :obj:`pandas.Series` object
input argument
ignore_index:
*unsupported*
verify_integrity:
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method append().'
if not isinstance(self, SeriesType) or not isinstance(to_append, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}, to_append: {}'.format(_func_name, self, to_append))
def hpat_pandas_series_append_impl(self, to_append):
return pandas.Series(self._data + to_append._data)
return hpat_pandas_series_append_impl
@overload_method(SeriesType, 'groupby')
def hpat_pandas_series_groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False):
"""
Pandas Series method :meth:`pandas.Series.groupby` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_groupby_count
Parameters
-----------
self: :class:`pandas.Series`
input arg
by: :obj:`pandas.Series` object
Used to determine the groups for the groupby
axis:
*unsupported*
level:
*unsupported*
as_index:
*unsupported*
sort:
*unsupported*
group_keys:
*unsupported*
squeeze:
*unsupported*
observed:
*unsupported*
Returns
-------
:obj:`pandas.SeriesGroupBy`
returns :obj:`pandas.SeriesGroupBy` object
"""
_func_name = 'Method Series.groupby().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if by is None and axis is None:
raise TypingError("{} You have to supply one of 'by' or 'axis' parameters".format(_func_name))
if level is not None and not isinstance(level, (types.Integer, types.NoneType, types.Omitted)):
raise TypingError("{} 'level' must be an Integer. Given: {}".format(_func_name, level))
def hpat_pandas_series_groupby_impl(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False):
# TODO Needs to implement parameters value check
# if level is not None and (level < -1 or level > 0):
# raise ValueError("Method Series.groupby(). level > 0 or level < -1 only valid with MultiIndex")
return pandas.core.groupby.SeriesGroupBy(self)
return hpat_pandas_series_groupby_impl
@overload_method(SeriesType, 'ne')
def hpat_pandas_series_ne(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.ne` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method ne().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_ne_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data != other._data)
return hpat_pandas_series_ne_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_ne_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data != other)
return hpat_pandas_series_ne_impl
raise TypingError('{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(_func_name, self, other))
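# Plain-pandas reference for ne() against a Series and against a scalar (illustrative only).
def _ne_reference_example():
    left = pandas.Series([1, 2, 3])
    right = pandas.Series([1, 0, 3])
    return left.ne(right), left.ne(2)  # element-wise != in both cases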
@overload_method(SeriesType, 'add')
def hpat_pandas_series_add(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.add` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method add().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_add_impl(lhs, rhs):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(lhs._data + rhs._data)
return hpat_pandas_series_add_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_add_number_impl(lhs, rhs):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(lhs._data + rhs)
return hpat_pandas_series_add_number_impl
raise TypingError('{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(_func_name, self, other))
@overload_method(SeriesType, 'sub')
def hpat_pandas_series_sub(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.sub` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method sub().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_sub_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data - other._data)
return hpat_pandas_series_sub_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_sub_number_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return | pandas.Series(self._data - other) | pandas.Series |
# download.py : download am arff file from within a zipped file from a given url
# author: <NAME>, <NAME> and <NAME>
# date: 2020-01-15
"""Downloads .zip url to current folder, unzips arff file, loads data, splits data and saves original CSV, as well as train/test splits into a data folder. Currently supports only .zip URLs with a .arff file inside.
Usage: download.py --url=<url> --zip_folder=<zip_folder> --data_name=<data_name>
Options:
--url=<url> URL for the .zip file to be downloaded
--zip_folder=<zip_folder> name of the zip folder at the url
--data_name=<data_name> name of the file within the zip folder
"""
from docopt import docopt
import zipfile
import pandas as pd
import urllib
import requests
from sklearn.model_selection import train_test_split
import numpy as np
from scipy.io import arff
import os
import _io
# url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00426/Autism-Adult-Data%20Plus%20Description%20File.zip"
# zip_folder = "../data/autism_screening.zip"
# data_name = Autism-Adult-Data
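# Illustrative, never called here: the same run expressed as a direct call to main(),
# using the example values above (the arff name inside the UCI zip is assumed).
def _example_run():
    main(url="https://archive.ics.uci.edu/ml/machine-learning-databases/00426/Autism-Adult-Data%20Plus%20Description%20File.zip",
         zip_folder="../data/autism_screening.zip",
         data_name="Autism-Adult-Data")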
opt = docopt(__doc__)
def main(url, zip_folder, data_name):
# send request and save object
# Test
# Throw error if URL is incorrect
try:
r = requests.get(url)
assert(r.status_code == 200)
except Exception as req:
print("You have entered an invalid URL")
print(req)
# extract content of response object 'r' and write to specified filename
new_directory = zip_folder.split('/')
new_directory = new_directory[0]
if not os.path.exists(new_directory):
os.makedirs(new_directory)
# open the zip folder and write binary content to disk
    # Test to make sure the file is a zip file, raise a comprehensible exception otherwise
try:
with open(zip_folder, 'wb') as f:
assert(type(f) == _io.BufferedWriter)
f.write(r.content)
except Exception as bad_zip:
print("This is not a zip file")
# Extract the arff file located in the zipped folder using python library zipfile
arff_file = data_name+".arff"
with zipfile.ZipFile(zip_folder, 'r') as myzip:
myzip.extract(arff_file)
# Use 'scipy.io.arff' library to read in arff file
data = arff.loadarff(arff_file)
# The arff file contains a csv in element 0 and a description of the variables in element 1
df = | pd.DataFrame(data[0], dtype='str') | pandas.DataFrame |
import pandas as pd
import os
'''
Label files from original dataset have following structure:
DepthVideoName, EnteringNumber, ExitingNumber, VideoType
DepthVideoName: the depth video name
EnteringNumber: the number of people entering the bus
ExitingNumber: the number of people exiting the bus
VideoType: the video type. There are 4 video types represented by the index (0: N-C-, 1: N-C+, 2: N+C-, 3: N+C+)
'''
HEADER = ['file_name', 'entering', 'exiting', 'video_type']
TOP_PATH = 'E:/pcds_downloads/'
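# Illustrative sketch (not called anywhere): how a single raw label row maps onto HEADER.
# The comma delimiter and the example row below are assumptions for demonstration only.
def _example_parse_label_row():
    from io import StringIO
    raw_row = "/front/2017_05_02/Depth_2017_05_02_08_00_00.avi,3,1,2"
    return pd.read_csv(StringIO(raw_row), names=HEADER)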
def main():
for file_name in os.listdir(TOP_PATH):
if 'label' in file_name and not ('crowd' in file_name) and not (file_name == 'pcds_dataset_labels_united.csv'):
df_labels = load_labels(TOP_PATH)
labels_singlefile = get_labels(TOP_PATH, file_name)
labels_singlefile = correct_path(labels_singlefile, file_name)
unite_labels(TOP_PATH, df_labels, labels_singlefile)
def unite_labels(top_path, df_labels, labels_singlefile):
'''
    Adds the content of all existing label.txt files to the pcds_dataset_labels_united.csv file
    and removes duplicates
    Arguments:
        top_path: Path where pcds_dataset_labels_united.csv is placed and which is
                  searched for label.txt files
'''
df_labels = pd.concat([df_labels, labels_singlefile],
axis=0).drop_duplicates(subset='file_name')
df_labels.to_csv(top_path + '/pcds_dataset_labels_united.csv', header = None, index=None)
def correct_path(df_labels, file_name):
'''
    Due to the change of the folder structure, the 'front_in' or 'back_out' directory has to be prepended to the path
Arguments:
df_labels: The dataframe with the labels
file_name: The filename of the label file
returns: Corrected df of labels
raises: ValueError if there is no back or front in the label path
'''
if 'front' in file_name:
df_labels['file_name'] = df_labels['file_name'].apply(lambda row: 'front_in' + row[1:-4].replace('Depth', 'Color') + '.npy')
return df_labels
elif 'back' in file_name:
df_labels['file_name'] = df_labels['file_name'].apply(lambda row: 'back_out' + row[1:-4].replace('Depth', 'Color') + '.npy')
return df_labels
else:
print(file_name)
raise ValueError('File {} not a valid label file'.format(file_name))
def correct_path_csv(df_labels, file_name, save_folder):
'''
    Due to the change of the folder structure, the 'front_in' or 'back_out' directory has to be prepended to the path,
    and the result is saved to the label csv file
Arguments:
df_labels: The dataframe with the labels
file_name: The filename of the label file
save_folder: Folder where labels_united.csv shall be saved
raises: ValueError if there is no back or front in the label path
'''
if 'front' in file_name:
df_labels['file_name'] = df_labels['file_name'].apply(lambda row: 'front_in' + row[1:-4] + '.npy')
elif 'back' in file_name:
df_labels['file_name'] = df_labels['file_name'].apply(lambda row: 'back_out' + row[1:-4] + '.npy')
else:
raise ValueError('File {} not a valid label file'.format(file_name))
df_labels.to_csv(os.path.join(save_folder, 'pcds_dataset_labels_united.csv'))
def load_labels(top_path):
'''
    Checks whether an already existing 'pcds_dataset_labels_united.csv' is present in top_path,
    otherwise returns an empty df with the correct header names
    Arguments:
        top_path: Path that is searched for the pcds_dataset_labels_united.csv file
    returns: The previously stored labels_united file as a pandas DataFrame,
             or an empty DataFrame if no such file exists
'''
files = os.listdir(top_path)
if 'pcds_dataset_labels_united.csv' in files:
return | pd.read_csv(top_path + 'pcds_dataset_labels_united.csv', names=HEADER) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # 1 Compiling notebook 2 outputs
# In[1]:
import configparser
import glob
import json
import math
import numpy as np
import pandas as pd
import re
from utils.misc.regex_block import MutationFinder, TmVar, CustomWBregex, normalize_mutations
with open("data/model_output/processed/temp_paper_mut_count.json", "w") as outfile:
json.dump(paper_mut_count, outfile)
print('All', ner_count, 'NER data rows were ignored. Only', regex_count, 'regex data rows were used.')
# saving things
data = pd.DataFrame(data[:], columns=['WBPaper ID', 'Method', 'Genes', '*Gene-Variant combo ', 'Mutations', 'Normalized Mutations', 'Sentence'])
data.to_csv("data/model_output/processed/snippets_2.csv", index=False, encoding='utf-8')
# # 3 Normalizing common gene name to its WormBase ID
# And getting the gene and mutation frequency in a paper.
# In[14]:
data = pd.read_csv("data/model_output/processed/snippets_2.csv")
data = data.to_numpy() # 'WBPaper ID', 'Method', 'Genes', '*Gene-Variant combo ', 'Mutations', 'Normalized Mutations', 'Sentence'
with open("data/model_output/processed/temp_paper_wbgene_count.json", "w") as outfile:
json.dump(paper_wbgene_count, outfile)
# Checking if any detected gene was NOT in the WB gene dictionary
# In[18]:
data = np.array(data)
data[len(data[:,2]) != len(data[:,3])]
# above cell takes a while to complete, so saving the data temporarily
data = pd.DataFrame(data[:], columns=['WBPaper ID', 'Method', 'Genes', 'WBGenes', '*Gene-Variant combo ', 'Mutations', 'Normalized Mutations', 'Sentence'])
data.to_csv("data/model_output/processed/snippets_3.csv", index=False, encoding='utf-8')
data = None
# # 5 Validation
# Finding the gene and mutation matches using the transcripts in c_elegans.PRJNA13758.WS281.protein.fa
# Get the file here - ftp://ftp.ebi.ac.uk/pub/databases/wormbase/releases/WS281/species/c_elegans/PRJNA13758/c_elegans.PRJNA13758.WS281.protein.fa.gz
data = pd.read_csv("data/model_output/processed/snippets_3.csv")
data = data.to_numpy() # 'WBPaper ID', 'Method', 'Genes', 'WBGenes', '*Gene-Variant combo ', 'Mutations', 'Normalized Mutations', 'Sentence'
proteinfa = Path('data/gsoc/proteinfa/c_elegans.PRJNA13758.WS281.protein.fa').read_text().split('>')[1:]
wb_gene_and_prot = dict() # {wbgene: [transcript, protein]}
for row in proteinfa:
wbgene = re.findall("WBGene[0-9]+", row)[0]
protein = "".join(re.findall("\n.*", row)).replace('\n', '')
transcript = row.split(' ')[0]
if wbgene not in wb_gene_and_prot.keys():
wb_gene_and_prot[wbgene] = []
wb_gene_and_prot[wbgene].append([transcript, protein])
len(wb_gene_and_prot)
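# Illustrative check of the mapping built above: each WBGene ID points to a list of
# [transcript, protein-sequence] pairs. The record below is synthetic, not a real WormBase entry.
def _example_proteinfa_record():
    fake_record = "Y110A7A.10 gene=WBGene00000001\nMSDSEQ\nKLMNP"
    wbgene = re.findall("WBGene[0-9]+", fake_record)[0]
    protein = "".join(re.findall("\n.*", fake_record)).replace('\n', '')
    transcript = fake_record.split(' ')[0]
    return {wbgene: [[transcript, protein]]}  # {'WBGene00000001': [['Y110A7A.10', 'MSDSEQKLMNP']]}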
# #### Create a pair of gene and mutation only when BOTH are present in the same sentence.
# In[24]:
paper_raw_info_compiled = []
# 'WBPaper ID', 'Method', 'Genes', 'WBGenes', '*Gene-Variant combo ', 'Mutations', 'Normalized Mutations', 'Sentence'
for row in data:
ppr_id = row[0]
norm_muts = row[-2]
wbgenes = row[3]
sentence = row[-1]
gene_var = row[4]
# filtering out nan values
if type(norm_muts) != float and type(wbgenes) != float:
norm_muts = norm_muts[1:-1].split("', '")
wbgenes = wbgenes[1:-1].split("', '")
for m in norm_muts:
for g in wbgenes:
if len(m) and len(g):
paper_raw_info_compiled.append([ppr_id, g, m, sentence, gene_var])
# In[25]:
matches = []
final_sheet = [] # ppr_id, gene, transcript
for info_from_ppr in paper_raw_info_compiled:
ppr_id = info_from_ppr[0]
gene = info_from_ppr[1]
mut = info_from_ppr[2]
sent = info_from_ppr[3]
gene_var = info_from_ppr[4]
if not len(mut):
continue
if gene not in wb_gene_and_prot.keys():
continue
for row in wb_gene_and_prot[gene]:
transcript, protein_string = row
wt_res = mut[0]
pos = int(''.join(n for n in mut if n.isdigit()))
mut_res = mut[-1]
try:
if protein_string[pos-1] == wt_res:
matches.append([ppr_id, gene, mut, gene_var, transcript, sent])
except IndexError:
pass
for r in matches:
p = r[0]
p, wbg, mut, gene_var, transcript, sent = r
# Adding gene common names column, again
# Current code doesn't keep any link between the WB gene name and the common name
g_common_name = all_wb_genes[wbg]
g_common_name = ', '.join(g_common_name)
final_sheet.append([p, wbg, g_common_name, mut, gene_var, transcript, sent])
# In[26]:
len(final_sheet)
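# Worked example of the residue check used in the matching loop above (synthetic values):
# for a normalized mutation like "G5R", the wild-type residue is mut[0], the 1-based
# position is the digits, and the protein string is indexed at pos - 1.
def _example_position_check():
    mut = "G5R"
    protein_string = "MASKGVLT"
    wt_res = mut[0]
    pos = int(''.join(n for n in mut if n.isdigit()))
    return protein_string[pos - 1] == wt_res  # 'G' sits at position 5 -> True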
# #### Getting metadata on genes and mutations, and adding warnings column
# In[27]:
with open("data/model_output/processed/temp_paper_wbgene_count.json", "r") as f:
paper_wbgene_count = json.loads(f.read())
with open("data/model_output/processed/temp_paper_mut_count.json", "r") as f:
paper_mut_count = json.loads(f.read())
# In[28]:
final_sheet = np.array(final_sheet)
updated_sheet = []
for i, row in enumerate(final_sheet):
warnings = []
paper_id = row[0]
wbgene = row[1]
mut = row[3]
sentence = row[-1]
for ppr_mut, count in paper_mut_count[paper_id].items():
if mut == ppr_mut and count == 1:
warnings.append(f'{mut} mentioned only once in entire paper')
break
rows_with_same_mut = final_sheet[np.logical_and(final_sheet[:, 0] == paper_id, final_sheet[:,3] == mut)]
same_mut_all_genes = list(set(rows_with_same_mut[:, 1]))
# If the same variant is found in two different genes in the same paper - WARN!
# It is more likely to belong to the gene it is most frequently encountered
if len(same_mut_all_genes) > 1:
temp_warn_store = f'{mut} was paired with other genes too:'
for ppr_gene, count in paper_wbgene_count[paper_id].items():
if ppr_gene in same_mut_all_genes:
temp_warn_store += (f' {ppr_gene} (seen {count} times),')
warnings.append(temp_warn_store)
cut_mut = re.sub("([A-Z])([0-9]+)([A-Za-z]+)", r'\1\2', mut)
remaining_mut = mut.replace(cut_mut, "")
same_cut_muts = [i for i,m in enumerate(final_sheet[:,3]) if (m[:len(cut_mut)] == cut_mut and m[len(cut_mut):] != remaining_mut)]
if same_cut_muts:
temp_warn_store = f'{mut} similar to:'
for temp_i in same_cut_muts:
temp_warn_store += (f' {final_sheet[:,3][temp_i]} (line {temp_i}),')
warnings.append(temp_warn_store)
all_muts_in_sentence = data[np.logical_and(data[:, 0] == paper_id, data[:,-1] == sentence)][:,-2]
all_muts_in_sentence = all_muts_in_sentence[0][1:-1].split("', '")
all_matched_muts_in_sentence = final_sheet[np.logical_and(final_sheet[:, 0] == paper_id, final_sheet[:,-1] == sentence)][:,3]
all_matched_muts_in_sentence = list(set(all_matched_muts_in_sentence))
unmatched_muts_in_sentence = [m for m in all_muts_in_sentence if m not in all_matched_muts_in_sentence]
if len(unmatched_muts_in_sentence) >= 2:
temp_warn_store = f'Sentence has multiple mutations which did not match:'
for m in unmatched_muts_in_sentence:
temp_warn_store += (f' {m},')
warnings.append(temp_warn_store)
all_genes_with_this_mut = final_sheet[np.logical_and(final_sheet[:, 0] == paper_id, final_sheet[:, 3] == mut)][:, 1]
all_genes_with_this_mut = list(set(all_genes_with_this_mut))
if len(all_genes_with_this_mut) > 3:
temp_warn_store = f'{mut} was matched with {len(all_genes_with_this_mut)} genes:'
for g in all_genes_with_this_mut:
temp_warn_store += (f' {g},')
warnings.append(temp_warn_store)
if warnings:
warnings = " || ".join(warnings)
else:
warnings = ""
updated_sheet.append(np.insert(row, -1, warnings).tolist())
# In[29]:
# saving things
updated_sheet = pd.DataFrame(updated_sheet[:], columns=['WBPaper ID', 'WBGene', 'Gene', 'Mutation', 'Gene-Var combo', 'Transcript', 'Warnings', 'Sentence'])
updated_sheet.to_csv("data/model_output/processed/snippets_4.csv", index=False, encoding='utf-8')
updated_sheet = None
# # 6 Additional details
# ### 6.1 Strains
# In[30]:
data = pd.read_csv("data/model_output/processed/snippets_4.csv").to_numpy()
# In[31]:
strains = Path('data/gsoc/Strains.txt').read_text().split('\n')
strains = [r.split('\t') for r in strains][:-1]
all_wb_strains = dict()
for row in strains:
if row[0] not in all_wb_strains.keys():
all_wb_strains[row[0]] = []
for strain in row[1:]:
if len(strain) and strain.lower() not in all_wb_strains[row[0]]:
all_wb_strains[row[0]].append(strain.lower())
strains = [s for row in strains for s in row[1:] if len(s) and not s.isdigit()]
# In[32]:
OPENING_CLOSING_REGEXES = [r'(?:^|[^0-9A-Za-z])(', r')(?:^|[^0-9A-Za-z])']
all_strain = OPENING_CLOSING_REGEXES[0] + '|'.join(strains) + OPENING_CLOSING_REGEXES[1]
all_strain = [re.compile(r,re.IGNORECASE) for r in [all_strain]]
# 'WBPaper ID', 'WBGene', 'Gene', 'Mutation', 'Gene-Var combo', 'Transcript', 'Warnings', 'Sentence'
updated_data = []
total = len(data)
print('Total sentences: {}, processed count: '.format(total), end=' ')
for i, sent in enumerate(data[:, -1]):
if (i+1) % 100 == 0: print(f"{i+1}", end = " ")
paper_strains = []
for regex in all_strain:
for m in regex.finditer(sent):
span = (m.start(0), m.end(0))
raw = (sent[span[0]:span[1]]).strip()
raw = raw[1:] if not raw[0].isalnum() else raw
raw = raw[:-1] if not raw[-1].isalnum() else raw
if len(raw.strip()) > 1 and not raw.strip().isdigit(): paper_strains.append(raw.strip())
if paper_strains:
paper_strains = list(set(paper_strains))
col_wbid = []
for strain in paper_strains:
for key, value in all_wb_strains.items():
if strain.lower() in value:
col_wbid.append(key)
break
paper_strains = "'" + "', '".join(paper_strains) + "'"
if col_wbid:
col_wbid = list(set(col_wbid))
col_wbid = ", ".join(col_wbid)
else:
col_wbid = ''
# lazy way to deal with bad snippets due to special characters in the Strains.txt file
# which are caught in regex
paper_strains = ''
else:
paper_strains = ''
col_wbid = ''
updated_data.append([data[i,0], data[i,1], data[i,2], col_wbid, paper_strains, data[i,3], data[i,-4], data[i,-3], data[i,-2], data[i,-1]])
data = np.array(updated_data) # 'WBPaper ID', 'WBGene', 'Gene', 'WBStrain', 'Strains', 'Mutation', 'Gene-Var combo', 'Transcript', 'Warnings', 'Sentence'
updated_data = None
# ### 6.2 Variants
# In[33]:
OPENING_CLOSING_REGEXES = [r'(?:^|[^0-9A-Za-z])(', r')(?:^|[^0-9A-Za-z])']
# the allele regex and db idea was stolen from wbtools
allele_designations = np.load('data/gsoc/wbtools/wb_allele_designations.npy').astype('U6')
alleles_variations = np.load('data/gsoc/wbtools/wb_alleles_variations.npy').astype('U6')
DB_VAR_REGEX = r'({designations}|m|p|ts|gf|lf|d|sd|am|cs)([0-9]+)'
var_regex_1 = OPENING_CLOSING_REGEXES[0] + DB_VAR_REGEX.format(designations="|".join(allele_designations)) + OPENING_CLOSING_REGEXES[1]
all_var = OPENING_CLOSING_REGEXES[0] + '|'.join(alleles_variations) + '|' + var_regex_1 + OPENING_CLOSING_REGEXES[1]
all_var = [re.compile(r,re.IGNORECASE) for r in [all_var]]
# 'WBPaper ID', 'WBGene', 'Gene', 'WBStrain', 'Strains', 'Mutation', 'Transcript', 'Warnings', 'Sentence'
updated_data = []
total = len(data)
print('Total sentences: {}, processed count: '.format(total), end=' ')
for i, sent in enumerate(data[:, -1]):
if (i+1) % 100 == 0: print(f"{i+1}", end = " ")
variants = []
for regex in all_var:
for m in regex.finditer(sent):
span = (m.start(0), m.end(0))
raw = (sent[span[0]:span[1]]).strip()
raw = raw[1:] if not raw[0].isalnum() else raw
raw = raw[:-1] if not raw[-1].isalnum() else raw
if len(raw.strip()) > 1: variants.append(raw.strip())
if variants:
variants = list(set(variants))
variants = "'" + "', '".join(variants) + "'"
else:
variants = ''
updated_data.append([data[i,0], data[i,1], data[i,2], data[i,3], data[i,4], variants, data[i,-5], data[i,-4], data[i,-3], data[i,-2], data[i,-1]])
data = np.array(updated_data) # 'WBPaper ID', 'WBGene', 'Gene', 'WBStrain', 'Strains', 'Variants', 'Mutation', 'Gene-Var combo', 'Transcript', 'Warnings', 'Sentence'
updated_data = None
# ### 6.3 Variation type
# Extraction rate would be very low as most snippets from notebook 2 are discarded due to limitation in the mutation normalization block above.
# In[34]:
Variation_type = pd.read_csv("data/gsoc/Variation_type.csv").to_numpy()
Variation_type = [t.replace("_", " ") for t in Variation_type[:,2] if type(t)!=float]
# In[35]:
updated_sheet = []
# 'WBPaper ID', 'WBGene', 'Gene', 'WBStrain', 'Strains', 'Variants', 'Mutation', 'Gene-Var combo', 'Transcript', 'Warnings', 'Sentence'
for i, row in enumerate(data):
sent = row[-1]
col_var_type = []
for sub in Variation_type:
if re.search(sub, sent, re.IGNORECASE):
col_var_type.append(sub)
if col_var_type:
col_var_type = list(set(col_var_type))
col_var_type = ", ".join(col_var_type)
else:
col_var_type = ''
updated_sheet.append(np.insert(row, -3, col_var_type).tolist())
# In[36]:
data = np.array(updated_sheet)
updated_sheet = None
# ### 6.4 Functional effect & Generation method
# These types of data appeared in only a small subset of the papers tested during development - expect these columns to be mostly empty.
# In[37]:
functional_effect = ['function uncertain', 'transcript function', 'translational product function', 'decreased transcript level', 'increased transcript level', 'decreased transcript stability', 'gain of function', 'dominant negative', 'dominant negativ', 'antimorphic', 'hypermorphic', 'neomorphic', 'conditional activity', 'hypomorphic', 'amorphic', 'repressible', 'misexpressed']
# In[38]:
common_gen_methods = ['CRISPR', 'ENU', 'EMS']
# In[39]:
updated_sheet = []
# 'WBPaper ID', 'WBGene', 'Gene', 'WBStrain', 'Strains', 'Variants', 'Mutation', 'Gene-Var combo', 'Variation type', 'Transcript', 'Warnings', 'Sentence'
for i, row in enumerate(data):
sent = row[-1]
col_functional_effect = []
col_gen_method = []
for sub in functional_effect:
if re.search(sub, sent, re.IGNORECASE):
col_functional_effect.append(sub)
for sub in common_gen_methods:
if re.search(sub, sent):
col_gen_method.append(sub)
if col_functional_effect:
col_functional_effect = list(set(col_functional_effect))
col_functional_effect = ", ".join(col_functional_effect)
else:
col_functional_effect = ''
if col_gen_method:
col_gen_method = list(set(col_gen_method))
col_gen_method = ", ".join(col_gen_method)
else:
col_gen_method = ''
row = np.insert(row, -3, col_functional_effect)
row = np.insert(row, -3, col_gen_method)
updated_sheet.append(row.tolist())
data = np.array(updated_sheet)
updated_sheet = None
# In[40]:
# saving things
updated_sheet = pd.DataFrame(data[:], columns=['WBPaper ID', 'WBGene', 'Gene', 'WBStrain', 'Strains', 'Variants', 'Mutation', 'Gene-Var combo', 'Variation type', 'Functional effect', 'Generation method', 'Transcript', 'Warnings', 'Sentence'])
updated_sheet.to_csv("data/model_output/processed/final.csv", index=False, encoding='utf-8')
updated_sheet = None
# # 7 Verification
# Finding precision by cross-checking with the manually curated data.
# In[41]:
data = | pd.read_csv("data/model_output/processed/final.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
tf.random.set_seed(2021)
from models import DNMC, NMC, NSurv, MLP, train_model, evaluate_model
FILL_VALUES = {
'alb': 3.5,
'pafi': 333.3,
'bili': 1.01,
'crea': 1.01,
'bun': 6.51,
'wblc': 9.,
'urine': 2502.
}
TO_DROP = ['aps', 'sps', 'surv2m', 'surv6m', 'prg2m', 'prg6m', 'dnr', 'dnrday']
TO_DROP = TO_DROP + ['sfdm2', 'hospdead']
# load, drop columns, fill using specified fill values
df = pd.read_csv('../datasets/support2.csv').drop(TO_DROP,axis=1).fillna(value=FILL_VALUES)
# get dummies for categorical vars
df = pd.get_dummies(df, dummy_na=True)
# fill remaining values to the median
df = df.fillna(df.median())
# standardize numeric columns
numrc_cols = df.dtypes == 'float64'
df.loc[:, numrc_cols] = (df.loc[:, numrc_cols] - df.loc[:, numrc_cols].mean()) / df.loc[:, numrc_cols].std()
OUTCOMES = ['death', 'd.time']
X = df.drop(OUTCOMES, axis=1).sample(frac=1, random_state=2021)
X = X.values
print('There are', X.shape[1], 'features')
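# Toy illustration (not used below) of the fill-to-median / z-score pattern applied above.
def _example_standardize():
    toy = pd.DataFrame({'a': [1.0, 2.0, np.nan, 4.0]})
    toy = toy.fillna(toy.median())
    numrc = toy.dtypes == 'float64'
    toy.loc[:, numrc] = (toy.loc[:, numrc] - toy.loc[:, numrc].mean()) / toy.loc[:, numrc].std()
    return toy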
from generate_data import generate_semi_synthetic, generate_synth_censoring, onehot
### BEGIN COLLECTING RESULTS HERE ###
all_results = []
all_weight_results = []
LEARNING_RATE = 1e-3
BATCH_SIZE = 100
N_BINS = 10
MAX_EPOCHS = 500
lr = 0.03
DATATYPE = 'synthetic'
RESULTS_NAME = '../results/SUPPORT_' + DATATYPE + '.csv'
assert DATATYPE in ['synth_censoring', 'synthetic', 'real']
# NOTE that we are skipping importance weights here.
for random_state in [2020, 2016, 2013]:
for num_distinct in [4, 8, 12, 16]:
num_shared = 20 - num_distinct
print('')
print('Starting runs with random state', random_state, 'and %i distinct features' % num_distinct)
print('')
if DATATYPE == 'synthetic':
synth = generate_semi_synthetic(
X, num_distinct, num_shared, N_BINS, random_state,
e_prob_spread=2.)
elif DATATYPE == 'synth_censoring':
synth = generate_synth_censoring(
X, df['d.time'].values, df['death'].values,
num_distinct, N_BINS, random_state,
e_prob_spread=2.)
x_train, x_val, x_test = X[:6000], X[6000:7500], X[7500:]
y = onehot(synth['y_disc'], ncategories=10)
y_train, y_val, y_test = y[:6000], y[6000:7500], y[7500:]
s_train, s_val, s_test = synth['s'][:6000], synth['s'][6000:7500], synth['s'][7500:]
e_train, e_val, e_test = synth['e'][:6000], synth['e'][6000:7500], synth['e'][7500:]
#for lr in np.logspace(-2, -1, 6):
# Run NMC
print('Running NMC with lr =', lr)
model = NMC(n_bins=N_BINS, lr=lr)
try:
train_model(
model, (x_train, y_train, s_train), (x_val, y_val, s_val),
MAX_EPOCHS, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE)
all_results.append(
evaluate_model(
model, (x_test, y_test, s_test), e_test,
(synth['shared_features'], synth['tc_features'], synth['e_features']),
dataset='nacd', random_state=random_state))
        except Exception:
print('Run Failed')
# Run NSurv
print('Running NSurv with lr =', lr)
model = NSurv(n_bins=N_BINS, lr=lr)
try:
train_model(
model, (x_train, y_train, s_train), (x_val, y_val, s_val),
MAX_EPOCHS, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE)
all_results.append(
evaluate_model(
model, (x_test, y_test, s_test), e_test,
(synth['shared_features'], synth['tc_features'], synth['e_features']),
dataset='nacd', random_state=random_state))
        except Exception:
print('Run Failed')
# Run MLP
print('Running MLP with lr =', lr)
model = MLP(lr=lr)
try:
train_model(
model, (x_train, y_train, s_train), (x_val, y_val, s_val),
MAX_EPOCHS, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE)
all_results.append(
evaluate_model(
model, (x_test, y_test, s_test), e_test,
(synth['shared_features'], synth['tc_features'], synth['e_features']),
dataset='nacd', random_state=random_state))
        except Exception:
print('Run Failed')
# Run DNMC
for ld in [1., 10.]:
print('Running DNMC (with Psi) with lr =', lr, 'and ld =', ld)
model = DNMC(n_bins=N_BINS, lr=lr, ld=ld)
try:
train_model(
model, (x_train, y_train, s_train), (x_val, y_val, s_val),
MAX_EPOCHS, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE)
all_results.append(
evaluate_model(
model, (x_test, y_test, s_test), e_test,
(synth['shared_features'], synth['tc_features'], synth['e_features']),
dataset='nacd', random_state=random_state))
            except Exception:
print('Run Failed')
print('Running DNMC (NO Psi) with lr =', lr, 'and ld =', ld)
model = DNMC(n_bins=N_BINS, lr=lr, ld=ld, include_psi=False)
try:
train_model(
model, (x_train, y_train, s_train), (x_val, y_val, s_val),
MAX_EPOCHS, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE)
results = evaluate_model(
model, (x_test, y_test, s_test), e_test,
(synth['shared_features'], synth['tc_features'], synth['e_features']),
dataset='nacd', random_state=random_state)
results['model'] = 'DNMC_noPsi'
all_results.append(results)
            except Exception:
print('Run Failed')
| pd.DataFrame(all_results) | pandas.DataFrame |
# third-party libraries
import pandas as pd
import pytest
# local imports
from .. import lstm_preprocessing
class TestSpatialGrouping:
"""Tests the output of a single location for the record"""
def test_selection_1(self):
"""Select the default, which is choosing the location from dataset 1"""
data_list = [[11.22, 33.44, 55.66, 77.88], [99.00, 11.22, 33.44, 55.66]]
target_list = [[11.22, 33.44], [99.00, 11.22]]
data = pd.DataFrame(data_list, columns=['lat1', 'lon1', 'lat2', 'lon2'])
target = pd.DataFrame(target_list, columns=['lat', 'lon'])
test = lstm_preprocessing.spatial_grouping(data)
assert test.equals(target)
def test_selection_2(self):
"""Select the location from dataset 2"""
data_list = [[11.22, 33.44, 55.66, 77.88], [99.00, 11.22, 33.44, 55.66]]
target_list = [[55.66, 77.88], [33.44, 55.66]]
data = pd.DataFrame(data_list, columns=['lat1', 'lon1', 'lat2', 'lon2'])
target = pd.DataFrame(target_list, columns=['lat', 'lon'])
test = lstm_preprocessing.spatial_grouping(data, location_selection='2')
assert test.equals(target)
def test_selection_osm(self):
"""Select the location by finding the nearest OSM node to the average"""
data_list = [[44.594487, -123.262589, 44.562769, -123.267733],
[44.594528, -123.261476, 44.563046, -123.268784]]
target_list = [[36921149, 44.57822, -123.264745],
[36921149, 44.57822, -123.264745]]
data = pd.DataFrame(data_list, columns=['lat1', 'lon1', 'lat2', 'lon2'])
target = pd.DataFrame(target_list, columns=['osm_id', 'lat', 'lon'])
test = lstm_preprocessing.spatial_grouping(data, location_selection='osm')
assert test.equals(target)
class TestAssignOsm:
"""Find the nearest OSM node to the average of the two dataset locations"""
def test_find_nearest(self):
data_list = [[44.594487, -123.262589, 44.562769, -123.267733],
[44.594528, -123.261476, 44.563046, -123.268784]]
target_list = [[36921149, 44.57822, -123.264745],
[36921149, 44.57822, -123.264745]]
data = pd.DataFrame(data_list, columns=['lat1', 'lon1', 'lat2', 'lon2'])
target = pd.DataFrame(target_list, columns=['osm_id', 'lat', 'lon'])
test = lstm_preprocessing.assign_osm(data)
assert test.equals(target)
class TestOccupancyLevel:
"""Tests the output of occupancy levels for both grouped and single user data"""
def test_both_individual(self):
"""Both datasets have individual identifiers"""
data_list = [['bike1', 'scooter1']]
target_list = [['bike1', 'scooter1', 1, 1]]
data = pd.DataFrame(data_list, columns=['element_id1', 'element_id2'])
target = pd.DataFrame(target_list, columns=['element_id1', 'element_id2', 'occupancy1', 'occupancy2'])
test = lstm_preprocessing.occupancy_level(data)
assert test.equals(target)
def test_both_grouped(self):
"""Both datasets have grouped counts"""
data_list = [['bike1', 'scooter1', 1519330080, 1519330081, 2, 1, 3, 1],
['bike1', 'scooter1', 1519330085, 1519330086, 3, 0, 2, 1],
['bike1', 'scooter1', 1519430080, 1519430081, 3, 1, 4, 2],
['bike1', 'scooter1', 1519430085, 1519430086, 1, 2, 0, 1]]
target_list = [['bike1', 'scooter1', '2018-02-22 20:08:00', '2018-02-22 20:08:01', 1, 2],
['bike1', 'scooter1', '2018-02-22 20:08:05', '2018-02-22 20:08:06', 4, 3],
['bike1', 'scooter1', '2018-02-23 23:54:40', '2018-02-23 23:54:41', 2, 2],
['bike1', 'scooter1', '2018-02-23 23:54:45', '2018-02-23 23:54:46', 1, 1]]
data = pd.DataFrame(data_list, columns=['element_id1', 'element_id2', 'timestamp1', 'timestamp2', 'boardings1', 'alightings1', 'boardings2', 'alightings2'])
target = pd.DataFrame(target_list, columns=['element_id1', 'element_id2', 'timestamp1', 'timestamp2', 'occupancy1', 'occupancy2'])
target['timestamp1'] = pd.to_datetime(target['timestamp1'])
target['timestamp2'] = pd.to_datetime(target['timestamp2'])
test = lstm_preprocessing.occupancy_level(data)
assert test.equals(target)
class TestDailyCumulative:
"""Test the cumulative sum of grouped data to derive occupancy"""
def test_summing_1_timestamp(self):
"""Test cumulative sum for dataset 1 with timestamp"""
data_list = [['bob1', 1519330080, 2, 1], ['bob1', 1519330085, 3, 0], ['bob1', 1519430080, 3, 1], ['bob1', 1519430085, 1, 2]]
target_list = [['bob1', '2018-02-22 20:08:00', 1], ['bob1', '2018-02-22 20:08:05', 4], ['bob1', '2018-02-23 23:54:40', 2], ['bob1', '2018-02-23 23:54:45', 1]]
data = pd.DataFrame(data_list, columns=['element_id1', 'timestamp1', 'boardings1', 'alightings1'])
target = pd.DataFrame(target_list, columns=['element_id1', 'timestamp1', 'occupancy1'])
target['timestamp1'] = pd.to_datetime(target['timestamp1'])
test = lstm_preprocessing.daily_cumulative(data, '1')
assert test.equals(target)
def test_summing_2_timestamp(self):
"""Test cumulative sum for dataset 2 with timestamp"""
data_list = [['bob2', 1519330080, 2, 1], ['bob2', 1519330085, 3, 0], ['bob2', 1519430080, 3, 1], ['bob2', 1519430085, 1, 2]]
target_list = [['bob2', '2018-02-22 20:08:00', 1], ['bob2', '2018-02-22 20:08:05', 4], ['bob2', '2018-02-23 23:54:40', 2], ['bob2', '2018-02-23 23:54:45', 1]]
data = pd.DataFrame(data_list, columns=['element_id2', 'timestamp2', 'boardings2', 'alightings2'])
target = pd.DataFrame(target_list, columns=['element_id2', 'timestamp2', 'occupancy2'])
target['timestamp2'] = pd.to_datetime(target['timestamp2'])
test = lstm_preprocessing.daily_cumulative(data, '2')
assert test.equals(target)
def test_summing_1_session(self):
"""Test cumulative sum for dataset 1 with session times"""
data_list = [['bob1', 1519330080, 1519330081, 2, 1],
['bob1', 1519330085, 1519330086, 3, 0],
['bob1', 1519430080, 1519430081, 3, 1],
['bob1', 1519430085, 1519430086, 1, 2]]
target_list = [['bob1', '2018-02-22 20:08:00', '2018-02-22 20:08:01', 1],
['bob1', '2018-02-22 20:08:05', '2018-02-22 20:08:06', 4],
['bob1', '2018-02-23 23:54:40', '2018-02-23 23:54:41', 2],
['bob1', '2018-02-23 23:54:45', '2018-02-23 23:54:46', 1]]
data = pd.DataFrame(data_list, columns=['element_id1', 'session_start1', 'session_end1', 'boardings1', 'alightings1'])
target = pd.DataFrame(target_list, columns=['element_id1', 'session_start1', 'session_end1', 'occupancy1'])
target['session_start1'] = pd.to_datetime(target['session_start1'])
target['session_end1'] = pd.to_datetime(target['session_end1'])
test = lstm_preprocessing.daily_cumulative(data, '1')
assert test.equals(target)
def test_summing_2_session(self):
"""Test cumulative sum for dataset 2 with session times"""
data_list = [['bob2', 1519330080, 1519330081, 2, 1],
['bob2', 1519330085, 1519330086, 3, 0],
['bob2', 1519430080, 1519430081, 3, 1],
['bob2', 1519430085, 1519430086, 1, 2]]
target_list = [['bob2', '2018-02-22 20:08:00', '2018-02-22 20:08:01', 1],
['bob2', '2018-02-22 20:08:05', '2018-02-22 20:08:06', 4],
['bob2', '2018-02-23 23:54:40', '2018-02-23 23:54:41', 2],
['bob2', '2018-02-23 23:54:45', '2018-02-23 23:54:46', 1]]
data = pd.DataFrame(data_list, columns=['element_id2', 'session_start2', 'session_end2', 'boardings2', 'alightings2'])
target = pd.DataFrame(target_list, columns=['element_id2', 'session_start2', 'session_end2', 'occupancy2'])
target['session_start2'] = pd.to_datetime(target['session_start2'])
target['session_end2'] = pd.to_datetime(target['session_end2'])
test = lstm_preprocessing.daily_cumulative(data, '2')
assert test.equals(target)
def test_invalid_identifier(self):
"""Tests if exception is raised when identifier parameter is not valid"""
data_list = [['bob2', 1519330080, 1519330081, 2, 1],
['bob2', 1519330085, 1519330086, 3, 0],
['bob2', 1519430080, 1519430081, 3, 1],
['bob2', 1519430085, 1519430086, 1, 2]]
target_list = [['bob2', '2018-02-22 20:08:00', '2018-02-22 20:08:01', 1],
['bob2', '2018-02-22 20:08:05', '2018-02-22 20:08:06', 4],
['bob2', '2018-02-23 23:54:40', '2018-02-23 23:54:41', 2],
['bob2', '2018-02-23 23:54:45', '2018-02-23 23:54:46', 1]]
data = pd.DataFrame(data_list, columns=['element_id2', 'session_start2', 'session_end2', 'boardings2', 'alightings2'])
with pytest.raises(Exception):
lstm_preprocessing.daily_cumulative(data, '3')
class TestTimeGrouping:
"""Tests the grouping of records into specified time intervals"""
def test_timestamp1_session2_interval15_selection1(self):
data_list = [[1519330080, 1519330090, 44.44, 55.55, 2, 3],
[1519330081, 1519330030, 44.44, 55.55, 1, 4],
[1519430080, 1519430090, 44.44, 55.55, 3, 2],
[1519430081, 1519430030, 44.44, 55.55, 2, 6]]
target_list = [['2018-02-22 20:00:00', 44.44, 55.55, 3, 7],
['2018-02-23 23:45:00', 44.44, 55.55, 5, 8]]
data = pd.DataFrame(data_list, columns=['timestamp1', 'session_start2', 'lat', 'lon', 'occupancy1', 'occupancy2'])
target = pd.DataFrame(target_list, columns=['time', 'lat', 'lon', 'occupancy1', 'occupancy2'])
target['time'] = pd.to_datetime(target['time'])
target_multi = target.set_index(['time', 'lat', 'lon'])
test = lstm_preprocessing.time_grouping(data, interval='15T', time_selection='1')
assert test.equals(target_multi)
def test_timestamp1_session2_interval15_selection2(self):
data_list = [[1519330080, 1519330090, 44.44, 55.55, 2, 3],
[1519330081, 1519330030, 44.44, 55.55, 1, 4],
[1519430080, 1519430090, 44.44, 55.55, 3, 2],
[1519430081, 1519430030, 44.44, 55.55, 2, 6]]
target_list = [['2018-02-22 20:00:00', 44.44, 55.55, 3, 7],
['2018-02-23 23:45:00', 44.44, 55.55, 5, 8]]
data = pd.DataFrame(data_list, columns=['timestamp1', 'session_start2', 'lat', 'lon', 'occupancy1', 'occupancy2'])
target = pd.DataFrame(target_list, columns=['time', 'lat', 'lon', 'occupancy1', 'occupancy2'])
target['time'] = | pd.to_datetime(target['time']) | pandas.to_datetime |
import pandas as pd
import datetime
# create a variable with dates, and from that extract the weekday
# build a list of the 20 most recent days (today, going back one day at a time)
# and then transform it into a dataframe
df_base = datetime.datetime.today()
df_date_list = [df_base - datetime.timedelta(days=x) for x in range(0, 20)]
df = | pd.DataFrame(df_date_list) | pandas.DataFrame |
"""
Module: Build_csv_files
=============================
A module for building the csv-files for GEOSeMOSYS https://github.com/KTH-dESA/GEOSeMOSYS to run that code
In this module the logic around electrified and un-electrified cells are implemented for the 378 cells
---------------------------------------------------------------------------------------------------------------------------------------------
Module author: <NAME> <<EMAIL>>
"""
import pandas as pd
import geopandas as gpd
import os
import fnmatch
pd.options.mode.chained_assignment = None
def renewableninja(path, dest):
"""
    This function organizes the data into the required format: a matrix with the
    location name on the x axis and hourly data on the y axis, so that it can be fed into the https://github.com/KTH-dESA/GEOSeMOSYS code.
    The data is saved as capacityfactor_wind.csv and capacityfactor_solar.csv
:param path:
:param dest:
:return:
"""
files = os.listdir(path)
outwind = []
outsolar = []
for file in files:
if fnmatch.fnmatch(file, '*timezoneout_wind*'):
file = os.path.join(path,file)
wind = pd.read_csv(file, index_col='adjtime')
outwind.append(wind)
for file in files:
if fnmatch.fnmatch(file, '*timezoneout_solar*'):
file = os.path.join(path,file)
solar = pd.read_csv(file, index_col='adjtime')
outsolar.append(solar)
try:
solarbase = pd.concat(outsolar, axis=1)
windbase = pd.concat(outwind, axis=1)
#!/usr/bin/env python
# coding: utf-8
# ### For further info on the BorutaPy package see: <br><br>https://github.com/scikit-learn-contrib/boruta_py
# ##### Articles:
# #### BorutaPy: <br> https://www.jstatsoft.org/article/view/v036i11
# #### Robustness of RF-based feature selection: <br> https://bmcbioinformatics.biomedcentral.com/articles/10.1186/1471-2105-15-8 <br>
#
# In[1]:
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# In[2]:
os.chdir("/home/pchabets/Dropbox/STRESS_INDEX/")
# ### Load in transcriptomics data
# In[3]:
expr_train = pd.read_csv("data/blood_and_saliva_variables/W1/transcriptomics/transcriptomics_2ychronicity_TRAIN.csv")
expr_test = pd.read_csv("data/blood_and_saliva_variables/W1/transcriptomics/transcriptomics_2ychronicity_TEST.csv")
# ### Use Boruta for variabe selection
# #### Turn labels into 0's and 1's
# non-remitted = 0, remitted = 1
# In[4]:
#not_remitted = 0, remitted = 1
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
expr_train['Remitted_depression'] = lbl.fit_transform(expr_train['Remitted_depression'])
# In[5]:
# Base Boruta feature selection only on train set - X and y are from trainset only
X = expr_train.drop(['Remitted_depression', 'pident'], inplace=False, axis=1)
y = expr_train['Remitted_depression']
# #### initialize Boruta
# In[6]:
# set max depth and percentage
max_depth = 1
percentage = 98
# In[7]:
from boruta import BorutaPy
from sklearn.ensemble import RandomForestClassifier
# In[8]:
rf = RandomForestClassifier(
n_jobs=14,
max_depth = max_depth,
class_weight='balanced',
verbose=1
)
# In[9]:
feat_selector = BorutaPy(
estimator = rf,
n_estimators='auto',
max_iter=1000,
perc=percentage,
# random_state=101,
verbose=2
)
# #### fit Boruta
# In[10]:
feat_selector.fit(np.array(X), np.array(y))
# ### Check results
# In[11]:
# check selected features
feat_selector.support_
# In[12]:
# check ranking of features
feat_selector.ranking_
# In[13]:
strong = X.columns[feat_selector.support_].to_list()
weak = X.columns[feat_selector.support_weak_].to_list()
print('Selected features:', strong)
print('Potentially irrelevant features:', weak)
# ### Transform X to selected features X
# In[14]:
# only keep confirmed features, discard tentative features
X_filtered = X.loc[:,feat_selector.support_]
# In[15]:
X_filtered.shape
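# If the tentative (weak) features were to be kept as well, the selection mask could be
# widened like this (a sketch, not used in the steps below):
# X_keep = X.loc[:, feat_selector.support_ | feat_selector.support_weak_]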
# ### Combine into new dataframe with labels and write to file
# #### Train set
# In[16]:
# participant IDs of train set
pid_train = expr_train['pident']
# In[17]:
# concatenate into 1 df
selected_set_TRAIN = pd.concat([pid_train, y, X_filtered], axis=1)
from python_back_end.utilities.custom_multiprocessing import DebuggablePool
import numpy as np
import pandas as pd
from python_back_end.triangle_formatting.date_sorter import DateSorter
from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier
from python_back_end.data_cleaning.type_col_extracter import TypeColExtracter
from python_back_end.definitions import SheetTypeDefinitions
from python_back_end.exceptions import NonpermissibleDateColumnDetected
from python_back_end.program_settings import PROGRAM_STRINGS as ps, PROGRAM_PARAMETERS as pp
from python_back_end.utilities.help_functions import strict_index, sum_unique
from python_back_end.utilities.state_handling import DataHolder, DataStruct
from functools import partial
class TriangleFromTableBuilder:
@staticmethod
def build_triangle_from_table(dh):
new_dh = DataHolder(dh.name)
pool = DebuggablePool(pp.N_CORES)
# First find all date cols and see if one of them has target structure.
for dh_ind, ds in enumerate(dh.data_struct_list):
id_col, hori_date_col, vert_date_col = TriangleFromTableBuilder.do_the_magic(ds, pool)
# cut each id into one row
cut_list = TriangleFromTableBuilder.make_cut_list(ds.df_data[id_col])
# use the cut_list to insert all elements
tr_cols = pd.Series(ds.df_profiles.iloc[0, :] == SheetTypeDefinitions.TRIANGLE_ELEMENT, index=ds.df_profiles.columns)
pad_header_mapping = TriangleFromTableBuilder.make_pad_header_mapping(ds, hori_date_col)
vert_col_tup = (vert_date_col, ds.df_data[vert_date_col])
hori_col_tup = (hori_date_col, ds.df_data[hori_date_col])
id_col_tup = (id_col, ds.df_data[id_col])
func = partial(TriangleFromTableBuilder.apply_cuts, cut_list, vert_col_tup, hori_col_tup, id_col_tup, pad_header_mapping)
tr_col_tup_list = [(col_name, ds.df_data[col_name]) for col_name in tr_cols.index[tr_cols]]
out = pool.map(func, tr_col_tup_list)
#for name, tr_col in ds.df_data[tr_cols.index[tr_cols]].iteritems():
for temp_df_data, temp_df_profiles, name in out:
new_dh.add_sheet(name, temp_df_data, temp_df_profiles)
#new_dh.add_sheet(name, temp_df_data, temp_df_profiles)
pool.close()
return new_dh
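# Example (hypothetical): given a DataHolder whose sheets hold table-shaped claims data,
# one triangle sheet is produced per triangle-element column:
# >>> triangle_holder = TriangleFromTableBuilder.build_triangle_from_table(table_holder)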
@staticmethod
def make_pad_header_mapping(ds, hori_date_col):
start_pad = ds.df_data.columns[-1][:pp.N_DIGITS_HEADER_PADDING]
start_pad = int(start_pad)
temp = ds.df_data[hori_date_col].values
temp_headers = sorted(np.unique(temp))
pad_header_mapping = {head: str(ind).zfill(pp.N_DIGITS_HEADER_PADDING) + ". " + str(head)
for head, ind in zip(temp_headers, range(start_pad, len(temp_headers) + start_pad))}
return pad_header_mapping
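# The mapping built above turns a raw period header such as 2005 into a sortable,
# zero-padded label of the form '007. 2005' (assuming pp.N_DIGITS_HEADER_PADDING == 3;
# the counter continues from the padding of the last existing column).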
@staticmethod
def apply_cuts(cut_list, vert_col_tup, hori_col_tup, id_col_tup, pad_header_mapping, tr_col_tup):
col_list = list()
for cut in cut_list:
# make unique column headers by summing
temp_headers = hori_col_tup[1][cut].values
temp_values = tr_col_tup[1][cut].values
temp_headers, temp_values = sum_unique(temp_headers, temp_values)
temp_headers = [pad_header_mapping[el] for el in temp_headers]
col_df = pd.Series(temp_values, index=temp_headers)
# add stuff to the series
temp_num = vert_col_tup[1][cut[0]]
temp_id = id_col_tup[1][cut[0]]
col_df.loc[vert_col_tup[0]] = temp_num
col_df.loc[id_col_tup[0]] = temp_id
col_list.append(col_df)
temp_df_data = pd.concat(col_list, axis=1, sort=True)
temp_df_data = temp_df_data.transpose()
temp_df_data = temp_df_data.fillna(0)
# get the year column for sorting
sorting_col = temp_df_data.loc[:, vert_col_tup[0]]
temp_df_data = DateSorter.append_and_sort(temp_df_data, sorting_col)
temp_df_profiles = pd.DataFrame(SheetTypeDefinitions.TRIANGLE_ELEMENT, columns=temp_df_data.columns,
index=temp_df_data.index)
temp_df_profiles.loc[:, vert_col_tup[0]] = SheetTypeDefinitions.STRING_DATE
temp_df_profiles.loc[:, id_col_tup[0]] = SheetTypeDefinitions.ID_ELEMENT
#temp_ds = DataStruct()
return temp_df_data, temp_df_profiles, tr_col_tup[0]
@staticmethod
def make_cut_list(id_col):
cut_list = []
uniques = id_col.unique()
index_form = pd.Index(id_col)
for id in uniques:
idxs = id_col.index[index_form.get_loc(id)]
if isinstance(idxs, pd.Index):
idxs = idxs.tolist()
else:
idxs = [idxs]
cut_list.append(idxs)
return cut_list
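# Roughly, make_cut_list groups the row positions of id_col by identifier, e.g.
# >>> TriangleFromTableBuilder.make_cut_list(pd.Series(['a', 'a', 'b']))
# [[0, 1], [2]]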
@staticmethod
def do_the_magic(ds, pool):
date_cols = DateColIdentifier.identify_marked_date_cols(ds)
id_cols = pd.Series(ds.df_profiles.iloc[0, :] == SheetTypeDefinitions.ID_ELEMENT, index=ds.df_profiles.columns)
horizontal_matches = pd.DataFrame(0, columns=date_cols.index[date_cols], index=id_cols.index[id_cols])
for date_name in date_cols.index[date_cols]:
date_col = ds.df_data[date_name]
id_date_match_part = partial(TriangleFromTableBuilder.id_date_match, date_col)
id_col_list = [ds.df_data[col] for col in id_cols.index[id_cols]]
temp = pool.map(id_date_match_part, id_col_list)
temp_list = list(temp)
horizontal_matches.loc[:, date_name] = temp_list
#for id_name in id_cols.index[id_cols]:
# horizontal_matches.loc[id_name, date_name] = TriangleFromTableBuilder.id_date_match(ds, id_name, date_name)
id_col_name = horizontal_matches.max(axis=1).idxmax()
hori_date_col = horizontal_matches.max(axis=0).idxmax()
print(hori_date_col)
# remove the chosen horizontal column
date_cols[hori_date_col] = False
id_col = ds.df_data[id_col_name]
vert_col_match_part = partial(TriangleFromTableBuilder.verti_col_match, id_col)
date_col_list = [ds.df_data[date_col_name] for date_col_name in date_cols.index[date_cols]]
vert_scores = pool.map(vert_col_match_part, date_col_list)
vertical_matches = pd.Series(vert_scores, index=date_cols.index[date_cols])
#for date_col_name in date_cols.index[date_cols]:
# vertical_matches[date_col_name] = TriangleFromTableBuilder.verti_col_match(ds, id_col_name, date_col_name)
vert_date_col = vertical_matches.idxmax()
return id_col_name, hori_date_col, vert_date_col
@staticmethod
def verti_col_match(id_col, date_col):
#id_col, date_col = ds.df_data[id_col_name], ds.df_data[date_col_name]
# get number of unique items in date_col
match = len(date_col.unique())
# now minus for each break within one id
uniques = id_col.unique()
index_form = pd.Index(id_col)
'''
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
GDELTbase.py
Class for creating/maintaining data directory structure, bulk downloading of
GDELT files with column reduction, parsing/cleaning to JSON format, and export
of cleaned records to MongoDB.
Basic use should be by import and implementation within an IDE, or by editing
section # C00 and running this script directly.
Primary member functions include descriptive docstrings for their intent and
use.
See license.txt for information related to each open-source library used.
WARNING: project file operations are based on relative pathing from the
'scripts' directory this Python script is located in, given the creation of
directories 'GDELTdata' and 'EDAlogs' parallel to 'scripts' upon first
GDELTbase and GDELTeda class initializations.
If those directories are not already present, a fallback method for
string-literal directory reorientation may be found in GDELTbase shared class
data at this tag: # A01a - backup path specification.
Any given user's project directory must be specified there.
See also GDELTeda.py, tag # A02b - Project directory path, as any given user's
project directory must be specified for that os.chdir() call, also.
Contents:
A00 - GDELTbase
A01 - shared class data (toolData, localDb)
A01a - backup path specification
Note: Specification at A01a should be changed to suit a user's desired
directory structure, given their local filesystem.
A02 - __init__ w/ instanced data (localFiles)
B00 - class methods
B01 - updateLocalFilesIndex
B02 - clearLocalFilesIndex
B03 - showLocalFiles
B04 - wipeLocalFiles
B05 - extensionToTableName
B06 - isFileDownloaded
B07 - downloadGDELTFile
B08 - downloadGDELTDay
B09 - cleanFile (includes the following field/subfield parser functions)
B09a - themeSplitter
B09b - locationsSplitter
B09c - personsSplitter
B09d - organizationsSplitter
B09e - toneSplitter
B09f - countSplitter
B09g - One-liner date conversion function for post-read_csv use
B09h - llConverter
B10 - cleanTable
B11 - mongoFile
B12 - mongoTable
C00 - main w/ testing
'''
import pandas as pd
import numpy as np
import os
import pymongo
import wget
import json
from time import time
from datetime import datetime, tzinfo
from zipfile import ZipFile as zf
from pprint import pprint as pp
from urllib.error import HTTPError
# A00
class GDELTbase:
'''Base object for GDELT data acquisition, cleaning, and storage.
Shared class data:
-----------------
toolData - dict with these key - value pairs:
URLbase - "http://data.gdeltproject.org/gdeltv2/"
path - os.path path objects, 'raw' and 'clean', per-table
names - lists of string column names, per-table, original and reduced
extensions - dict mapping table names to file extensions, per-table
columnTypes - dicts mapping table column names to appropriate types
localDb - dict with these key - value pairs:
client - pymongo.MongoClient()
database - pymongo.MongoClient().capstone
collections - dict mapping table names to suitable mongoDB collections
Instanced class data:
--------------------
localFiles - dict, per-table keys for lists of local 'raw' and 'clean'
filenames
Class methods:
-------------
updateLocalFilesIndex()
clearLocalFilesIndex()
showLocalFiles()
wipeLocalFiles()
extensionToTableName()
isFileDownloaded()
downloadGDELTFile()
downloadGDELTDay()
cleanFile()
cleanTable()
mongoFile()
mongoTable()
'''
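# Example usage (a sketch; the date argument format for downloadGDELTDay is an assumption):
# >>> gBase = GDELTbase()
# >>> gBase.downloadGDELTDay('2021 Apr 01', 'events')
# >>> gBase.cleanTable('events')
# >>> gBase.mongoTable('events')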
# A01 - shared class data
toolData = {}
# A01a - backup path specification
# Failsafe path for local main project directory. Must be changed to suit
# location of any given end-user's 'script' directory in case directory
# 'GDELTdata' is not present one directory up.
toolData['projectPath'] = 'C:\\Users\\urf\\Projects\\WGU capstone'
# Controls generation of datafile download URLs in downloadGDELTDay()/File()
toolData['URLbase'] = "http://data.gdeltproject.org/gdeltv2/"
# Used in forming URLs for datafile download
toolData['extensions'] = {
'events' : "export.CSV.zip",
'gkg' : "gkg.csv.zip",
'mentions' : "mentions.CSV.zip",
}
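# Files on the server are named by 15-minute timestamp plus these extensions, so a full
# download URL looks roughly like:
# http://data.gdeltproject.org/gdeltv2/20210401000000.export.CSV.zip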
# These paths are set relative to the location of this script, one directory
# up, in 'GDELTdata', parallel to the script directory.
toolData['path'] = {}
toolData['path']['base']= os.path.join(os.path.abspath(__file__),
os.path.realpath('..'),
'GDELTdata')
toolData['path']['events'] = {
'table': os.path.join(toolData['path']['base'], 'events'),
'raw': os.path.join(toolData['path']['base'], 'events', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'events', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'events',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'events',
'realtimeClean')
}
toolData['path']['gkg'] = {
'table': os.path.join(toolData['path']['base'], 'gkg'),
'raw': os.path.join(toolData['path']['base'], 'gkg', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'gkg', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'gkg',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'gkg',
'realtimeClean')
}
toolData['path']['mentions'] = {
'table': os.path.join(toolData['path']['base'], 'mentions'),
'raw': os.path.join(toolData['path']['base'], 'mentions', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'mentions', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'mentions',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'mentions',
'realtimeClean')
}
# These mappings and lists are for recognition of all possible
# column names, and the specific discarding of a number of columns
# which have been predetermined as unnecessary in the context of
# simple EDA.
toolData['names'] = {}
toolData['names']['events'] = {
'original' : [
'GLOBALEVENTID',
'Day',
'MonthYear',
'Year',
'FractionDate',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1KnownGroupCode',
'Actor1EthnicCode',
'Actor1Religion1Code',
'Actor1Religion2Code',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2KnownGroupCode',
'Actor2EthnicCode',
'Actor2Religion1Code',
'Actor2Religion2Code',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'GoldsteinScale',
'NumMentions',
'NumSources',
'NumArticles',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_CountryCode',
'Actor1Geo_ADM1Code',
'Actor1Geo_ADM2Code',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor1Geo_FeatureID',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_CountryCode',
'Actor2Geo_ADM1Code',
'Actor2Geo_ADM2Code',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'Actor2Geo_FeatureID',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_CountryCode',
'ActionGeo_ADM1Code',
'ActionGeo_ADM2Code',
'ActionGeo_Lat',
'ActionGeo_Long',
'ActionGeo_FeatureID',
'DATEADDED',
'SOURCEURL',
],
'reduced' : [
'GLOBALEVENTID',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_Lat',
'ActionGeo_Long',
'DATEADDED',
'SOURCEURL',
],
}
toolData['names']['gkg'] = {
'original' : [
'GKGRECORDID',
'V21DATE',
'V2SourceCollectionIdentifier',
'V2SourceCommonName',
'V2DocumentIdentifier',
'V1Counts',
'V21Counts',
'V1Themes',
'V2EnhancedThemes',
'V1Locations',
'V2EnhancedLocations',
'V1Persons',
'V2EnhancedPersons',
'V1Organizations',
'V2EnhancedOrganizations',
'V15Tone',
'V21EnhancedDates',
'V2GCAM',
'V21SharingImage',
'V21RelatedImages',
'V21SocialImageEmbeds',
'V21SocialVideoEmbeds',
'V21Quotations',
'V21AllNames',
'V21Amounts',
'V21TranslationInfo',
'V2ExtrasXML',
],
'reduced' : [
'GKGRECORDID',
'V21DATE',
'V2SourceCommonName',
'V2DocumentIdentifier',
'V1Counts',
'V1Themes',
'V1Locations',
'V1Persons',
'V1Organizations',
'V15Tone',
],
}
toolData['names']['mentions'] = {
'original' : [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'SentenceID', #
'Actor1CharOffset',#
'Actor2CharOffset',#
'ActionCharOffset',#
'InRawText',
'Confidence',
'MentionDocLen', #
'MentionDocTone',
'MentionDocTranslationInfo', #
'Extras', #
],
'reduced' : [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'InRawText',
'Confidence',
'MentionDocTone',
],
}
# These mappings are used in automated dtype application to Pandas
# DataFrame collections of GDELT records, part of preprocessing.
toolData['columnTypes'] = {}
toolData['columnTypes']['events'] = {
'GLOBALEVENTID' : type(1),
'Actor1Code': pd.StringDtype(),
'Actor1Name': pd.StringDtype(),
'Actor1CountryCode': pd.StringDtype(),
'Actor1Type1Code' : pd.StringDtype(),
'Actor1Type2Code' : pd.StringDtype(),
'Actor1Type3Code' : pd.StringDtype(),
'Actor2Code': pd.StringDtype(),
'Actor2Name': pd.StringDtype(),
'Actor2CountryCode': pd.StringDtype(),
'Actor2Type1Code' : pd.StringDtype(),
'Actor2Type2Code' : pd.StringDtype(),
'Actor2Type3Code' : pd.StringDtype(),
'IsRootEvent': type(True),
'EventCode': pd.StringDtype(),
'EventBaseCode': pd.StringDtype(),
'EventRootCode': pd.StringDtype(),
'QuadClass': type(1),
'AvgTone': type(1.1),
'Actor1Geo_Type': type(1),
'Actor1Geo_FullName': pd.StringDtype(),
'Actor1Geo_Lat': pd.StringDtype(),
'Actor1Geo_Long': pd.StringDtype(),
'Actor2Geo_Type': type(1),
'Actor2Geo_FullName': pd.StringDtype(),
'Actor2Geo_Lat': pd.StringDtype(),
'Actor2Geo_Long': pd.StringDtype(),
'ActionGeo_Type': type(1),
'ActionGeo_FullName': pd.StringDtype(),
'ActionGeo_Lat': pd.StringDtype(),
'ActionGeo_Long': pd.StringDtype(),
'DATEADDED' : pd.StringDtype(),
'SOURCEURL': pd.StringDtype(),
}
toolData['columnTypes']['gkg'] = {
'GKGRECORDID' : pd.StringDtype(),
'V21DATE' : pd.StringDtype(),
'V2SourceCommonName' : pd.StringDtype(),
'V2DocumentIdentifier' : pd.StringDtype(),
}
toolData['columnTypes']['mentions'] = {
'GLOBALEVENTID' : type(1),
'EventTimeDate' : pd.StringDtype(),
'MentionTimeDate' : pd.StringDtype(),
'MentionType' : pd.StringDtype(),
'MentionSourceName' : pd.StringDtype(),
#!/usr/bin/env python
#CITATION https://www.biorxiv.org/content/10.1101/540229v1
#####IMPORT ALL NECESSARY MODULES#####
import sys, ast, json
import os
import argparse
import itertools
import ntpath
import numbers
import decimal
import pymol
from pymol import cmd
from pymol.cgo import *
from pymol.vfont import plain
import ntpath
import pandas as pd # Windows users: copy the pandas, dateutil and pytz folders from an Anaconda Python 2 install into PyMOL's site-packages folder (temporary workaround, otherwise PyMOL gets confused about the package paths)
import numpy as np
from pandas import Series
from math import isnan
#Biopython
from Bio.Alphabet import generic_protein
from Bio import SeqRecord,Alphabet,SeqIO
from Bio.SeqIO import SeqRecord
from Bio.Seq import Seq
from Bio import SeqIO
import Bio.PDB as PDB
from Bio.Seq import MutableSeq
from Bio.PDB.Polypeptide import is_aa
from Bio.SeqUtils import seq1
from Bio.SeqRecord import SeqRecord
from Bio.pairwise2 import format_alignment
#Readline
try:
import readline #Linux
except ImportError:
import pyreadline as readline #Windows
import rlcompleter
#Command Line Arguments
parser = argparse.ArgumentParser()
#COMPULSORY files: Give error if not present
parser.add_argument("--PDB",required = True,help ='Path to Protein DataBank file (use "" if errors)')
parser.add_argument('--Gene',required = True,help = 'Path to Single coding nucleotide/amino acid sequence or multiple alignment of coding nucleotide/amino acid sequences')
parser.add_argument('--M8',required =True,help ='Path to Out file from the Codeml model M8')
#parser.add_argument('--Full_PDB_sequence',help = 'Path to the Complete Fasta file of the PDB sequence (download from RCSB PDB as well), if we want to use the domain positions from here')
#OPTIONAL arguments:
parser.add_argument('--Full_PDB_sequence',help = 'Path to the Complete Fasta file of the PDB sequence (download from RCSB PDB as well), if we want to use the domain positions from here',default='no')
parser.add_argument('--prosite',help = 'Use prosite to identify domain positions in the PDB file sequence, default=yes', default='yes')
parser.add_argument('--domains',help = 'List of funcional domain positions retrieved from the PDB protein webpage starting in index 1 eg "[1,4,6]" or "list(range(1,30))+list(range(70,80))" (quotes are essential!), default=[]', default=[])
parser.add_argument('--file_list_domains',help = 'Path to file with functional domain positions in format 1\n1\n , default = []', default=[] )
parser.add_argument('--file_domains',help = 'Path to file with protein domains sequences (all in same format as the gene file), default = []', default=[] )
parser.add_argument('--format', help = 'Sequence or Multiple Alignment File Format (default fasta), do not write it with quotes',default = 'fasta')
parser.add_argument("--prob", help = 'Choice of level of posterior probability on the sites, 95% or 99% from M8 Out file', default= int(99)) # prob : Posterior probability percentage of the positive sites , 99 or 95, default= 99
parser.add_argument("--missing_data", help = 'Decide if the missing data("N") should be removed from the nucleotide sequence, default = yes, recommended for the alignment', default='yes') # missing_data : remove or not missing data from the sequence alignment: yes or no, default=yes
parser.add_argument("--sequence_number", help = 'Number of the sequence in the multiple alignment file', default=int(0)) # sequence_number : When using a multiple alignment file indicate with sequence number to use, if single sequence use 0, default = 0
parser.add_argument("--print_alignment",help= 'Choose to visualize the PDB file sequence aligned with the gene, default = no', default='no')
parser.add_argument("--PDB_chains",help= 'Chain(s) to extract from the PDB sequence(s), write separated by spaces: A B C, default = all ',nargs = '*',default='all')
args = parser.parse_args()
PDB_file,Full_PDB_sequence, Gene, M8,domains,List_domains,File_domains,Gene_file_format,prob,missing_data,sequence_number,print_alignment,chains,prosite = [args.PDB, args.Full_PDB_sequence,args.Gene, args.M8,args.domains,args.file_list_domains,args.file_domains,args.format,args.prob,args.missing_data,args.sequence_number,args.print_alignment,args.PDB_chains,args.prosite]
try:
sequence_number=eval(sequence_number)
except:
pass
try: #LINUX USERS
prob =eval(prob)
except:
pass
#Check all the necessary files are present and the right arguments are given:
File_formats = ['fasta','phylip_sequential','clustal','embl','genebank','gb','abi','ace','fastq-sanger','fastq','fastq-solexa','fastq-illumina','ig','imgt','pdb-seqres','pdb-atom','phd','phylip','pir','seqxml','sff','stockholm','swiss','tab','qual','uniprot-xml']
if not PDB_file or not Gene or not M8 :
parser.error('Missing one of the required arguments: PDB file / Gene / Out file')
elif prob not in [95,99]:
parser.error('Probability values can only be 95 or 99')
elif missing_data not in ['no','yes']:
parser.error('Choose to keep missing data: yes or to remove it: no')
elif not isinstance(sequence_number, numbers.Number):
parser.error('The number of the sequence in the alignment needs to be an integer')
elif Gene_file_format not in File_formats:
parser.error('Invalid Gene File Format.Check available SeqIO biopython file formats ')
elif List_domains and File_domains:
parser.error('Duplicated information, choose either --domains or --file_domains')
else:
if not List_domains and not File_domains:
print('Not List of Domains or Path to Text file with domains specified, using Prosites')
print('Building dataframe...')
else:
print('Building dataframe...')
def Global_alignment(chain_A,chain_B):
'''Global alignment between 2 given chains(str format)'''
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
alignment_global = pairwise2.align.globalms(chain_A, chain_B, 2, -1, -5, -.1)
#print(format_alignment(*alignment_global[0])) #print the alignment if desired
alignment_info_global = alignment_global[0]
Aligned_3u84, Aligned_ENST00000337652, score, begin, end = alignment_info_global
return alignment_info_global,alignment_global
def Local_alignment(chain_A,chain_B):
'''Local alignment between 2 given chains'''
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
alignment_local = pairwise2.align.localms(chain_A, chain_B, 2, -1, -5, -.1) ##0.5 points are deducted when opening a gap, and 0.1 points are deducted when extending it.
#print(format_alignment(*alignment_global[0]))
alignment_info_local = alignment_local[0]
Aligned_A, Aligned_B, score, begin, end = alignment_info_local
return alignment_info_local,alignment_local
def fasta_to_sequence(Fasta_file,Format):
'''Extract single sequence from file'''
from Bio import SeqRecord, SeqIO
fasta_sequences = SeqIO.parse(open(Fasta_file), Format)
for fasta in fasta_sequences:
name, sequence = fasta.id, str(fasta.seq)
return sequence,name
def fasta_to_sequences(Fasta_file,Format):
'''Same as before, can be merged with previous function'''
from Bio import SeqRecord, SeqIO
List_of_sequences = []
List_of_names = []
fasta_sequences = SeqIO.parse(open(Fasta_file),Format)
for fasta in fasta_sequences:
name, sequence = fasta.id, str(fasta.seq)
List_of_sequences.append(sequence)
List_of_names.append(name)
return List_of_sequences,List_of_names
def validate(seq, alphabet='dna'):
"""
Check that a sequence only contains values from DNA alphabet """
import re
alphabets = {'dna': re.compile('^[acgtn]*$', re.I),
'protein': re.compile('^[acdefghiklmnpqrstvwy]*$', re.I)}
if alphabets[alphabet].search(seq) is not None:
return True
else:
return False
def Translate_sequence(Fasta_file,Format,sequence_number): #Only for coding nucleotide sequences!!!!
from Bio.Alphabet import IUPAC,ProteinAlphabet
if sequence_number != 0: #if is not the first sequence
sequence = fasta_to_sequences(Fasta_file,Format)[0][sequence_number]
if validate(sequence) == True: #Check if is a nucleotided sequence
aa_seq = Seq(sequence).translate(stop_symbol="X")
else:
aa_seq=Seq(sequence)
else: #first sequence in the alignment/file
sequence= fasta_to_sequence(Fasta_file,Format)[0]
if validate(sequence) == True:
aa_seq = Seq(sequence).translate(stop_symbol="X")
else:
aa_seq = Seq(sequence)
return aa_seq
def Translate_and_Remove_missing_data(Fasta_file,Format,sequence_number):
'''Remove the missing data ('X') from the sequences after being translated, otherwise the codons are affected'''
if sequence_number != 0:
sequence =fasta_to_sequences(Fasta_file,Format)[0][sequence_number]
#clean_sequence = Seq(sequence).translate(stop_symbol="X")
#clean_sequence = clean_sequence.ungap('X')
if validate(sequence) == True:
clean_sequence = Seq(sequence).translate(stop_symbol="X")
clean_sequence = clean_sequence.ungap('X')
else:
clean_sequence = Seq(sequence).ungap('X') #maybe ungap
else:
sequence=fasta_to_sequence(Fasta_file,Format)[0]
if validate(sequence) == True:
clean_sequence = Seq(sequence).translate(stop_symbol="X")
clean_sequence = clean_sequence.ungap('X')
else:
clean_sequence = Seq(sequence).ungap('X') #maybe ungap
return clean_sequence
def Extract_sequence_from_PDB(PDB_file,chains):
''' Returns both the sequence contained in the PDB file and the residues coordinates for the desired chains'''
parser = PDB.PDBParser()
Name = ntpath.basename(PDB_file).split('.')[0]
#Name = PDB_file.split('/')[-1].split('.')[0]
structure = parser.get_structure('%s' %(Name),PDB_file)
############## Iterating over residues to extract all of them even if there is more than 1 chain
sequence = []
Residues_ID = []
if chains == 'all':
for chain in structure.get_chains():
for residue in chain:
if is_aa(residue.get_resname(), standard=True):
sequence.append(residue.get_resname())
Residues_ID.append(residue.get_id()[1])
else :
accumulated_residues = []
accumulated_ids = []
for letter in chains:
try: #in case the chain requested does not exits
for residue in structure[0][letter]:
#print(letter)
if is_aa(residue.get_resname(), standard=True):
accumulated_residues.append(residue.get_resname())
accumulated_ids.append(residue.get_id()[1])
except:
pass
sequence.append(''.join(accumulated_residues))
Residues_ID.append(accumulated_ids)
joined_sequence = ''.join(sequence)
PDB_sequence = seq1(joined_sequence) #3 letter code into 1 letter code
try:
Residues_ID = list(itertools.chain.from_iterable(Residues_ID))
except:
pass
return Residues_ID,PDB_sequence
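# Example (sketch): for a structure file with chain A the call would give the residue
# numbering used in the PDB plus the matching one-letter sequence, along the lines of
# >>> Residues_ID, PDB_sequence = Extract_sequence_from_PDB('my_structure.pdb', ['A'])
# >>> Residues_ID[:3]; PDB_sequence[:3]
# [25, 26, 27] 'MKT'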
def equivalent_positions(chain_A, chain_B,Aligned_A, Aligned_B,Residues_ID = None, Domain_Positions =None): #chainA = PDB #chainB=Transcript #Positions = List of Positive Selected sites (optional argument because we will use this function in 2 manners)
'''This function returns the corresponding coordinates of the gene residues in the PDB sequence, the domain positions in the PDB sequence,
or the matching positions between the gene with missing data and the gene without it'''
import numpy as np
#OriginalA = np.array(range(1, len(chain_A))) #Array of positions of the first given sequence
##Equivalent positions
List = [] #Will store the Positions where there are no gaps in the aligned sequences, these are still not the PDB positions !!
for index,residue in enumerate(Aligned_A):
if Aligned_A[index] != '-' and Aligned_B[index] != '-':
#OriginalA[:index] += 1
List.append(index +1) #In index 1 for PDB file
else:
pass
Final_Positions = ['nan']*len(chain_B) #should have the length of the gene. If an equivalence is found, 'nan' will be replaced with the equivalent PDB position
#####Finding the equivalent PDB Position of the residues in the gene transcript ignoring the GAPS generated in the alignment#######
Position_in_PDB =[]
gaps_first_segment = ''.join(Aligned_A[0:List[0]]).count('-')
Position_in_PDB.append(List[0] - gaps_first_segment)
#Position_in_PDB.append(List[1] - gaps_first_segment - ''.join(Aligned_A[List[0]:List[1]]).count('-'))
accumulated_number_gaps_in_this_segment = gaps_first_segment
for i in range(0,len(List)): #we skip the first interval, already performed !
try:
accumulated_number_gaps_in_this_segment += ''.join(Aligned_A[List[i]:List[i+1]]).count('-')
Position_in_PDB.append(List[i+1] - accumulated_number_gaps_in_this_segment)
except:
pass
#####Finding out the equivalent positions in the gene transcript of the PDB residues######: Same thing the other way round
Position_in_Transcript = []
gaps_first_segment_Transcript = ''.join(Aligned_B[0:List[0]]).count('-')
Position_in_Transcript.append(List[0] - gaps_first_segment_Transcript)
#Position_in_Transcript.append(List[1] - gaps_first_segment_Transcript - ''.join(Aligned_B[List[0]:List[1]]).count('-'))
accumulated_number_gaps_in_this_segment_transcript = gaps_first_segment_Transcript
for i in range(0, len(List)): # we skip the first interval
try:
accumulated_number_gaps_in_this_segment_transcript += ''.join(Aligned_B[List[i]:List[i + 1]]).count('-')
Position_in_Transcript.append(List[i+1] - accumulated_number_gaps_in_this_segment_transcript) # plus on otherwise negative numbers
except:
pass
Equivalent_Domain_positions = []
if not Domain_Positions:
for position_transcript,position_PDB in zip(Position_in_Transcript,Position_in_PDB): #this are lists of the correspondance on both sides
if Residues_ID: #residues ID has the same length as the PDB crystallized sequence
Final_Positions[position_transcript - 1] = Residues_ID[position_PDB - 1]
else:
Final_Positions[position_transcript-1] = position_PDB
return Final_Positions
else:
for position_transcript, position_PDB in zip(Position_in_Transcript, Position_in_PDB):
Final_Positions[position_transcript - 1] = position_PDB
for residue_id, domain_position in zip( Residues_ID,Domain_Positions): #Finding the correspondant coordinates of the functional domains in the PDB file structure
try:
specific_corresponding_domain_position = Final_Positions[domain_position - 1]
try:
Equivalent_Domain_positions.append(Residues_ID[specific_corresponding_domain_position - 1])
except:
pass
except:
pass
return Equivalent_Domain_positions
def List_of_positions_of_Positive_Sites(file,prob): # file = Out file from M8 #prob = 95 or 99
'''Reads the M8 output from the M7-vs-M8 codeml test and returns the positions of the positively selected sites with respect to the ALIGNMENT; a later function finds the equivalent positions in the sequence cleaned of missing data'''
length_alignment = []
with open(file, 'r') as f:
data = f.read().split('\n')
positions = []
line_start = []
line_stop = [] # Line number reference to stop collecting the info related to positive selected sites
for number, line in enumerate(data, 1):
if 'Bayes Empirical Bayes (BEB) analysis (Yang, Wong & Nielsen 2005. Mol. Biol. Evol. 22:1107-1118)' in line:
line_start = number
if 'The grid' in line:
line_stop = number
if 'Bayes Empirical Bayes (BEB) analysis (Yang, Wong & Nielsen 2005. Mol. Biol. Evol. 22:1107-1118)' in data:
diff = line_stop - line_start - 3
for i in range(6, int(diff)): # Start at line 6 after match
position = data[data.index(
'Bayes Empirical Bayes (BEB) analysis (Yang, Wong & Nielsen 2005. Mol. Biol. Evol. 22:1107-1118)') + i]
if prob == 99 :
if str(position.split()[2]).endswith('**'): # * > 95% confidence ** > 99 confidence
# print(position)
position = position.split()[0]
positions.append(int(position)) # Needs to be an int for later comparison
else:
if str(position.split()[2]).endswith('*'): # * > 95% confidence ** > 99 confidence
# print(position)
position = position.split()[0]
positions.append(int(position)) # Needs to be an int for later comparison
return positions
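# The BEB block parsed above lists, six lines below its header, entries of roughly this
# shape (site number, amino acid, posterior probability flagged with '*'/'**'):
#     17 L      0.988*
#     59 S      0.999**
# so prob=99 keeps only the '**' sites (here 59) while prob=95 keeps both.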
def Corresponding_positions_missing_notmissing_data(Missing_data,Clean): #Missing_data = WITH missing data ; Clean = NO missing data
''' Returns list of the equivalent positions among 2 sequences, in this case the same sequence with and without missing data'''
alignment_info_global, alignment_global = Global_alignment(Missing_data,Clean)
Aligned_A, Aligned_B, score, begin, end = alignment_info_global #A = Missing data ('N'); B = Not missing data
#print(format_alignment(*alignment_global[0]))
List = equivalent_positions(Missing_data,Clean, Aligned_A, Aligned_B) #All corresponding positions of the sequence with missing data to the sequence without missing data
return List
def Corresponding_functional_positions(PDB,Full_PDB_sequence,Residues_ID,functional_list):
alignment_info_global, alignment_global = Global_alignment(PDB,Full_PDB_sequence) #CHECK THIS is in the right order and it should be the other way round
Aligned_A, Aligned_B, score, begin, end = alignment_info_global
# print(format_alignment(*alignment_global[0]))
List = equivalent_positions(PDB,Full_PDB_sequence,Aligned_A,Aligned_B,Residues_ID=Residues_ID,Domain_Positions=functional_list) # All corresponding functional domain positions in the PDB
return List
def Corresponding_Coordinates_and_labels_PDB_Gene(PDB,Gene,Full_PDB_sequence,List_of_Positive_Positions,functional_list,Residues_ID,basepath,print_alignment,Clean_positions = None):
'''Performs the alignments, retrieves the equivalent coordinates and labels among the PDB sequence and the gene for each of the PDB positions.
Produces a dataframe with a label for each position, where 'Not' refers to a position that is neither positively selected nor in a functional domain,
'Selected' stands for positively selected, 'Domain' states that it belongs to a functional domain, and 'Selected_and_Domain' stands for both'''
alignment_info_global, alignment_global = Global_alignment(PDB, Gene)
Aligned_A, Aligned_B, score, begin, end = alignment_info_global #A = PDB; B = Gene(with or without missing data)
if print_alignment == 'yes':
print(format_alignment(*alignment_global[0]))
else:
pass
List_positions = list(range(1, len(Gene) + 1))
#Extract the corresponding positions of the positive selected sites in the clean of missing data gene sequence
List_positive_positions = []
if Clean_positions: #If the gene sequence has been cleaned and we have the corresponding positions
for element in List_of_Positive_Positions:
try:
List_positive_positions.append(Clean_positions.index(element)) #Append the index of the residue that has the same id as the one in positive selected sites
except:
pass
else: #we don't do anything
List_positive_positions = List_of_Positive_Positions
#For the dataframe we can label the positions from 1 to length of gene sequence (with or without missing data)
positions_dataframe = pd.DataFrame(pd.Series(List_positions))
positions_dataframe.rename(columns={positions_dataframe.columns[0]: "Gene_Position"}, inplace=True)
List = equivalent_positions(PDB, Gene, Aligned_A, Aligned_B,Residues_ID = Residues_ID) #List of equivalent positions of each of the gene residues into the PDB structure(residues ID!)
###The functional domains require also processing to find their equivalent residues in the PDB
if prosite=='yes' or Full_PDB_sequence == 'no':
functional_list = [Residues_ID[index - 1] for index in functional_list]
else:
alignment_info_global, alignment_global = Global_alignment(PDB, Full_PDB_sequence)
Aligned_A, Aligned_B, score, begin, end = alignment_info_global # A = PDB; B = Gene(with or without missing data)
functional_list = equivalent_positions(PDB,Full_PDB_sequence, Aligned_A, Aligned_B, Residues_ID=Residues_ID,Domain_Positions=functional_list)
#Add the positions to the dataframe
positions_dataframe['PDB_Position'] = List
Label_1 = positions_dataframe['Gene_Position'].isin(List_positive_positions) #Check if the position in the gene sequence is + selected
Label_2 = positions_dataframe['PDB_Position'].isin(functional_list) #Check if the PDB position is a functional domain
positions_dataframe['Label_1'] = Label_1 # Create a Column were the positive positions have a label True
positions_dataframe['Label_2'] = Label_2 #Create a column were the functional domain positions recieve the label True
positions_dataframe['Label_1'] = positions_dataframe['Label_1'].replace(True, 'Selected')
positions_dataframe['Label_1'] = positions_dataframe['Label_1'].replace(False, 'Not')
positions_dataframe['Label_2'] = positions_dataframe['Label_2'].replace(True, 'Domain')
positions_dataframe['Label_2'] = positions_dataframe['Label_2'].replace(False, 'Not')
positions_dataframe['Label'] = pd.Series(List_positions)
for index, row in positions_dataframe.iterrows():
if positions_dataframe.iloc[index, positions_dataframe.columns.get_loc('Label_1')] == 'Selected' and positions_dataframe.iloc[index, positions_dataframe.columns.get_loc('Label_2')] == 'Domain':
positions_dataframe.iloc[index, positions_dataframe.columns.get_loc('Label')] = 'Selected_and_Domain'
elif positions_dataframe.iloc[index, positions_dataframe.columns.get_loc('Label_1')] == 'Selected':
positions_dataframe.iloc[index, positions_dataframe.columns.get_loc('Label')] = 'Selected'
elif positions_dataframe.iloc[index, positions_dataframe.columns.get_loc('Label_2')] == 'Domain':
positions_dataframe.iloc[index, positions_dataframe.columns.get_loc('Label')] = 'Domain'
else:
positions_dataframe.iloc[index, positions_dataframe.columns.get_loc('Label')] = 'Not'
positions_dataframe.drop(['Label_1', 'Label_2'], 1, inplace=True)
Directory = os.path.dirname(basepath) # Obtain absolute path from the gene file, to create the dataframe there
#Gene_name = ntpath.basename(Gene)
PDB_name = ntpath.basename(basepath)
positions_dataframe.to_csv(Directory + "/%s_Positions" % (PDB_name), index=False, sep='\t')
print('Dataframe Ready at %s!' % (Directory))
return positions_dataframe
def Read_List_of_domains(List_domains,domains,File_domains,prosite):
"""Handles 3 possible types of input of the residues that conform the protein domain and returns their positions with regards to the Full sequence of the protein provided in the PDB website"""
if prosite == 'yes':
PDB_sequence = Extract_sequence_from_PDB(PDB_file,chains)[1]
"Returns positions in index 1"
import itertools
from Bio.ExPASy import ScanProsite
handle = ScanProsite.scan(seq=PDB_sequence) # can use a sequence (seq=) or a pdbID
result = ScanProsite.read(handle)
domains = []
for index, segment in enumerate(result, 1):
#print(segment)
#if 'score' in segment:
domains.append(list(range(segment['start'], segment['stop'])))
#else:
#pass
List_of_domains= list(itertools.chain.from_iterable(domains))
elif not File_domains:
if not domains:
if not List_domains:
List_of_domains = []
else:
List_of_domains = [line.rstrip('\n') for line in open(List_domains)]
List_of_domains = list(map(int, List_of_domains))
else:
# Convert the input list in string format to a string by 'executing' it
List_of_domains = eval(domains)
else:#If there is a file with the domain sequences we get the equivalent positions with regards to the complete PDB sequence, not the one in the PDB file yet
Full_PDB = ''.join(fasta_to_sequences(Full_PDB_sequence, 'fasta')[0])
Sequences_domains = ''.join(fasta_to_sequences(File_domains,Gene_file_format)[0])
###GLOBAL AlIGNMENT: Worse performance
# alignment_info_global, alignment_global = Global_alignment(Full_PDB,Sequences_domains)
# Aligned_Full, Aligned_Domain, score, begin, end = alignment_info_global # A = PDB; B = Gene(with or without missing data)
###LOCAL ALIGNMENT: Better results, since the domains are usually small sequences
alignment_info_local, alignment_local = Local_alignment(Full_PDB,Sequences_domains)
Aligned_Full, Aligned_Domain, score, begin, end = alignment_info_local # A = PDB; B = Gene(with or without missing data)
List_of_domains = equivalent_positions(Full_PDB_sequence,Sequences_domains,Aligned_Full,Aligned_Domain)
return List_of_domains
List_domains = Read_List_of_domains(List_domains,domains,File_domains,prosite) #Choose upon the 3 options to get the domain positions
def Wrapper_of_all_functions(PDB_file,Gene,Full_PDB_sequence,M8,List_Domains,Format,prob,Sequence_number,missing_data,print_alignment,chains):
'''Calling all the functions in the corresponding order/combination according to the optional arguments'''
Residues_ID,PDB_sequence = Extract_sequence_from_PDB(PDB_file,chains)
List_of_Positive_Positions =List_of_positions_of_Positive_Sites(M8,prob)
# Extract sequence no matter if single sequence or multiple alignment
Gene_missing_data = fasta_to_sequences(Gene, Format)[0][Sequence_number]
if Full_PDB_sequence == 'no' or prosite == 'yes': #Using Prosite for domains, no need of Full_PDB_sequence
if missing_data == 'no':
Clean_protein_sequence = Translate_and_Remove_missing_data(Gene, Format,Sequence_number) # Translate and remove missing data from the gene
Protein_missing_data = Translate_sequence(Gene, Format, Sequence_number) # Translate gene
Clean_positions = Corresponding_positions_missing_notmissing_data(Protein_missing_data,Clean_protein_sequence) # Find the equivalent positions among the protein with and without missing data
Dataframe = Corresponding_Coordinates_and_labels_PDB_Gene(PDB_sequence, Clean_protein_sequence,
Full_PDB_sequence, List_of_Positive_Positions,
List_domains, Residues_ID, PDB_file,
print_alignment, Clean_positions)
else: # Gene_missing_data is our sequence
Protein_missing_data = Translate_sequence(Gene, Format, Sequence_number) # Translate
Dataframe = Corresponding_Coordinates_and_labels_PDB_Gene(PDB_sequence, Protein_missing_data,
Full_PDB_sequence, List_of_Positive_Positions,
List_domains, Residues_ID, PDB_file,
print_alignment)
else:
Full_PDB_sequence = ''.join(fasta_to_sequences(Full_PDB_sequence, 'fasta')[0]) # We use all the chains in the file in this case, can be changed easily
#Checking if the user wants to perform the alignment with or without missing data in the gene
if missing_data == 'no':
Clean_protein_sequence = Translate_and_Remove_missing_data(Gene,Format,Sequence_number) #Translate and remove missing data from the gene
Protein_missing_data = Translate_sequence(Gene, Format, Sequence_number) # Translate gene
Clean_positions = Corresponding_positions_missing_notmissing_data(Protein_missing_data,Clean_protein_sequence) #Find the equivalent positions among the protein with and without missing data
Dataframe =Corresponding_Coordinates_and_labels_PDB_Gene(PDB_sequence,Clean_protein_sequence,Full_PDB_sequence, List_of_Positive_Positions,List_domains,Residues_ID,PDB_file,print_alignment, Clean_positions)
else: #Gene_missing_data is our sequence
Protein_missing_data = Translate_sequence(Gene,Format,Sequence_number) #Translate
Dataframe =Corresponding_Coordinates_and_labels_PDB_Gene(PDB_sequence,Protein_missing_data,Full_PDB_sequence,List_of_Positive_Positions,List_domains,Residues_ID,PDB_file,print_alignment)
return Dataframe
def Pymol():
'''Visualization program'''
#LAUNCH PYMOL
readline.parse_and_bind('tab: complete') # Set up path to pymol environment (in case is not installed)
# pymol launching: quiet (-q), without GUI (-c) and with arguments from command line
pymol.pymol_argv = ['pymol', '-q'] + sys.argv[1:]
pymol.finish_launching()
# Read User Input
sname = ntpath.basename(PDB_file) # Extract the filename without the path
# Dataframe_path = os.path.abspath(sys.argv[2]) # Dataframe generated by LYS.py
# Load Structures
pymol.cmd.load(PDB_file, sname)
pymol.cmd.disable("all") # toggles off the display of all currently visible representations of an object. It is the equivalent of deselecting the object
pymol.cmd.enable(sname)
def Colour_by_Selection(selection="all",
Selected="orange",
Not='grey50',
Domain='lime',
Selected_and_Domain='magenta',
):
colors = {
'Selected': Selected,
'Not': Not,
'Domain': Domain,
'Selected_and_Domain': Selected_and_Domain
}
#BACKGROUND & SHAPES
cmd.bg_color('white')
cmd.show_as('cartoon', 'all')
cmd.color('gray', 'all')
#ROTATION
cmd.rotate([0,1,0], angle = 210,selection = "all") # Commands to rotate the structures to visualize some specific side of the protein [x,y,z]
cmd.rotate([1, 0, 0], angle=-50, selection="all") #-20 for HRT2B
#ELIMINATING CHAINS
# Eliminating chains in the structure if desired
# cmd.select('chainA', 'chain A')
# cmd.remove('chain A')
#LEGEND
###The text that appears in the image, change placement accordingly
cgo = []
axes = [[5.0, 0.0, 0.0], [0.0, 5.0, 0.0],[0.0, 0.0, 5.0]] # Change the values if the protein does not quite fall into place
cyl_text(cgo, plain, [70.0, 50.0, 80.0], '%s' % (sname.split('.')[0]), radius=0.6, color=[0.0, 0.0, 0.0],axes=axes) # x=60 for RIOK2, x=40 and z=60 for ROS1
cyl_text(cgo, plain, [70.0, 40.0, 80.0], 'Positively Selected', radius=0.6, color=[1.0, 0.5, 0.0], axes=axes)
cyl_text(cgo, plain, [70.0, 30.0, 80.0], 'Not selected', radius=0.6, color=[0.5, 0.5, 0.5], axes=axes)
cyl_text(cgo, plain, [70.0, 20.0, 80.0], 'Functional Domain', radius=0.6, color=[0.5, 1.0, 0.5], axes=axes)
cyl_text(cgo, plain, [70.0, 10.0, 80.0], 'Both', radius=0.6, color=[1.0, 0.0, 1.0], axes=axes)
cmd.set("cgo_line_radius", 0.03) # 0.03
cmd.load_cgo(cgo, 'txt')
#ZOOM
cmd.zoom("all", -5.0) # Higher and positive values zoom out, it accepts negative values
#READ PREVIOUSLY CREATED DATAFRAME:
Data = Wrapper_of_all_functions(PDB_file, Gene, Full_PDB_sequence, M8, List_domains, Gene_file_format, prob,sequence_number, missing_data, print_alignment, chains)
#Option A: best alternative
Data['PDB_Position'] = Data['PDB_Position'].astype(np.float64) #Need to convert to float to use isfinite
Data = Data[np.isfinite(Data['PDB_Position'])] # Remove the Residues that got 'nan' value in their equivalent positions
position_phenotype_dict = Series(Data.Label.values, index=Data.PDB_Position)
''' CODE TO CLEAN AND STANDARDIZE ALL DATA '''
import glob
import os
import pandas as pd
# filepath expressions for data
data_path = 'data'
#PATH_ALL_AGE = 'data/clean/*_age.csv'
PATH_ALL_AGE = os.path.join(data_path, 'clean', '*_age.csv')
#PATH_ALL_SEX = 'data/clean/*_sexrace.csv'
PATH_ALL_SEX = os.path.join(data_path, 'clean', '*_sexrace.csv')
#PATH_LAWS = 'data/clean/suppression.csv'
PATH_LAWS = os.path.join(data_path, 'clean', 'suppression.csv')
# useful constants for renaming and removing columns
SEX_COLUMNS = [
'STATE', 'Group', 'Population (18+)', 'Total Citizen', 'Percent Citizen',
'CI Citizen', 'Total Registered', 'Percent Registered (18+)',
'CI Registered', 'Total Voted', 'Percent Voted (18+)', 'CI Voted', 'Year'
]
AGE_COLUMNS = [
'STATE', 'Age', 'Total', 'Total Registered', 'Percent registered (18+)',
'CI Registered', 'Total Voted', 'Percent voted (18+)', 'CI Voted', 'Year'
]
ORIGINAL_SEX_GROUPS = [
'Total', 'Male', 'Female', 'N-H White', 'N-H Black', 'API', 'Hispanic',
'Non-Hispanic White', 'Non-Hispanic Black', 'Asian and Pacific Islander',
'White non-Hispanic alone', 'Black alone', 'Asian alone',
'Hispanic (of any race)'
]
NEW_SEX_GROUPS = [
'Total', 'Male', 'Female', 'White', 'Black', 'Asian & Pacific Islander',
'Hispanic', 'White', 'Black', 'Asian & Pacific Islander', 'White',
'Black', 'Asian & Pacific Islander', 'Hispanic'
]
# useful constants for state names and IDs
STATE_NAMES = [
'ALABAMA', 'ALASKA', 'ARIZONA', 'ARKANSAS', 'CALIFORNIA',
'COLORADO', 'CONNECTICUT', 'DELAWARE', 'DISTRICT OF COLUMBIA', 'FLORIDA',
'GEORGIA', 'HAWAII', 'IDAHO', 'ILLINOIS', 'INDIANA', 'IOWA', 'KANSAS',
'KENTUCKY', 'LOUISIANA', 'MAINE', 'MARYLAND', 'MASSACHUSETTS',
'MICHIGAN', 'MINNESOTA', 'MISSISSIPPI', 'MISSOURI', 'MONTANA', 'NEBRASKA',
'NEVADA', 'NEW HAMPSHIRE', 'NEW JERSEY', 'NEW MEXICO', 'NEW YORK',
'NORTH CAROLINA', 'NORTH DAKOTA', 'OHIO', 'OKLAHOMA', 'OREGON',
'PENNSYLVANIA', 'RHODE ISLAND', 'SOUTH CAROLINA', 'SOUTH DAKOTA',
'TENNESSEE', 'TEXAS', 'UTAH', 'VERMONT', 'VIRGINIA', 'WASHINGTON',
'WEST VIRGINIA', 'WISCONSIN', 'WYOMING', 'NATIONAL'
]
# note these integers come from US Census Bureau ordering
STATE_NUMS = [
1, 2, 4, 5, 6, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 44, 45, 46, 47, 48, 49, 50, 51, 53, 54, 55, 56, 0
]
STATES_TABLE = list(zip(STATE_NAMES, STATE_NUMS))
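# STATES_TABLE pairs each state name with its Census Bureau code, e.g.
# STATES_TABLE[0] == ('ALABAMA', 1) and STATES_TABLE[-1] == ('NATIONAL', 0).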
# punctuation symbols to handle in data cleaning
COMMA_SYMBOL = ','
PERIOD_SYMBOL = '.'
EMPTY_STR = ''
def get_age_df(file_path):
'''
Load age data by file name into pd.DataFrame and
return DataFrame object with select columns and cleaned
values.
Note: NaN values are kept for possible use in viz.
Args:
file_path: str, designating file to retrieve for a given year
Returns:
pd.DataFrame, processed age data for that year
'''
# load and subset data
df_age = pd.read_csv(file_path, header=0, names=AGE_COLUMNS)
# clean up format and unwanted punctuation
df_age['STATE'] = df_age['STATE'].str.upper()
df_age['Year'] = df_age['Year'].astype(str)
df_age['Age'] = df_age['Age'].map(lambda x: x.lstrip(PERIOD_SYMBOL))
df_age['Total'] = df_age['Total'].replace(
COMMA_SYMBOL,
EMPTY_STR,
regex=True
)
df_age['Total Registered'] = df_age['Total Registered'].replace(
COMMA_SYMBOL,
EMPTY_STR,
regex=True
)
df_age['Total Voted'] = df_age['Total Voted'].replace(
COMMA_SYMBOL,
EMPTY_STR,
regex=True
)
# type cast numeric data
df_age['Total'] = df_age['Total'].apply(
pd.to_numeric,
errors='coerce'
)
df_age['Total Registered'] = df_age['Total Registered'].apply(
pd.to_numeric,
errors='coerce'
)
df_age['Total Voted'] = df_age['Total Voted'].apply(
pd.to_numeric,
errors='coerce'
)
# finish
return df_age
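# Example usage (hypothetical filename matching the PATH_ALL_AGE glob above):
# >>> df_age_2016 = get_age_df(os.path.join(data_path, 'clean', '2016_age.csv'))
# >>> df_age_2016[['Total', 'Total Registered', 'Total Voted']].dtypes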
def get_sexrace_df(file_path):
'''
Load sexrace data by file name into pd.DataFrame
and return DataFrame object with select columns and cleaned
values.
Note: NaN values are kept for possible use in viz.
Args:
file_path: str, file for which to retrieve sexrace data
Returns:
pd.DataFrame, processed sexrace data for that year
'''
# load and subset data
df_sex = pd.read_csv(file_path, header=0, names=SEX_COLUMNS)
import os
from pathlib import Path
import sys
from time import strptime
import path_config
import requests
from bs4 import BeautifulSoup
import pandas as pd
class EspnTournament():
def __init__(self) -> None:
self.tournament_info = {
"tournament_id":"",
"tournament_name":"",
"tournament_date":"",
"tournament_purse":"",
"win_total":"",
"tournament_size":"",
"winner_name":"",
"winner_id":"",
"season_id":"",
}
def __getitem__(self, i):
return self.tournament_info[i]
def set_all_w(self, w_name, w_id, w_total):
self.tournament_info["winner_name"] = w_name
self.tournament_info["winner_id"] = w_id
self.tournament_info["win_total"] = w_total
def set_all_missing(self):
self.tournament_info["win_total"] = None
self.tournament_info["tournament_size"] = None
self.tournament_info["winner_name"] = None
self.tournament_info["winner_id"] = None
def get_tournament_id(self):
return self.tournament_info["tournament_id"]
def set_tournament_id(self, url):
"""Set tournament id from a url.
Parameters
----------
url : str
ESPN tournament url.
Examples
--------
>>> espn_t = EspnTournament()
>>> t_url = "https://www.espn.com/golf/leaderboard?tournamentId=3802"
>>> espn_t.set_tournament_id(t_url)
"""
t_id = url[url.rfind("=") + 1:]
self.tournament_info["tournament_id"] = t_id
def get_tournament_name(self):
return self.tournament_info["tournament_name"]
def set_tournament_name(self, tourn_meta):
"""Set tournament name from a tournament meta.
Parameters
----------
tournament_meta : element.Tag
child of Leaderboard__Header class to find tournament name.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_tournament_id(tourn_meta)
"""
tourn_name = tourn_meta.find("h1").text
self.tournament_info["tournament_name"] = tourn_name
def parse_espn_dates(self, date, identifier, b_identifier=True):
"""Parse for subset date of the original date
Parameters
----------
date : str
ESPN tournament date to parse.
identifier : str
Identifier to be searched for.
b_identifier : bool, optional
Flag to tell where subset search begins.
Returns
-------
str
Parsed ESPN date.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.parse_espn_dates("Oct 5-8 2018", "-")
"Oct 5"
"""
if b_identifier:
if date.find(identifier) != -1:
b_idx = date.find(identifier)
# Should return month
n_date = date[:b_idx].rstrip()
return n_date
else:
# special case of only one date in link
b_idx = date.find(",")
n_date = date[:b_idx]
return n_date
else:
if date.find(identifier) != -1:
a_idx = date.find(identifier)
# Should return day
return date[a_idx: ]
else:
print("Did not find identifier in string for: ", date)
def date_parser(self, date):
"""Reformat ESPN tournament date.
Parameters
----------
date : str
Date to parse.
Returns
-------
str
Reformatted ESPN date.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.date_parser("Oct 5-8 2018")
"10/5/2018"
"""
year = date[date.rfind(" ")+1:]
month_and_day = self.parse_espn_dates(date, "-")
day = self.parse_espn_dates(month_and_day, " ", b_identifier=False)
day = day.lstrip()
month = self.parse_espn_dates(month_and_day, " ", b_identifier=True)
month_abr = month[:3]
month_number = strptime(month_abr, "%b").tm_mon
date_str = str(month_number) + "/" + day + "/" + year
return date_str
def get_date(self):
return self.tournament_info["tournament_date"]
def set_date(self, tourn_meta):
"""Set tournament date from a tournament meta.
Parameters
----------
tourn_meta : element.Tag
child of Leaderboard__Header class.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_date(tourn_meta)
"""
tourn_date = tourn_meta.find("span").text
t_date = self.date_parser(tourn_date)
self.tournament_info["tournament_date"] = t_date
def get_tournament_purse(self):
return self.tournament_info["tournament_purse"]
def set_tournament_purse(self, tourn_header):
"""Set tournament purse from a tournament header.
Parameters
----------
tourn_header : element.Tag
Leaderboard__Header class.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_tournament_purse(tourn_header)
"""
purse_class = tourn_header.find("div", class_="n7 clr-gray-04").text
# string find method
purse_start = purse_class.find("$") + 1
if purse_class.find("D") != -1:
purse_end = purse_class.find("D")
purse = purse_class[purse_start:purse_end]
else:
purse = purse_class[purse_start:]
purse = purse.replace(",", "")
self.tournament_info["tournament_purse"] = purse
def get_winning_score(self):
return self.tournament_info["win_total"]
def set_winning_score(self, t_body):
"""Set winning score total from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_winning_score(t_body)
"""
# tournament winner's total's data
tourn_totals = t_body.find("td", class_="Table__TD")
if tourn_totals:
totals = tourn_totals.find_next_siblings()
if len(totals) == 9:
# selects 4 round (72 hole) total
total = totals[-3].text
self.tournament_info["win_total"] = total
else:
total = totals[-3].text
if len(total) == 0:
self.tournament_info["win_total"] = None
else:
self.tournament_info["win_total"] = total
def get_tournament_size(self):
return self.tournament_info["tournament_size"]
def set_tournament_size(self, t_body):
"""Set tournament size from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_tournament_size(t_body)
"""
players = t_body.find_all("tr", class_="Table__TR Table__even")
if players is not None:
num_players = len(players)
self.tournament_info["tournament_size"] = num_players
def get_winner_name(self):
return self.tournament_info["winner_name"]
def set_winner_name(self, t_body):
"""Set winner name from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_winner_name(t_body)
"""
winner = t_body.find("a")
if winner:
name = winner.text
self.tournament_info["winner_name"] = name
else:
self.tournament_info["winner_name"] = None
def get_winner_id(self):
return self.tournament_info["winner_id"]
def set_winner_id(self, t_body):
"""Set winner id from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_winner_id(t_body)
"""
winner = t_body.find("a")
if winner:
winner_id = winner["href"]
# substring start and end indexes
start_winner = winner_id.find("id/") + 3
end_winner = winner_id.rfind("/")
id = winner_id[start_winner:end_winner]
self.tournament_info["winner_id"] = id
else:
self.tournament_info["winner_id"] = None
def get_season_id(self):
return self.tournament_info["season_id"]
def set_season_id(self, s_id):
"""Set season identifier from s_id.
Parameters
----------
s_id : int
Season identifier to set.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_season_id(2018)
"""
self.tournament_info["season_id"] = s_id
class EspnSeason():
def __init__(self, start, end=None) -> None:
b_url = "https://www.espn.com/golf/schedule/_/season/"
if end is not None:
season_urls = [b_url + str(season) for season in range(start, end+1)]
self.end = end
else:
season_urls = [f"{b_url}{start}"]
self.end = None
self.start = start
self.season_urls = season_urls
self.season_data = []
def retrieve_tournament_info(self, t_url, s_id):
"""Retrieve tournament information from tournament url and season id.
Parameters
----------
t_url : str
Tournament url to extract information.
s_id : int
Season identifier.
Examples
--------
>>> tournament_url = "https://www.espn.com/golf/leaderboard?tournamentId=3802"
>>> espn_t.retrieve_tournament_info(tournament_url, 2017)
"""
espn_t = EspnTournament()
with requests.Session() as session:
page = session.get(t_url)
if page.status_code == 200:
soup = BeautifulSoup(page.content, "html.parser")
header = soup.find("div", class_="Leaderboard__Header")
mt4 = header.find_all("div", class_="mt4")
tourn_meta = mt4[-1]
espn_t.set_tournament_id(t_url)
espn_t.set_tournament_name(tourn_meta)
espn_t.set_date(tourn_meta)
espn_t.set_tournament_purse(header)
                # Tables on the webpage; index with -1 in case of a playoff table
tourn_tables = soup.select("div.ResponsiveTable")
if tourn_tables:
                    # win_total, tournament_size, winner_name, winner_id
tourn_table = tourn_tables[-1]
tourn_body = tourn_table.find("tbody", class_="Table__TBODY")
espn_t.set_winning_score(tourn_body)
espn_t.set_tournament_size(tourn_body)
espn_t.set_winner_name(tourn_body)
espn_t.set_winner_id(tourn_body)
espn_t.set_season_id(s_id)
if espn_t.get_tournament_id() == "2277":
espn_t.set_all_w("<NAME>", "1037", "265")
else:
print(f"No div.ResponsiveTable, (Tournament {espn_t.get_tournament_id()} Cancelled)")
espn_t.set_all_missing()
espn_t.set_season_id(s_id)
self.season_data.append(espn_t)
def retrieve_season(self, season_url):
"""Retrieve season from season url.
Parameters
----------
season_url : str
Season url to extract information.
Examples
--------
>>> espn_s = EspnSeason(2018)
>>> season_url = "https://www.espn.com/golf/schedule/_/season/2018"
>>> espn_s.retrieve_season(season_url)
"""
with requests.Session() as session:
page = session.get(season_url)
if page.status_code == 200:
soup = BeautifulSoup(page.content, "html.parser")
season_table = soup.select("div.ResponsiveTable")
if season_table is not None:
season_body = season_table[0].find("tbody", class_="Table__TBODY")
tournaments = season_body.find_all("div", class_="eventAndLocation__innerCell")
if tournaments is not None:
for tournament in tournaments:
tournament_url = tournament.find("a")
if tournament_url:
t_url = tournament_url["href"]
print(f"Fetching {t_url} data")
season_id = season_url[season_url.rfind("/")+1 :]
self.retrieve_tournament_info(t_url, season_id)
else:
print(f"Error retrieving page. page status code: {page.status_code}")
def retrieve_all_seasons(self):
"""Retrieve all seasons set from constructor.
Examples
--------
>>> espn_s = EspnSeason(2018)
>>> espn_s.retrieve_all_seasons()
"""
for season in self.season_urls:
self.retrieve_season(season)
def feed_season_data(self):
"""Feed all season data held.
Returns
-------
pd.DataFrame
Season data in dataframe.
Examples
--------
>>> e_season = EspnSeason(2018)
>>> e_season.retrieve_all_seasons()
>>> df = e_season.feed_season_data()
"""
if self.season_data is not None:
data = [tournament.tournament_info for tournament in self.season_data]
df = pd.DataFrame(data)
df["tournament_purse"] = pd.to_numeric(df["tournament_purse"], downcast="integer")
df["win_total"] = | pd.to_numeric(df["win_total"], downcast="integer") | pandas.to_numeric |
"""
This script uses geopandas to place all addresses into CSDs.
This uses the digital boundary files, which extend into the water. This is deliberate so that addresses on coastlines are not accidentally dropped.
"""
import pandas as pd
import geopandas as gpd
from hashlib import blake2b
import sys
name_in = sys.argv[1]
name_out = sys.argv[2]
print(name_in)
prefix="/home/jovyan/data-vol-1/ODA/processing/temporary_files/"
df = pd.read_csv("{}{}".format(prefix,name_in), low_memory=False, dtype='str')
N1=len(df)
gdf = gpd.GeoDataFrame(
df, geometry=gpd.points_from_xy(df.LON.astype(float), df.LAT.astype(float)))
gdf.crs="EPSG:4326"
#read in Statcan boundary file
CSD = gpd.read_file("/home/jovyan/data-vol-1/ODA/processing/3-Spatial_Group/CSD/fixed_CSD.shp")
CSD=CSD[['CSDUID', 'CSDNAME','PRUID', 'geometry']]
#convert geometry of addresses to statcan geometry
gdf=gdf.to_crs(CSD.crs)
#perform spatial merge
gdf_csd=gpd.sjoin(gdf,CSD, op='within', how='left')
df= | pd.DataFrame(gdf_csd) | pandas.DataFrame |
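# Illustrative sketch (assumed final step, not from the original script): the source
# stops after converting the joined GeoDataFrame back to a plain DataFrame, so a
# plausible ending is to report the match rate and write the result to name_out.
print("{} of {} addresses matched to a CSD".format(df["CSDUID"].notna().sum(), N1))
df.to_csv("{}{}".format(prefix, name_out), index=False)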
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#########################################################################################
# Name: <NAME>
# Student ID: 64180008
# Department: Computer Engineering
# Assignment ID: A3
#########################################################################################
# In[2]:
import pandas as pd
import random
import numpy as np
# In[3]:
#########################################################################################
# QUESTION I
# Description: The parts of this question are solved using the pd.Series and pd.DataFrame functions.
#########################################################################################
print("\n")
print("SOLUTION OF QUESTION I:Perform the following tasks with pandas Series")
# In[4]:
print("1.a")
a = pd.Series([7,11,13,17])
print(a,"\n")
# In[5]:
print("1.b")
a = pd.Series([100 for i in range(5)])
print(a,"\n")
# In[6]:
print("1.c")
a = pd.Series([random.randint(0,100) for i in range(20)])
print(a,"\n")
# In[7]:
print("1.d")
temperatures = pd.Series([98.6,98.9,100.2,97.9],index=['Julie','Charlie','Sam','Andrea'])
print(temperatures,"\n")
# In[8]:
print("1.e")
dictionary = {'Julie':98.6,
'Charlie':98.9,
'Sam':100.2,
'Andrea':97.9}
a = pd.Series(dictionary)
print(a,"\n")
# In[9]:
#########################################################################################
# QUESTION II
# Description: Parts of this question are solved using the index features of the pandas library.
#########################################################################################
print("\n")
print("SOLUTION OF QUESTION II:Perform the following tasks with pandas DataFrames")
# In[10]:
print("2.a")
temp = {'Maxine':37.5,'James':37.3,'Amanda':39.9}
temperatures = | pd.DataFrame({'temp':temp}) | pandas.DataFrame |
from bookcut.mirror_checker import main as mirror_checker
from bookcut.downloader import filename_refubrished
from bookcut.settings import path_checker
from bs4 import BeautifulSoup as Soup
import mechanize
import pandas as pd
import os
import requests
from tqdm import tqdm
RESULT_ERROR = "\nNo results found or bad Internet connection."
def search_downloader(file, href):
# search_downloader downloads the book
response = requests.get(href, stream=True)
total_size = int(response.headers.get("content-length"))
inMb = total_size / 1000000
inMb = round(inMb, 2)
filename = file
print("\nDownloading...\n", "Total file size:", inMb, "MB")
path = path_checker()
filename = os.path.join(path, filename)
# progress bar
buffer_size = 1024
progress = tqdm(
response.iter_content(buffer_size),
f"{file}",
total=total_size,
unit="B",
unit_scale=True,
unit_divisor=1024,
)
with open(filename, "wb") as f:
for data in progress:
# write data read to the file
f.write(data)
# update the progress bar manually
progress.update(len(data))
print("================================\nFile saved as:", filename)
def link_finder(link, mirror_used):
    # link_finder searches LibGen for the download link and filename
page = requests.get(link)
soup = Soup(page.content, "html.parser")
searcher = [a["href"] for a in soup.find_all(href=True) if a.text]
try:
filename = soup.find("input")["value"]
except TypeError:
filename = None
if searcher[0].startswith("http") is False:
searcher[0] = mirror_used + searcher[0]
results = [filename, searcher[0]]
return results
def search(term):
# This function is used when searching to LibGen with the command
# bookcut search -t "keyword"
url = mirror_checker()
if url is not None:
br = mechanize.Browser()
br.set_handle_robots(False) # ignore robots
br.set_handle_refresh(False) #
br.addheaders = [("User-agent", "Firefox")]
br.open(url)
br.select_form("libgen")
input_form = term
br.form["req"] = input_form
ac = br.submit()
html_from_page = ac
soup = Soup(html_from_page, "html.parser")
table = soup.find_all("table")[2]
table_data = []
mirrors = []
extensions = []
for i in table:
j = 0
try:
td = i.find_all("td")
for tr in td:
# scrape mirror links
if j == 9:
temp = tr.find("a", href=True)
mirrors.append(temp["href"])
j = j + 1
row = [tr.text for tr in td]
table_data.append(row)
extensions.append(row[8])
except:
pass
# Clean result page
for j in table_data:
j.pop(0)
del j[8:15]
headers = [
"Author(s)",
"Title",
"Publisher",
"Year",
"Pages",
"Language",
"Size",
"Extension",
]
try:
tabular = pd.DataFrame(table_data)
tabular.index += 1
tabular.columns = headers
print(tabular)
choices = []
temp = len(mirrors) + 1
for i in range(1, temp):
choices.append(str(i))
choices.append("C")
choices.append("c")
while True:
tell_me = str(
input(
"\n\nPlease enter a number from 1 to {number}"
' to download a book or press "C" to abort'
" search: ".format(number=len(extensions))
)
)
if tell_me in choices:
if tell_me == "C" or tell_me == "c":
print("Aborted!")
return None
else:
c = int(tell_me) - 1
results = [mirrors[c], extensions[c]]
return results
except ValueError:
print("\nNo results found or bad Internet connection.")
print("Please,try again.")
return None
else:
print("\nNo results found or bad Internet connection.")
print("Please,try again.")
def single_search():
def search(term):
# This function is used when searching to LibGen with the command
# bookcut search -t "keyword"
url = mirror_checker()
if url is not None:
br = mechanize.Browser()
br.set_handle_robots(False) # ignore robots
br.set_handle_refresh(False) #
br.addheaders = [("User-agent", "Firefox")]
br.open(url)
br.select_form("libgen")
input_form = term
br.form["req"] = input_form
ac = br.submit()
html_from_page = ac
soup = Soup(html_from_page, "html.parser")
table = soup.find_all("table")[2]
table_data = []
mirrors = []
extensions = []
for i in table:
j = 0
try:
td = i.find_all("td")
for tr in td:
# scrape mirror links
if j == 9:
temp = tr.find("a", href=True)
mirrors.append(temp["href"])
j = j + 1
row = [tr.text for tr in td]
table_data.append(row)
extensions.append(row[8])
except:
pass
# Clean result page
for j in table_data:
j.pop(0)
del j[8:15]
headers = [
"Author(s)",
"Title",
"Publisher",
"Year",
"Pages",
"Language",
"Size",
"Extension",
]
try:
tabular = | pd.DataFrame(table_data) | pandas.DataFrame |
import util
import pandas as pd
import re
import pygsheets
import hashlib
import datetime
class SheetCreator:  # refactor: it's really a sheet creator
    """
    Pandas-backed Google Sheets creator.
    Tries to solve the problem of adding formulas: call add_objects and each object
    comes with its default formulas (protected ranges are still an open question).
    """
def __init__(self, title: object, header: object = None, formulas: object = None, folder_id: object = None) -> object:
self.title = title
self.header = header # - top row in sheet
self.formulas = formulas
self.folder_id = folder_id
self.df = pd.DataFrame(columns=header)
    def check_sheet_in_folder(self, pg, sheet_name):
        # Return True if a spreadsheet with this title already exists in the folder
for exist in pg.list_ssheets(parent_id=self.folder_id):
if exist['name'] == sheet_name:
return True
return False
def sync_update_sheet(self, pg):
"""
        find same key and update values or add new key
:param pg:
:return:
"""
# TODO open by key
wks = pg.open(self.title)
def create_or_update(self, pg):
        if self.check_sheet_in_folder(pg, self.title):
return self.update_sheet(pg)
else:
return self.create_sheet(pg)
def create_sheet(self, pg):
# TODO to GDrive
#if c.list_ssheets(parent_id=self.folder_id)
sh = pg.create(title=self.title, parent_id=self.folder_id)
sh.sheet1.set_dataframe(df=self.df, start='A1')
return sh.sheet1
def update_sheet(self, wks):
# TODO to GDrive
wks.set_dataframe(df=self.df, start='A1')
def init_from(self, wks_df):
self.df = wks_df
self.header = self.df.columns
def load_from_template(self):
"""
Init from frame contain header, formulas..
:param wks_df:
:return:
"""
pass
    def create_df(self, header=None):  # reset the backing DataFrame with a new header
self.header = header
self.df = pd.DataFrame(columns=header)
def add_objects(self, key_col, key_val):
"""
Paste value and default formulas
:param key_col: [1,2,3]
:param key_val: [[1,2,3]]
:return:
"""
last_row = self.df.shape[0]
for i, val in enumerate(key_val):
formula = self.put_ind_to_formuls(self.formulas.copy(), i + last_row + 2)
new_row = | pd.DataFrame(data=[formula], columns=self.header) | pandas.DataFrame |
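# Illustrative sketch (assumed usage, not from the original module): creating a sheet
# with SheetCreator.  The service-account file, folder id, header and formula template
# are placeholders, and put_ind_to_formuls()/add_objects() are not exercised here.
def _demo_sheet_creator():
    pg = pygsheets.authorize(service_file="service_account.json")   # placeholder credentials
    creator = SheetCreator(
        title="demo sheet",
        header=["key", "value", "total"],
        formulas=["", "", "=A{i}+B{i}"],            # assumed formula-template format
        folder_id="FOLDER_ID_PLACEHOLDER",
    )
    return creator.create_sheet(pg)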
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0., 2.],
dtype=dtype.subtype),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match='NA'):
arr.astype('Sparse[i8]')
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True],
dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray([True, False, False, True],
dtype=SparseDtype(bool, False))
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.to_dense()),
vals.astype(typ))
@pytest.mark.parametrize('array, dtype, expected', [
(SparseArray([0, 1]), 'float',
SparseArray([0., 1.], dtype=SparseDtype(float, 0.0))),
(SparseArray([0, 1]), bool, SparseArray([False, True])),
(SparseArray([0, 1], fill_value=1), bool,
SparseArray([False, True], dtype=SparseDtype(bool, True))),
pytest.param(
SparseArray([0, 1]), 'datetime64[ns]',
SparseArray(np.array([0, 1], dtype='datetime64[ns]'),
dtype=SparseDtype('datetime64[ns]',
pd.Timestamp('1970'))),
marks=[pytest.mark.xfail(reason="NumPy-7619")],
),
(SparseArray([0, 1, 10]), str,
SparseArray(['0', '1', '10'], dtype=SparseDtype(str, '0'))),
(SparseArray(['10', '20']), float, SparseArray([10.0, 20.0])),
(SparseArray([0, 1, 0]), object,
SparseArray([0, 1, 0], dtype=SparseDtype(object, 0))),
])
def test_astype_more(self, array, dtype, expected):
result = array.astype(dtype)
tm.assert_sp_array_equal(result, expected)
def test_astype_nan_raises(self):
arr = SparseArray([1.0, np.nan])
with pytest.raises(ValueError, match='Cannot convert non-finite'):
arr.astype(int)
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# XXX: this seems fine? You can construct an integer
# sparsearray with NaN fill value, why not update one?
# coerces to int
# msg = "unable to set fill_value 3\\.1 to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 3.1
assert arr.fill_value == 3.1
# msg = "unable to set fill_value nan to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
# msg = "unable to set fill_value 0 to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 0
assert arr.fill_value == 0
# msg = "unable to set fill_value nan to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
def test_set_fill_invalid_non_scalar(self, val):
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
msg = "fill_value must be a scalar"
with pytest.raises(ValueError, match=msg):
arr.fill_value = val
def test_copy(self):
arr2 = self.arr.copy()
assert arr2.sp_values is not self.arr.sp_values
assert arr2.sp_index is self.arr.sp_index
def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
@pytest.mark.parametrize('data,shape,dtype', [
([0, 0, 0, 0, 0], (5,), None),
([], (0,), None),
([0], (1,), None),
(['A', 'A', np.nan, 'B'], (4,), np.object)
])
def test_shape(self, data, shape, dtype):
# GH 21126
out = SparseArray(data, dtype=dtype)
assert out.shape == shape
@pytest.mark.parametrize("vals", [
[np.nan, np.nan, np.nan, np.nan, np.nan],
[1, np.nan, np.nan, 3, np.nan],
[1, np.nan, 0, 3, 0],
])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_dense_repr(self, vals, fill_value):
vals = np.array(vals)
arr = SparseArray(vals, fill_value=fill_value)
res = arr.to_dense()
tm.assert_numpy_array_equal(res, vals)
with tm.assert_produces_warning(FutureWarning):
res2 = arr.get_values()
tm.assert_numpy_array_equal(res2, vals)
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.to_dense()[i])
for i in range(len(self.arr)):
_checkit(i)
_checkit(-i)
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.to_dense()[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.to_dense()[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.to_dense()[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ], fill_value=0)
tm.assert_sp_array_equal(res, exp)
with pytest.raises(IndexError):
sparse[4:, :]
with pytest.raises(IndexError):
# check numpy compat
dense[4:, :]
def test_boolean_slice_empty(self):
arr = pd.SparseArray([0, 1, 2])
res = arr[[False, False, False]]
assert res.dtype == arr.dtype
@pytest.mark.parametrize("op", ["add", "sub", "mul",
"truediv", "floordiv", "pow"])
def test_binary_operators(self, op):
op = getattr(operator, op)
data1 = np.random.randn(20)
data2 = np.random.randn(20)
data1[::2] = np.nan
data2[::3] = np.nan
arr1 = SparseArray(data1)
arr2 = SparseArray(data2)
data1[::2] = 3
data2[::3] = 3
farr1 = SparseArray(data1, fill_value=3)
farr2 = SparseArray(data2, fill_value=3)
def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(op(first.to_dense(), second.to_dense()),
fill_value=first.fill_value)
assert isinstance(res, SparseArray)
assert_almost_equal(res.to_dense(), exp.to_dense())
res2 = op(first, second.to_dense())
assert isinstance(res2, SparseArray)
tm.assert_sp_array_equal(res, res2)
res3 = op(first.to_dense(), second)
assert isinstance(res3, SparseArray)
tm.assert_sp_array_equal(res, res3)
res4 = op(first, 4)
assert isinstance(res4, SparseArray)
# Ignore this if the actual op raises (e.g. pow).
try:
exp = op(first.to_dense(), 4)
exp_fv = op(first.fill_value, 4)
except ValueError:
pass
else:
assert_almost_equal(res4.fill_value, exp_fv)
assert_almost_equal(res4.to_dense(), exp)
with np.errstate(all="ignore"):
for first_arr, second_arr in [(arr1, arr2), (farr1, farr2)]:
_check_op(op, first_arr, second_arr)
def test_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
tm.assert_sp_array_equal(unpickled, obj)
_check_roundtrip(self.arr)
_check_roundtrip(self.zarr)
def test_generator_warnings(self):
sp_arr = SparseArray([1, 2, 3])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(action='always',
category=DeprecationWarning)
warnings.filterwarnings(action='always',
category=PendingDeprecationWarning)
for _ in sp_arr:
pass
assert len(w) == 0
def test_fillna(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0])
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan])
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
# float dtype's fill_value is np.nan, replaced by -1
s = SparseArray([0., 0., 0., 0.])
res = s.fillna(-1)
exp = SparseArray([0., 0., 0., 0.], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
# int dtype shouldn't have missing. No changes.
s = SparseArray([0, 0, 0, 0])
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
tm.assert_sp_array_equal(res, s)
s = SparseArray([0, 0, 0, 0], fill_value=0)
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=0)
tm.assert_sp_array_equal(res, exp)
# fill_value can be nan if there is no missing hole.
# only fill_value will be changed
s = SparseArray([0, 0, 0, 0], fill_value=np.nan)
assert s.dtype == SparseDtype(np.int64, fill_value=np.nan)
assert np.isnan(s.fill_value)
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
def test_fillna_overlap(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
# filling with existing value doesn't replace existing value with
# fill_value, i.e. existing 3 remains in sp_values
res = s.fillna(3)
exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)
tm.assert_numpy_array_equal(res.to_dense(), exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(3)
exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
def test_nonzero(self):
# Tests regression #21172.
sa = pd.SparseArray([
float('nan'),
float('nan'),
1, 0, 0,
2, 0, 0, 0,
3, 0, 0
])
expected = np.array([2, 5, 9], dtype=np.int32)
result, = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)
sa = pd.SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])
result, = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)
class TestSparseArrayAnalytics:
@pytest.mark.parametrize('data,pos,neg', [
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0)
])
def test_all(self, data, pos, neg):
# GH 17570
out = SparseArray(data).all()
assert out
out = SparseArray(data, fill_value=pos).all()
assert out
data[1] = neg
out = SparseArray(data).all()
assert not out
out = SparseArray(data, fill_value=pos).all()
assert not out
@pytest.mark.parametrize('data,pos,neg', [
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0)
])
@td.skip_if_np_lt("1.15") # prior didn't dispatch
def test_numpy_all(self, data, pos, neg):
# GH 17570
out = np.all(SparseArray(data))
assert out
out = np.all(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.all(SparseArray(data))
assert not out
out = np.all(SparseArray(data, fill_value=pos))
assert not out
# raises with a different message on py2.
msg = "the \'out\' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.all(SparseArray(data), out=np.array([]))
@pytest.mark.parametrize('data,pos,neg', [
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0)
])
def test_any(self, data, pos, neg):
# GH 17570
out = SparseArray(data).any()
assert out
out = SparseArray(data, fill_value=pos).any()
assert out
data[1] = neg
out = SparseArray(data).any()
assert not out
out = SparseArray(data, fill_value=pos).any()
assert not out
@pytest.mark.parametrize('data,pos,neg', [
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0)
])
@td.skip_if_np_lt("1.15") # prior didn't dispatch
def test_numpy_any(self, data, pos, neg):
# GH 17570
out = np.any(SparseArray(data))
assert out
out = np.any(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.any(SparseArray(data))
assert not out
out = np.any(SparseArray(data, fill_value=pos))
assert not out
msg = "the \'out\' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.any(SparseArray(data), out=out)
def test_sum(self):
data = np.arange(10).astype(float)
out = SparseArray(data).sum()
assert out == 45.0
data[5] = np.nan
out = SparseArray(data, fill_value=2).sum()
assert out == 40.0
out = SparseArray(data, fill_value=np.nan).sum()
assert out == 40.0
def test_numpy_sum(self):
data = np.arange(10).astype(float)
out = np.sum(SparseArray(data))
assert out == 45.0
data[5] = np.nan
out = np.sum(SparseArray(data, fill_value=2))
assert out == 40.0
out = np.sum(SparseArray(data, fill_value=np.nan))
assert out == 40.0
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.sum(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.sum(SparseArray(data), out=out)
@pytest.mark.parametrize("data,expected", [
(np.array([1, 2, 3, 4, 5], dtype=float), # non-null data
SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0]))),
(np.array([1, 2, np.nan, 4, 5], dtype=float), # null data
SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])))
])
@pytest.mark.parametrize("numpy", [True, False])
def test_cumsum(self, data, expected, numpy):
cumsum = np.cumsum if numpy else lambda s: s.cumsum()
out = cumsum(SparseArray(data))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=np.nan))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=2))
tm.assert_sp_array_equal(out, expected)
if numpy: # numpy compatibility checks.
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(SparseArray(data), out=out)
else:
axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid.
msg = "axis\\(={axis}\\) out of bounds".format(axis=axis)
with pytest.raises(ValueError, match=msg):
SparseArray(data).cumsum(axis=axis)
def test_mean(self):
data = np.arange(10).astype(float)
out = SparseArray(data).mean()
assert out == 4.5
data[5] = np.nan
out = SparseArray(data).mean()
assert out == 40.0 / 9
def test_numpy_mean(self):
data = np.arange(10).astype(float)
out = np.mean(SparseArray(data))
assert out == 4.5
data[5] = np.nan
out = np.mean(SparseArray(data))
assert out == 40.0 / 9
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.mean(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.mean(SparseArray(data), out=out)
def test_ufunc(self):
# GH 13853 make sure ufunc is applied to fill_value
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([1, np.nan, 2, np.nan, 2])
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=-1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))
tm.assert_sp_array_equal(np.sin(sparse), result)
def test_ufunc_args(self):
# GH 13853 make sure ufunc is applied to fill_value, including its arg
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([2, np.nan, 3, np.nan, -1])
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([2, 0, 3, -1], fill_value=2)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray([2, 0, 1, -1], fill_value=1)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
@pytest.mark.parametrize('fill_value', [0.0, np.nan])
def test_modf(self, fill_value):
# https://github.com/pandas-dev/pandas/issues/26946
sparse = pd.SparseArray([fill_value] * 10 + [1.1, 2.2],
fill_value=fill_value)
r1, r2 = np.modf(sparse)
e1, e2 = np.modf(np.asarray(sparse))
tm.assert_sp_array_equal(r1, pd.SparseArray(e1, fill_value=fill_value))
tm.assert_sp_array_equal(r2, pd.SparseArray(e2, fill_value=fill_value))
def test_nbytes_integer(self):
arr = SparseArray([1, 0, 0, 0, 2], kind='integer')
result = arr.nbytes
# (2 * 8) + 2 * 4
assert result == 24
def test_nbytes_block(self):
arr = SparseArray([1, 2, 0, 0, 0], kind='block')
result = arr.nbytes
# (2 * 8) + 4 + 4
        # sp_values, blocs, blengths
assert result == 24
def test_asarray_datetime64(self):
s = pd.SparseArray(
pd.to_datetime(['2012', None, None, '2013'])
)
np.asarray(s)
def test_density(self):
arr = SparseArray([0, 1])
assert arr.density == 0.5
def test_npoints(self):
arr = SparseArray([0, 1])
assert arr.npoints == 1
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestAccessor:
@pytest.mark.parametrize('attr', [
'npoints', 'density', 'fill_value', 'sp_values',
])
def test_get_attributes(self, attr):
arr = SparseArray([0, 1])
ser = pd.Series(arr)
result = getattr(ser.sparse, attr)
expected = getattr(arr, attr)
assert result == expected
@td.skip_if_no_scipy
def test_from_coo(self):
import scipy.sparse
row = [0, 3, 1, 0]
col = [0, 3, 1, 2]
data = [4, 5, 7, 9]
sp_array = scipy.sparse.coo_matrix((data, (row, col)))
result = pd.Series.sparse.from_coo(sp_array)
index = pd.MultiIndex.from_arrays([[0, 0, 1, 3], [0, 2, 1, 3]])
expected = pd.Series([4, 9, 7, 5], index=index, dtype='Sparse[int]')
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_to_coo(self):
import scipy.sparse
ser = pd.Series([1, 2, 3],
index=pd.MultiIndex.from_product([[0], [1, 2, 3]],
names=['a', 'b']),
dtype='Sparse[int]')
A, _, _ = ser.sparse.to_coo()
assert isinstance(A, scipy.sparse.coo.coo_matrix)
def test_non_sparse_raises(self):
ser = pd.Series([1, 2, 3])
with pytest.raises(AttributeError, match='.sparse'):
ser.sparse.density
def test_setting_fill_value_fillna_still_works():
# This is why letting users update fill_value / dtype is bad
# astype has the same problem.
arr = SparseArray([1., np.nan, 1.0], fill_value=0.0)
arr.fill_value = np.nan
result = arr.isna()
# Can't do direct comparison, since the sp_index will be different
# So let's convert to ndarray and check there.
result = np.asarray(result)
expected = np.array([False, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_setting_fill_value_updates():
arr = SparseArray([0.0, np.nan], fill_value=0)
arr.fill_value = np.nan
# use private constructor to get the index right
# otherwise both nans would be un-stored.
expected = SparseArray._simple_new(
sparse_array=np.array([np.nan]),
sparse_index=IntIndex(2, [1]),
dtype=SparseDtype(float, np.nan),
)
tm.assert_sp_array_equal(arr, expected)
@pytest.mark.parametrize("arr, loc", [
([None, 1, 2], 0),
([0, None, 2], 1),
([0, 1, None], 2),
([0, 1, 1, None, None], 3),
([1, 1, 1, 2], -1),
([], -1),
])
def test_first_fill_value_loc(arr, loc):
result = SparseArray(arr)._first_fill_value_loc()
assert result == loc
@pytest.mark.parametrize('arr', [
[1, 2, np.nan, np.nan],
[1, np.nan, 2, np.nan],
[1, 2, np.nan],
])
@pytest.mark.parametrize("fill_value", [
np.nan, 0, 1
])
def test_unique_na_fill(arr, fill_value):
a = pd.SparseArray(arr, fill_value=fill_value).unique()
b = pd.Series(arr).unique()
assert isinstance(a, SparseArray)
a = np.asarray(a)
tm.assert_numpy_array_equal(a, b)
def test_unique_all_sparse():
# https://github.com/pandas-dev/pandas/issues/23168
arr = SparseArray([0, 0])
result = arr.unique()
expected = SparseArray([0])
tm.assert_sp_array_equal(result, expected)
def test_map():
arr = SparseArray([0, 1, 2])
expected = SparseArray([10, 11, 12], fill_value=10)
# dict
result = arr.map({0: 10, 1: 11, 2: 12})
tm.assert_sp_array_equal(result, expected)
# series
result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))
tm.assert_sp_array_equal(result, expected)
# function
result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))
expected = SparseArray([10, 11, 12], fill_value=10)
tm.assert_sp_array_equal(result, expected)
def test_map_missing():
arr = SparseArray([0, 1, 2])
expected = SparseArray([10, 11, None], fill_value=10)
result = arr.map({0: 10, 1: 11})
tm.assert_sp_array_equal(result, expected)
def test_deprecated_values():
arr = SparseArray([0, 1, 2])
with | tm.assert_produces_warning(FutureWarning) | pandas.util.testing.assert_produces_warning |
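# Illustrative sketch (standalone example, not part of the pandas test suite): the
# fill_value idea most of the tests above revolve around; only values different from
# fill_value are physically stored.
def _demo_sparse_fill_value():
    arr = SparseArray([0, 0, 1, 2, 0, 0], fill_value=0)
    assert arr.npoints == 2                                  # only the 1 and the 2 are stored
    assert arr.fill_value == 0
    assert np.isnan(SparseArray([1.0, np.nan]).fill_value)   # NaN is the default for floats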
from contextlib import contextmanager
from unittest.mock import patch
from zipfile import ZipFile
from pandas import DataFrame, read_csv
from pandas.util.testing import assert_frame_equal
from pytest import raises, fixture, warns, mark
from IPython import get_ipython
from data_vault import Vault, parse_arguments, VaultMagics
from data_vault.frames import frame_manager
@contextmanager
def file_from_storage(archive_path, file_path, pwd: str = None, mode='r'):
if pwd:
pwd = pwd.encode()
with ZipFile(archive_path) as archive:
yield archive.open(
file_path,
mode=mode,
pwd=pwd
)
ipython = get_ipython()
EXAMPLE_DATA_FRAME = DataFrame([{'a': 1, 'b': 1}, {'a': 1, 'b': 2}])
def patch_ipython_globals(dummy_globals):
return patch.object(frame_manager, 'get_ipython_globals', return_value=dummy_globals)
@fixture
def mock_key(monkeypatch):
monkeypatch.setenv('KEY', 'a_strong_password')
def test_open_vault_message():
with raises(Exception, match='Please setup the storage with %open_vault first'):
ipython.magic('vault del x')
def test_vault_security_alert(tmpdir):
# should warn if not encryption key provided
with warns(UserWarning, match='Encryption variable not set - no encryption will be used..*'):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip')
# should not warn if secure explicitly toggled off
with warns(None) as record:
ipython.magic(f'open_vault --path {tmpdir}/archive.zip --secure False')
assert not record.list
# should not warn if encryption key provided
with warns(None) as record:
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e SOME_KEY')
assert not record.list
def test_usage_help(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e KEY')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
with raises(ValueError, match='No command matched. Did you mean:\n\t - store .*?'):
ipython.magic('vault store x')
def test_variable_not_defined(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e KEY')
with patch_ipython_globals(locals()):
with raises(ValueError, match=".*variable 'x' is not defined in the global namespace.*"):
ipython.magic('vault store x')
def test_function_not_defined(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e KEY')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
with raises(NameError, match="function 'pipe_delimited' is not defined in the global namespace"):
ipython.magic('vault store x in my_frames with pipe_delimited')
def test_store(tmpdir):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip --secure False')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
ipython.magic('vault store x in my_frames')
with file_from_storage(f'{tmpdir}/archive.zip', 'my_frames/x') as f:
data = read_csv(f, sep='\t', index_col=0)
assert x.equals(data)
def test_store_with_encryption(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e KEY')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
ipython.magic('vault store x in my_frames')
with raises(RuntimeError, match="File 'my_frames/x' is encrypted, password required for extraction"):
with file_from_storage(f'{tmpdir}/archive.zip', 'my_frames/x') as f:
data = read_csv(f, sep='\t', index_col=0)
def test_store_import_del_using_path(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip --secure False')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
ipython.magic('vault store x in "my_frames/custom_path.tsv"')
with file_from_storage(f'{tmpdir}/archive.zip', 'my_frames/custom_path.tsv') as f:
data = read_csv(f, sep='\t', index_col=0)
assert x.equals(data)
with patch_ipython_globals(globals()):
ipython.magic('vault import "my_frames/custom_path.tsv" as y')
assert_frame_equal(x, y, check_dtype=False)
# TODO: fails on Windows, paths thing
# ipython.magic('vault del "my_frames/custom_path.tsv"')
# with raises(KeyError, match="There is no item named 'my_frames/custom_path.tsv' in the archive"):
# ipython.magic('vault import "my_frames/custom_path.tsv" as z')
def test_store_with_exporter(tmpdir):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip --secure False')
x = EXAMPLE_DATA_FRAME
def pipe_delimited(df, path: str):
df.to_csv(path, sep='|')
with patch_ipython_globals(locals()):
ipython.magic('vault store x in my_frames with pipe_delimited')
with file_from_storage(f'{tmpdir}/archive.zip', 'my_frames/x') as f:
data = | read_csv(f, sep='|', index_col=0) | pandas.read_csv |
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function: batsman4s
# This function plots the number of 4s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman4s(file, name="A Hookshot"):
'''
Plot the numbers of 4s against the runs scored by batsman
Description
This function plots the number of 4s against the total runs scored by batsman. A 2nd order polynomial regression curve is also plotted. The predicted number of 4s for 50 runs and 100 runs scored is also plotted
Usage
batsman4s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsman6s
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
tendulkar = getPlayerData(35320,dir="../",file="tendulkar.csv",type="batting",
                          homeOrAway=[1,2],result=[1,2,4])
batsman4s("tendulkar.csv", "<NAME>")
'''
# Clean the batsman file and create a complete data frame
df = clean(file)
df['Runs'] = pd.to_numeric(df['Runs'])
df['4s'] = pd.to_numeric(df['4s'])
df1 = df[['Runs','4s']].sort_values(by=['Runs'])
# Set figure size
rcParams['figure.figsize'] = 10,6
# Get number of 4s and runs scored
runs = pd.to_numeric(df1['Runs'])
x4s = pd.to_numeric(df1['4s'])
atitle = name + "-" + "Runs scored vs No of 4s"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(runs, x4s, alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('4s')
plt.title(atitle)
# Create a polynomial of degree 2
poly = PolynomialFeatures(degree=2)
runsPoly = poly.fit_transform(runs.values.reshape(-1,1))
linreg = LinearRegression().fit(runsPoly,x4s)
plt.plot(runs,linreg.predict(runsPoly),'-r')
# Predict the number of 4s for 50 runs
b=poly.fit_transform(np.array([[50]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=50, color='b', linestyle=':')
# Predict the number of 4s for 100 runs
b=poly.fit_transform(np.array([[100]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=100, color='b', linestyle=':')
plt.text(180, 0.5,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
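# The fit above follows the standard scikit-learn pattern: expand runs into
# polynomial features, fit a linear model, then evaluate it at a chosen run total.
# A self-contained sketch of that pattern on made-up data (illustration only, not
# part of the original library):
def _sketch_poly_fit():
    import numpy as np
    from sklearn.linear_model import LinearRegression
    from sklearn.preprocessing import PolynomialFeatures
    runs = np.array([10, 25, 40, 60, 85, 120]).reshape(-1, 1)
    fours = np.array([1, 3, 4, 7, 9, 14])
    poly = PolynomialFeatures(degree=2)
    model = LinearRegression().fit(poly.fit_transform(runs), fours)
    # expected number of 4s for a 50-run innings
    return model.predict(poly.transform(np.array([[50]])))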
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the number of 6s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman6s(file, name="A Hookshot") :
'''
Description
Compute and plot the number of 6s in the total runs scored by batsman
Usage
batsman6s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
# batsman6s("tendulkar.csv","<NAME>")
'''
x6s = []
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
df = clean (file)
# Remove all rows where 6s are 0
a= df['6s'] !=0
b= df[a]
x6s=b['6s'].astype(int)
runs=pd.to_numeric(b['Runs'])
# Plot the 6s as a boxplot
atitle =name + "-" + "Runs scored vs No of 6s"
df1=pd.concat([runs,x6s],axis=1)
fig = sns.boxplot(x="6s", y="Runs", data=df1)
plt.title(atitle)
plt.text(2.2, 10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
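# The plot above groups innings by the number of sixes and shows the spread of runs
# in each group. The same idea in miniature, with invented data (sketch only):
def _sketch_runs_by_sixes():
    import pandas as pd
    import seaborn as sns
    df = pd.DataFrame({"6s": [1, 1, 2, 2, 3], "Runs": [30, 45, 60, 72, 110]})
    return sns.boxplot(x="6s", y="Runs", data=df)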
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsGround
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsGround(file, name="A Latecut"):
'''
Description
This function computes the average runs scored at different grounds and also indicates the number of innings played at these venues
Usage
batsmanAvgRunsGround(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
##tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
batsmanAvgRunsGround("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs at Ground"
plt.xticks(rotation="vertical",fontsize=8)
plt.axhline(y=50, color='b', linestyle=':')
plt.axhline(y=100, color='r', linestyle=':')
ax=sns.barplot(x='Ground', y="Runs_mean", data=df1)
plt.title(atitle)
plt.text(30, 180,'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
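# The aggregation step above produces MultiIndex columns such as ('Runs', 'mean'),
# which are joined into flat names like 'Runs_mean' before plotting. A compact,
# stand-alone illustration of that step (toy data, sketch only):
def _sketch_groupby_flatten():
    import pandas as pd
    batting = pd.DataFrame({"Ground": ["Eden", "Eden", "Lords"],
                            "Runs": [100, 50, 75]})
    df = batting[["Runs", "Ground"]].groupby("Ground").agg(["sum", "mean", "count"])
    df.columns = ["_".join(col).strip() for col in df.columns.values]
    return df.reset_index()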
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsOpposition
# This function plots the average runs scored by batsman versus the opposition. The xlabels indicate
# the Opposition and the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsOpposition(file, name="A Latecut"):
'''
This function computes and plots the average runs scored by the batsman against different oppositions
Description
This function computes the mean runs scored by batsman against different opposition
Usage
batsmanAvgRunsOpposition(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanAvgRunsGround
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
batsmanAvgRunsOpposition("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Opposition']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs vs Opposition"
plt.xticks(rotation="vertical",fontsize=8)
ax=sns.barplot(x='Opposition', y="Runs_mean", data=df1)
plt.axhline(y=50, color='b', linestyle=':')
plt.title(atitle)
plt.text(5, 50, 'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanContributionWonLost
# This plots the batsman's contribution to won and lost matches
#
###########################################################################################
def batsmanContributionWonLost(file,name="A Hitter"):
'''
Display the batsman's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the batsman in matches that were won and lost as box plots
Usage
batsmanContributionWonLost(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarsp = getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanContributionWonLost("tendulkarsp.csv","<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a column based on result
won = playersp[playersp['result'] == 1].copy()
lost = playersp[(playersp['result']==2) | (playersp['result']==4)].copy()
won['status']="won"
lost['status']="lost"
# Stack dataframes
df= pd.concat([won,lost])
df['Runs']= pd.to_numeric(df['Runs'])
ax = sns.boxplot(x='status',y='Runs',data=df)
atitle = name + "-" + "Runs in games won/lost-drawn"
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
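# The comparison above works by labelling the won and lost innings, stacking the two
# frames and letting seaborn split the box plot on the label column. The same pattern
# in miniature (invented numbers, sketch only):
def _sketch_runs_by_result():
    import pandas as pd
    import seaborn as sns
    won = pd.DataFrame({"Runs": [120, 80, 65], "status": "won"})
    lost = pd.DataFrame({"Runs": [15, 40, 22], "status": "lost"})
    return sns.boxplot(x="status", y="Runs", data=pd.concat([won, lost]))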
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeAverageRuns
# This function computes and plots the cumulative average runs by a batsman
#
###########################################################################################
def batsmanCumulativeAverageRuns(file,name="A Leg Glance"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
batsmanCumulativeAverageRuns("tendulkar.csv", "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
# Compute cumulative average
cumAvg = runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)
atitle = name + "- Cumulative Average vs No of innings"
plt.plot(cumAvg)
plt.xlabel('Innings')
plt.ylabel('Cumulative average')
plt.title(atitle)
plt.text(200,20,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
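# The running average above (cumulative sum divided by a 1..n counter) matches
# pandas' built-in expanding mean; a quick check on toy data (sketch only):
def _sketch_cumulative_average():
    import numpy as np
    import pandas as pd
    runs = pd.Series([10, 30, 50, 20])
    manual = runs.cumsum() / pd.Series(np.arange(1, len(runs) + 1), runs.index)
    return bool(np.allclose(manual, runs.expanding().mean()))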
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeStrikeRate
# This function computes and plots the cumulative average strike rate of a batsman
#
###########################################################################################
def batsmanCumulativeStrikeRate(file,name="A Leg Glance"):
'''
Batsman's cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
batsmanCumulativeStrikeRate("tendulkar.csv", "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
strikeRate=pd.to_numeric(batsman['SR'])
# Compute cumulative strike rate
cumStrikeRate = strikeRate.cumsum()/pd.Series(np.arange(1, len(strikeRate)+1), strikeRate.index)
atitle = name + "- Cumulative Strike rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Strike Rate')
plt.title(atitle)
plt.plot(cumStrikeRate)
plt.text(200,60,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanDismissals
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(file, name="A Squarecut"):
'''
Display a pie chart of the dismissals of the batsman
Description
Display the dismissals of the batsman (caught, bowled, hit wicket etc) as percentages
Usage
batsmanDismissals(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
batsmanDismissals("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
d = batsman['Dismissal']
# Convert to data frame
df = pd.DataFrame(d)
df1=df['Dismissal'].groupby(df['Dismissal']).count()
df2 = pd.DataFrame(df1)
df2.columns=['Count']
df3=df2.reset_index(inplace=False)
# Plot a pie chart
plt.pie(df3['Count'], labels=df3['Dismissal'],autopct='%.1f%%')
atitle = name + "-Pie chart of dismissals"
plt.suptitle(atitle, fontsize=16)
plt.show()
plt.gcf().clear()
return
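# The groupby-count above is equivalent to a plain value count on the dismissal
# column; the same frequency table can be built as follows (toy data, sketch only):
def _sketch_dismissal_counts():
    import pandas as pd
    dismissals = pd.Series(["caught", "bowled", "caught", "lbw"], name="Dismissal")
    return dismissals.value_counts().rename_axis("Dismissal").reset_index(name="Count")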
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 28 Aug 2019
# Function: batsmanMeanStrikeRate
# This function plots the Mean Strike Rate of the batsman against runs scored as a continuous graph
#
###########################################################################################
def batsmanMeanStrikeRate(file, name="A Hitter"):
'''
Calculate and plot the Mean Strike Rate of the batsman on total runs scored
Description
This function calculates the Mean Strike Rate of the batsman for each interval of runs scored
Usage
batsmanMeanStrikeRate(file, name = "A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
batsmanMeanStrikeRate("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs= pd.to_numeric(batsman['Runs'])
# Create the histogram
hist, bins = np.histogram(runs, bins = 20)
midBin=[]
SR=[]
# Loop through
for i in range(1,len(bins)):
# Find the mean of the bins (Runs)
midBin.append(np.mean([bins[i-1],bins[i]]))
# Filter runs that are between 2 bins
batsman['Runs']= | pd.to_numeric(batsman['Runs']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect to the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
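# Put differently: when every row has at most one match, `extractall` yields the
# same columns as `extract(expand=True)` plus an extra "match" index level, so
# `.xs(0, level="match")` recovers the `extract` result - which is what the
# assertions above verify.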
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 Em 3
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['<NAME>', '<NAME>'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# re.split 0, str.split -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
            except Exception:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip('x')
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_strip_lstrip_rstrip_args_unicode(self):
values = Series([u('xxABCxx'), u('xx BNSD'), u('LDFJH xx')])
rs = values.str.strip(u('x'))
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip(u('x'))
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip(u('x'))
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_wrap(self):
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with
# trailing whitespace equal to width
values = Series([u('hello world'), u('hello world!'), u(
'hello world!!'), u('abcdefabcde'), u('abcdefabcdef'), u(
'abcdefabcdefa'), u('ab ab ab ab '), u('ab ab ab ab a'), u(
'\t')])
# expected values
xp = Series([u('hello world'), u('hello world!'), u('hello\nworld!!'),
u('abcdefabcde'), u('abcdefabcdef'), u('abcdefabcdef\na'),
u('ab ab ab ab'), u('ab ab ab ab\na'), u('')])
rs = values.str.wrap(12, break_long_words=True)
assert_series_equal(rs, xp)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii
# Unicode
values = Series([' pre ', np.nan, u('\xac\u20ac\U00008000 abadcafe')
])
xp = Series([' pre', NA, u('\xac\u20ac\U00008000 ab\nadcafe')])
rs = values.str.wrap(6)
assert_series_equal(rs, xp)
def test_get(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.get(1)
expected = Series(['b', 'd', np.nan, 'g'])
tm.assert_series_equal(result, expected)
# mixed
mixed = Series(['a_b_c', NA, 'c_d_e', True, datetime.today(), None, 1,
2.])
rs = Series(mixed).str.split('_').str.get(1)
xp = Series(['b', NA, 'd', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.get(1)
expected = Series([u('b'), u('d'), np.nan, u('g')])
tm.assert_series_equal(result, expected)
# bounds testing
values = Series(['1_2_3_4_5', '6_7_8_9_10', '11_12'])
# positive index
result = values.str.split('_').str.get(2)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
# negative index
result = values.str.split('_').str.get(-3)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
def test_more_contains(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA,
'CABA', 'dog', 'cat'])
result = s.str.contains('a')
expected = Series([False, False, False, True, True, False, np.nan,
False, False, True])
assert_series_equal(result, expected)
result = s.str.contains('a', case=False)
expected = Series([True, False, False, True, True, False, np.nan, True,
False, True])
assert_series_equal(result, expected)
result = s.str.contains('Aa')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba', case=False)
expected = Series([False, False, False, True, True, False, np.nan,
True, False, False])
assert_series_equal(result, expected)
def test_contains_nan(self):
# PR #14171
s = Series([np.nan, np.nan, np.nan], dtype=np.object_)
result = s.str.contains('foo', na=False)
expected = Series([False, False, False], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na=True)
expected = Series([True, True, True], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na="foo")
expected = Series(["foo", "foo", "foo"], dtype=np.object_)
assert_series_equal(result, expected)
result = s.str.contains('foo')
expected = Series([np.nan, np.nan, np.nan], dtype=np.object_)
assert_series_equal(result, expected)
def test_more_replace(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA, 'CABA',
'dog', 'cat'])
result = s.str.replace('A', 'YYY')
expected = Series(['YYY', 'B', 'C', 'YYYaba', 'Baca', '', NA,
'CYYYBYYY', 'dog', 'cat'])
assert_series_equal(result, expected)
result = s.str.replace('A', 'YYY', case=False)
expected = Series(['YYY', 'B', 'C', 'YYYYYYbYYY', 'BYYYcYYY', '', NA,
'CYYYBYYY', 'dog', 'cYYYt'])
assert_series_equal(result, expected)
result = s.str.replace('^.a|dog', 'XX-XX ', case=False)
expected = Series(['A', 'B', 'C', 'XX-XX ba', 'XX-XX ca', '', NA,
'XX-XX BA', 'XX-XX ', 'XX-XX t'])
assert_series_equal(result, expected)
def test_string_slice_get_syntax(self):
s = Series(['YYY', 'B', 'C', 'YYYYYYbYYY', 'BYYYcYYY', NA, 'CYYYBYYY',
'dog', 'cYYYt'])
result = s.str[0]
expected = s.str.get(0)
assert_series_equal(result, expected)
result = s.str[:3]
expected = s.str.slice(stop=3)
assert_series_equal(result, expected)
result = s.str[2::-1]
expected = s.str.slice(start=2, step=-1)
assert_series_equal(result, expected)
def test_string_slice_out_of_bounds(self):
s = Series([(1, 2), (1, ), (3, 4, 5)])
result = s.str[1]
expected = Series([2, np.nan, 4])
assert_series_equal(result, expected)
s = Series(['foo', 'b', 'ba'])
result = s.str[1]
expected = Series(['o', np.nan, 'a'])
assert_series_equal(result, expected)
def test_match_findall_flags(self):
data = {'Dave': '<EMAIL>',
'Steve': '<EMAIL>',
'Rob': '<EMAIL>',
'Wes': np.nan}
data = Series(data)
pat = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})'
result = data.str.extract(pat, flags=re.IGNORECASE, expand=True)
assert result.iloc[0].tolist() == ['dave', 'google', 'com']
result = data.str.match(pat, flags=re.IGNORECASE)
assert result[0]
result = data.str.findall(pat, flags=re.IGNORECASE)
assert result[0][0] == ('dave', 'google', 'com')
result = data.str.count(pat, flags=re.IGNORECASE)
assert result[0] == 1
with tm.assert_produces_warning(UserWarning):
result = data.str.contains(pat, flags=re.IGNORECASE)
assert result[0]
def test_encode_decode(self):
base = Series([u('a'), u('b'), u('a\xe4')])
series = base.str.encode('utf-8')
f = lambda x: x.decode('utf-8')
result = series.str.decode('utf-8')
exp = series.map(f)
tm.assert_series_equal(result, exp)
def test_encode_decode_errors(self):
encodeBase = Series([u('a'), u('b'), u('a\x9d')])
pytest.raises(UnicodeEncodeError, encodeBase.str.encode, 'cp1252')
f = lambda x: x.encode('cp1252', 'ignore')
result = encodeBase.str.encode('cp1252', 'ignore')
exp = encodeBase.map(f)
tm.assert_series_equal(result, exp)
decodeBase = Series([b'a', b'b', b'a\x9d'])
pytest.raises(UnicodeDecodeError, decodeBase.str.decode, 'cp1252')
f = lambda x: x.decode('cp1252', 'ignore')
result = decodeBase.str.decode('cp1252', 'ignore')
exp = decodeBase.map(f)
tm.assert_series_equal(result, exp)
def test_normalize(self):
        values = ['ABC', u'ABC', u'123', np.nan, u'アイエ']
s = Series(values, index=['a', 'b', 'c', 'd', 'e'])
normed = [u'ABC', u'ABC', u'123', np.nan, u'アイエ']
expected = Series(normed, index=['a', 'b', 'c', 'd', 'e'])
result = s.str.normalize('NFKC')
tm.assert_series_equal(result, expected)
        expected = Series([u'ABC', u'ABC', u'123', np.nan, u'アイエ'],
                          index=['a', 'b', 'c', 'd', 'e'])
result = s.str.normalize('NFC')
tm.assert_series_equal(result, expected)
with tm.assert_raises_regex(ValueError,
"invalid normalization form"):
s.str.normalize('xxx')
        s = Index([u'ABC', u'123', u'アイエ'])
expected = Index([u'ABC', u'123', u'アイエ'])
result = s.str.normalize('NFKC')
tm.assert_index_equal(result, expected)
def test_cat_on_filtered_index(self):
df = DataFrame(index=MultiIndex.from_product(
[[2011, 2012], [1, 2, 3]], names=['year', 'month']))
df = df.reset_index()
df = df[df.month > 1]
str_year = df.year.astype('str')
str_month = df.month.astype('str')
str_both = str_year.str.cat(str_month, sep=' ')
assert str_both.loc[1] == '2011 2'
str_multiple = str_year.str.cat([str_month, str_month], sep=' ')
assert str_multiple.loc[1] == '2011 2 2'
def test_str_cat_raises_intuitive_error(self):
# https://github.com/pandas-dev/pandas/issues/11334
s = Series(['a', 'b', 'c', 'd'])
message = "Did you mean to supply a `sep` keyword?"
with tm.assert_raises_regex(ValueError, message):
s.str.cat('|')
with tm.assert_raises_regex(ValueError, message):
s.str.cat(' ')
def test_index_str_accessor_visibility(self):
from pandas.core.strings import StringMethods
if not compat.PY3:
cases = [(['a', 'b'], 'string'), (['a', u('b')], 'mixed'),
([u('a'), u('b')], 'unicode'),
(['a', 'b', 1], 'mixed-integer'),
(['a', 'b', 1.3], 'mixed'),
(['a', 'b', 1.3, 1], 'mixed-integer'),
(['aa', datetime(2011, 1, 1)], 'mixed')]
else:
cases = [(['a', 'b'], 'string'), (['a', u('b')], 'string'),
([u('a'), u('b')], 'string'),
(['a', 'b', 1], 'mixed-integer'),
(['a', 'b', 1.3], 'mixed'),
(['a', 'b', 1.3, 1], 'mixed-integer'),
(['aa', datetime(2011, 1, 1)], 'mixed')]
for values, tp in cases:
idx = Index(values)
assert isinstance(Series(values).str, StringMethods)
assert isinstance(idx.str, StringMethods)
assert idx.inferred_type == tp
for values, tp in cases:
idx = Index(values)
assert isinstance(Series(values).str, StringMethods)
assert isinstance(idx.str, StringMethods)
assert idx.inferred_type == tp
cases = [([1, np.nan], 'floating'),
([datetime(2011, 1, 1)], 'datetime64'),
([timedelta(1)], 'timedelta64')]
for values, tp in cases:
idx = Index(values)
message = 'Can only use .str accessor with string values'
with tm.assert_raises_regex(AttributeError, message):
Series(values).str
with tm.assert_raises_regex(AttributeError, message):
idx.str
assert idx.inferred_type == tp
        # MultiIndex has mixed dtype, but does not allow use of the accessor
idx = MultiIndex.from_tuples([('a', 'b'), ('a', 'b')])
assert idx.inferred_type == 'mixed'
message = 'Can only use .str accessor with Index, not MultiIndex'
with | tm.assert_raises_regex(AttributeError, message) | pandas.util.testing.assert_raises_regex |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from backtesting.backtester.BackTest.backtest import Strategy, Portfolio
import backtesting.backtester.fuzzySystem.membership as fuzz
import backtesting.backtester.fuzzySystem.control as ctrl
from pandas import to_datetime
class FuzzyMovingAverageCrossStrategy(Strategy):
"""
Requires:
symbol - A stock symbol on which to form a strategy on.
bars - A DataFrame of bars for the above symbol.
short_window - Lookback period for short moving average.
long_window - Lookback period for long moving average."""
def __init__(self, symbol, bars, short_window, long_window):
self.symbol = symbol
self.bars = bars
self.short_window = int(short_window)
self.long_window = int(long_window)
def generate_signals(self):
"""Returns the DataFrame of symbols containing the signals
to go long, short or hold (1, -1 or 0)."""
signals = | pd.DataFrame(index=self.bars.index) | pandas.DataFrame |
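# Illustrative sketch (uses the numpy/pandas imports above): the docstring
# describes a moving-average crossover, i.e. go long when the short-window
# rolling mean rises above the long-window rolling mean. A minimal, non-fuzzy
# version of that signal logic is sketched below; the 'Close' column name and
# the commented synthetic prices are assumptions for illustration only.
def simple_ma_cross_signals(bars, short_window=5, long_window=20):
    signals = pd.DataFrame(index=bars.index)
    short_ma = bars['Close'].rolling(short_window, min_periods=1).mean()
    long_ma = bars['Close'].rolling(long_window, min_periods=1).mean()
    # 1.0 while the short average sits above the long average, else 0.0
    signals['signal'] = np.where(short_ma > long_ma, 1.0, 0.0)
    # +1.0 marks a buy crossover, -1.0 a sell crossover
    signals['positions'] = signals['signal'].diff().fillna(0.0)
    return signals
# Example usage with synthetic prices:
# prices = pd.DataFrame({'Close': np.linspace(100.0, 120.0, 60)},
#                       index=pd.date_range('2020-01-01', periods=60))
# print(simple_ma_cross_signals(prices).tail())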
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 14:55:21 2018
@author: <NAME>
"""
import pandas as pd
import scipy.sparse
import pytest
from sklearn.base import clone
from sklearn.exceptions import NotFittedError
from tests.helpers.testing_help import rec_assert_equal
from aikit.tools.data_structure_helper import convert_generic, get_type
from aikit.enums import DataTypes
def assert_raise_not_fitted(encoder, df):
with pytest.raises(NotFittedError):
encoder.transform(df)
def assert_raise_value_error(encoder, df):
with pytest.raises(ValueError):
encoder.transform(df)
def gen_slice(ob, sl):
""" generic column slicer """
t = get_type(ob)
if t in (DataTypes.DataFrame, DataTypes.SparseDataFrame):
return ob.iloc[:, sl]
elif t == DataTypes.SparseArray:
if isinstance(ob, scipy.sparse.coo_matrix):
ob = scipy.sparse.csc_matrix(ob.copy())
return ob[:, sl]
else:
return ob[:, sl]
def verif_encoder_static(klass, enc_kwargs):
""" does a bunch of static test on an encoder, ie: tests without any data, without fitting anything """
assert hasattr(klass, "fit")
assert hasattr(klass, "transform")
assert hasattr(klass, "fit_transform")
encoder0 = klass(**enc_kwargs) # Create an object ...
encoder1 = clone(encoder0) # then try to clone it
encoder2 = klass() # Create an empty object and then set its params
encoder2.set_params(**enc_kwargs)
df1 = | pd.DataFrame() | pandas.DataFrame |
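# Illustrative usage sketch for gen_slice above: the same column slice works
# for a pandas DataFrame and a scipy sparse matrix because the helper
# dispatches on get_type. Assumes aikit's get_type recognises both containers
# as in the branches above; illustrative only, not used by the tests.
import numpy as np
def _demo_gen_slice():
    df_demo = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
    sp_demo = scipy.sparse.csr_matrix(np.arange(6).reshape(2, 3))
    first_two_df = gen_slice(df_demo, slice(0, 2))  # DataFrame with columns "a", "b"
    first_two_sp = gen_slice(sp_demo, slice(0, 2))  # sparse matrix of shape (2, 2)
    return first_two_df, first_two_sp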
"""unit test for loanpy.loanfinder.py (2.0 BETA) for pytest 7.1.1"""
from inspect import ismethod
from os import remove
from pathlib import Path
from unittest.mock import patch, call
from pandas import DataFrame, RangeIndex, Series, read_csv
from pandas.testing import (assert_frame_equal, assert_index_equal,
assert_series_equal)
from pytest import raises
from loanpy.loanfinder import Search, gen, read_data, NoPhonMatch
from loanpy import loanfinder as lf
def test_read_data():
"""test if data is being read correctly"""
# setup expected outcome, path, input-dataframe, mock pandas.read_csv
srsexp = Series(["a", "b", "c"], name="col1", index=[0, 1, 1])
path = Path(__file__).parent / "test_read_data.csv"
dfin = DataFrame({"col1": ["a", "b, c", "wrong clusters",
"wrong phonotactics"], "col2": [1, 2, 3, 4]})
with patch("loanpy.loanfinder.read_csv") as read_csv_mock:
read_csv_mock.return_value = dfin
# assert that the actual outcome equals the expected outcome
assert_series_equal(read_data(path, "col1"), srsexp)
# assert mock call to read_csv_mock was correct
assert read_csv_mock.call_args_list[0] == call(
path, encoding="utf-8", usecols=["col1"])
# test read recip
# setup: overwrite expected outcome and input-dataframe, mock
# pandas.read_csv
srsexp = Series(["(a)?", "(b|c)"], name="col1", index=[1, 3])
dfin = DataFrame({"col1": ["wrong vowel harmony", "(a)?",
"wrong phonotactics", "(b|c)"], "col2": [1, 2, 3, 4]})
with patch("loanpy.loanfinder.read_csv") as read_csv_mock:
read_csv_mock.return_value = dfin
# assert expected and actual outcome are the same pandas Series
assert_series_equal(read_data(path, "col1"), srsexp)
# assert mock was called with correct input
assert read_csv_mock.call_args_list[0] == call(
path, encoding="utf-8", usecols=["col1"])
# tear down
del path, dfin, srsexp
def test_gen():
"""test if generator yields the right things"""
# set up mock-tqdm (which is a progress bar)
def tqdm_mock(iterable, prefix):
"""this just returns the input and remembers it"""
tqdm_mock.called_with = (iterable, prefix)
return iterable
tqdm = lf.tqdm # remember the original tqdm to plug back in later
lf.tqdm = tqdm_mock # overwrite real tqdm with mock-tqdm function
# set up: create custom class
class SomeMonkeyClass:
def __init__(self):
self.somefunc_called_with = []
def somefunc(self, *args):
arglist = [*args]
self.somefunc_called_with.append(arglist)
return arglist[0] + arglist[1]
# set up: create instance of mock class
somemockclass = SomeMonkeyClass()
# assert generator yields/returns the expected outcome
assert list(gen([2, 3, 4], [4, 5, 6],
somemockclass.somefunc, "lol", "rofl")) == [6, 8, 10]
# assert 2 mock calls: tqdm and somefunc in SomeMonkeyClass
assert tqdm_mock.called_with == ([2, 3, 4], "lol")
assert somemockclass.somefunc_called_with == [
[2, 4, "rofl"], [3, 5, "rofl"], [4, 6, "rofl"]]
# tear down
lf.tqdm = tqdm # plug back in the original tqdm
del tqdm, somemockclass, tqdm_mock, SomeMonkeyClass
def test_init():
"""test if class Search is initiated correctly"""
# set up mock panphon class with mock edit distance
class DistanceMonkey:
def hamming_feature_edit_distance(): pass
# set up mock Adrc class for get_nse
class AdrcMonkey:
def get_nse(self, *args): pass
# set up mock function for semantic distance measure
def mock_gensim_mw():
return "sthsth"
    # set up vars for expected outcome, set up mock instance of DistanceMonkey class
srsad = Series(["a", "b", "c"], name="adapted", index=[0, 1, 1])
srsrc = Series(["a", "b", "c"], name="adapted", index=[0, 1, 1])
dist_mockinstance = DistanceMonkey()
# set up: mock read_data, mock panphon.Distance mock loanpy.adrc.Adrc
with patch("loanpy.loanfinder.read_data", side_effect=[
srsad, srsrc]) as read_data_mock:
with patch("loanpy.loanfinder.Distance") as Distance_mock:
Distance_mock.return_value = dist_mockinstance
with patch("loanpy.loanfinder.Adrc") as Adrc_mock:
Adrc_mock.return_value = AdrcMonkey
# initiate Search() with mock parameters
mocksearch = Search(
path2donordf="got.csv",
path2recipdf="hun.csv",
donorcol="adapted",
recipcol="reconstructed",
scdictlist_ad="scad.txt",
scdictlist_rc="scrc.txt",
semsim_msr=mock_gensim_mw)
# assert initiation went properly
assert_series_equal(mocksearch.search_in, srsad)
assert_series_equal(mocksearch.search_for, srsrc)
assert mocksearch.phondist == 0
assert ismethod(mocksearch.phondist_msr)
assert mocksearch.donpath == "got.csv"
assert mocksearch.recpath == "hun.csv"
assert mocksearch.doncol == "adapted"
assert mocksearch.reccol == "reconstructed"
assert mocksearch.semsim == 1
assert mocksearch.semsim_msr.__name__ == "mock_gensim_mw"
assert mocksearch.get_nse_ad == AdrcMonkey.get_nse
assert mocksearch.get_nse_rc == AdrcMonkey.get_nse
# double check with __dict__
msdict = mocksearch.__dict__
assert len(msdict) == 12
for i in msdict:
if i in zip(["search_in", "search_for"], [srsad, srsrc]):
assert_series_equal(msdict[i], expsrs)
if i == "doncol":
assert msdict[i] == "adapted"
if i == "donpath":
assert msdict[i] == "got.csv"
if i == "get_nse_ad":
assert msdict[i
] == AdrcMonkey.get_nse
if i == "get_nse_rc":
assert msdict[i
] == AdrcMonkey.get_nse
if i == "phondist":
assert msdict[i] == 0
if i == "phondist_msr":
hmng = dist_mockinstance.hamming_feature_edit_distance
assert msdict[i] == hmng
if i == "reccol" == "reconstructed":
assert msdict[
i] == "reconstructed"
if i == "recpath":
assert msdict[i] == "hun.csv"
if i == "semsim":
assert msdict[i] == 1
if i == "semsim_msr":
assert msdict[i] == mock_gensim_mw
# assert calls
read_data_mock.assert_has_calls(
[call("got.csv", "adapted"),
call("hun.csv", "reconstructed")])
Distance_mock.assert_called_with()
Adrc_mock.assert_has_calls(
[call(scdictlist="scad.txt", mode='adapt'),
call(scdictlist="scrc.txt", mode='reconstruct')])
# assert init runs correctly without entering parameters as well
# set up: mock read_data, mock panphon.Distance mock loanpy.adrc.Adrc
with patch("loanpy.loanfinder.read_data", side_effect=[
srsad, srsrc]) as read_data_mock:
with patch("loanpy.loanfinder.Distance") as Distance_mock:
Distance_mock.return_value = dist_mockinstance
with patch("loanpy.loanfinder.Adrc") as Adrc_mock:
Adrc_mock.return_value = AdrcMonkey
# initiate Search() without any parameters (default params)
mocksearch = Search()
# assert initiation went properly
assert_series_equal(mocksearch.search_in, srsad)
assert_series_equal(mocksearch.search_for, srsrc)
assert mocksearch.phondist == 0
assert ismethod(mocksearch.phondist_msr)
assert mocksearch.donpath is None
assert mocksearch.recpath is None
assert mocksearch.doncol == "ad"
assert mocksearch.reccol == "rc"
assert mocksearch.semsim == 1
# sic!
assert mocksearch.semsim_msr.__name__ == "gensim_multiword"
# not "mock_gensim_mw" even though mock func is plugged in!
assert mocksearch.get_nse_ad == AdrcMonkey.get_nse
assert mocksearch.get_nse_rc == AdrcMonkey.get_nse
# double check with __dict__
msdict = mocksearch.__dict__
assert len(msdict) == 12
for i in msdict:
if i in zip(["search_in", "search_for"], [srsad, srsrc]):
| assert_series_equal(msdict[i], expsrs) | pandas.testing.assert_series_equal |
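# Illustrative sketch of the behaviour test_gen() above asserts: gen() pairs
# two iterables, applies the given function to each pair plus any extra
# arguments, and wraps the first iterable in a tqdm progress bar. This only
# mirrors what the mocks record; the real loanpy implementation may differ.
def _gen_sketch(iterable, iterable2, function, prefix, *args):
    for ele1, ele2 in zip(lf.tqdm(iterable, prefix), iterable2):
        yield function(ele1, ele2, *args)
# e.g. list(_gen_sketch([2, 3, 4], [4, 5, 6], lambda a, b: a + b, "adding")) == [6, 8, 10]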
import pandas as pd
from sklearn import preprocessing
from sklearn.svm import SVC
import evaluateTask1
# import csv data
data = pd.read_csv('insurance-train.csv')
data_test = | pd.read_csv('insurance-test.csv') | pandas.read_csv |
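# Illustrative sketch of the usual flow these imports suggest: label-encode
# the categorical columns, fit an SVC on the training data, and predict on
# the test rows. The target column name and the assumption that both CSVs
# share the same feature columns are guesses; the real insurance schema is
# not shown in this file.
def _fit_svc_on_encoded(train_df, test_df, target_col='target'):
    train_df, test_df = train_df.copy(), test_df.copy()
    for col in train_df.columns:
        if train_df[col].dtype == object and col != target_col:
            le = preprocessing.LabelEncoder()
            le.fit(pd.concat([train_df[col], test_df[col]]).astype(str))
            train_df[col] = le.transform(train_df[col].astype(str))
            test_df[col] = le.transform(test_df[col].astype(str))
    features = [c for c in train_df.columns if c != target_col]
    clf = SVC(kernel='rbf', gamma='scale')
    clf.fit(train_df[features], train_df[target_col])
    return clf, clf.predict(test_df[features])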
from __future__ import division
import numpy as np
import datetime
import pandas as pd
from os.path import join, basename, exists
from os import makedirs
import matplotlib.pyplot as plt
from nilearn import input_data
from nilearn import datasets
import pandas as pd
from nilearn import plotting
from nilearn.image import concat_imgs
from nilearn.input_data import NiftiLabelsMasker
from nilearn.connectome import ConnectivityMeasure
import bct
anx_dir = '/home/data/nbc/physics-learning/anxiety-physics'
#anx_dir = '/Users/Katie/Dropbox/Projects/physics-anxiety'
labels = pd.read_csv(join(anx_dir, '17-networks-combo-ccn-5.14-regions-mni_labels.csv'), index_col=0, header=None, squeeze=True)
laird_2011_icn_regns = join(anx_dir, '17-networks-combo-ccn-5.14-regions-mni.nii.gz')
region_masker = input_data.NiftiLabelsMasker(laird_2011_icn_regns, standardize=True)
subjects = ["101", "102", "103", "104", "106", "107", "108", "110", "212",
"214", "215", "216", "217", "218", "219", "320", "321", "323",
"324", "325", "327", "328", "330", "331", "333", "334", "336",
"337", "338", "339", "340", "341", "342", "343", "345", "346",
"347", "348", "349", "350", "451", "455", "458", "459",
"460", "462", "463", "464", "467", "468", "469", "470", "502",
"503", "571", "572", "573", "574", "577", "578", "581", "582",
"584", "586", "588", "589", "591", "592", "593", "594", "595",
"596", "597", "598", "604", "605", "606", "607", "608", "609",
"610", "612", "613", "614", "615", "617", "618", "619", "620",
"621", "622", "623", "624", "625", "626", "627", "629", "630",
"631", "633", "634"]
#subjects = ['102','103']
data_dir = '/home/data/nbc/physics-learning/data/pre-processed'
pre_dir = '/home/data/nbc/physics-learning/anxiety-physics/pre'
post_dir = '/home/data/nbc/physics-learning/anxiety-physics/post'
sink_dir = '/home/data/nbc/physics-learning/anxiety-physics/output'
#data_dir = '/Users/Katie/Dropbox/Data'
#pre_dir = '/Users/Katie/Dropbox/Projects/physics-anxiety/test/pre'
#post_dir = '/Users/Katie/Dropbox/Projects/physics-anxiety/test/post'
#sink_dir = '/Users/Katie/Dropbox/Projects/physics-anxiety/test/output'
lab_notebook_dir = '/home/kbott006/lab_notebook/'
directories = [pre_dir, post_dir]
sessions = ['pre', 'post']
lab_notebook = pd.DataFrame(index=subjects, columns=['start', 'end', 'errors'])
intrantwk_conn = pd.DataFrame(index=subjects, columns=set(labels))
for i in np.arange(0, (len(sessions))):
print(sessions[i])
for s in subjects:
lab_notebook.at[s,'start'] = str(datetime.datetime.now())
try:
if not exists(join(sink_dir, sessions[i], s)):
makedirs(join(sink_dir, sessions[i], s))
fmri_file = join(directories[i], '{0}_filtered_func_data_mni.nii.gz'.format(s))
print(fmri_file)
confounds = join(sink_dir, sessions[i], s, '{0}_confounds.txt'.format(s))
#print confounds
#create correlation matrix from PRE resting state files
region_time_series = region_masker.fit_transform(fmri_file, confounds)
print(region_time_series.shape)
np.savetxt(join(sink_dir, sessions[i], s, '{0}_laird2011_ts_regions.csv'.format(s)), region_time_series, delimiter=",")
#region_time_series = region_masker.fit_transform (fmri_file, confounds)
#connectivity = ConnectivityMeasure(kind='correlation')
#network_correlation_matrix = connectivity.fit_transform([network_time_series])[0]
region_correlation_matrix = np.corrcoef(region_time_series.T)
corrmat = | pd.DataFrame(region_correlation_matrix, index=labels.index, columns=labels.index) | pandas.DataFrame |
# Build default model and do permutation feature importance (PFI)
import warnings
import pandas as pd
import numpy as np
from sklearn import ensemble, model_selection, metrics, inspection
from skopt import BayesSearchCV, space
import shap
import load_data
import misc_util
RANDOM_SEED = 11798
# A very repetitive BayesSearchCV warning I'd like to ignore
warnings.filterwarnings('ignore', message='The objective has been evaluated at this point before.')
print('Loading labels from original data')
label_map = {p: pdf.label.iloc[0] for p, pdf in load_data.train_full().groupby('STUDENTID')}
# Set up model training parameters
m = ensemble.ExtraTreesClassifier(500, bootstrap=True, random_state=RANDOM_SEED)
bayes_grid = {
'max_features': space.Real(.001, 1),
'max_samples': space.Real(.001, .999), # For bootstrapping
'ccp_alpha': space.Real(0, .004), # Range determined via ccp_alpha_explore.py
}
xval = model_selection.StratifiedKFold(4, shuffle=True, random_state=RANDOM_SEED)
scoring = metrics.make_scorer(misc_util.adjusted_thresh_kappa, needs_proba=True)
# Getting BayesSearchCV to work requires modifying site-packages/skopt/searchcv.py per:
# https://github.com/scikit-optimize/scikit-optimize/issues/762
gs = BayesSearchCV(m, bayes_grid, n_iter=100, n_jobs=3, cv=xval, verbose=0, scoring=scoring,
random_state=RANDOM_SEED, optimizer_kwargs={'n_initial_points': 20})
# Build model for 30m data as an example
train_result = []
print('Loading data')
feat_names = list(pd.read_csv('features_fe/filtered_features_30m.csv').feature)
train_df = pd.read_csv('features_fe/train_30m.csv')[['STUDENTID'] + feat_names]
holdout_df = pd.read_csv('features_fe/holdout_30m.csv')[['STUDENTID'] + feat_names]
for fset in ['features_tsfresh', 'features_featuretools']:
feat_names = list(pd.read_csv(fset + '/filtered_features_30m.csv').feature)
tdf = | pd.read_csv(fset + '/train_30m.csv') | pandas.read_csv |
import pandas as pd
path = "../../data/processed/"
accounts_features = pd.read_csv(path+"accounts_features_2021.txt")
accounts_created_features = pd.read_csv(path+"Accounts2021_Created_Features.csv", nrows=37083)
accounts_labels = | pd.read_csv(path+"accounts_labels_2021.txt", nrows=37083) | pandas.read_csv |
# Copyright 2021 VicEdTools authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class for storing reporting data exported from Compass.
Aggregates data from Learning Tasks, Reports and Progress reports exports.
"""
from __future__ import annotations
import re
from typing import Callable
import pandas as pd
from pandas.errors import EmptyDataError
def class_code_parser(class_code: str, pattern: str) -> str:
"""A default class code to subject code parser.
Assumes that class codes contain their corresponding subject code.
Args:
class_code: The class code.
pattern: A regex pattern that matches subject codes to a named group
called 'code'.
Returns:
The subject code string.
"""
m = re.search(pattern, class_code)
if m:
subject_code = m.group('code')
return subject_code
else:
print(class_code + " class code not found")
return ""
class Reports:
'''A container class for reporting data from Compass.'''
def __init__(self, data=None, class_details=None) -> Reports:
columns = [
'Time', 'ClassCode', 'StudentCode', 'ResultName', 'ResultGrade',
'ResultScore', 'Type', 'SubjectCode', 'SubjectName', 'LearningArea',
'TeacherCode'
]
self.data = pd.DataFrame(columns=columns)
if type(data) == pd.core.frame.DataFrame:
self.data = pd.concat([self.data, data], ignore_index=True)
columns = ['Time', 'ClassCode', 'TeacherCode']
self.class_details = pd.DataFrame(columns=columns).dropna(
subset=["TeacherCode"])
if type(class_details) == pd.core.frame.DataFrame:
self.class_details = pd.concat([
self.class_details,
class_details.dropna(subset=["TeacherCode"])
],
ignore_index=True)
@classmethod
def fromReportsExport(cls,
filename: str,
year: str = None,
semester: str = None,
grade_score_mapper: Callable[[str], float] = None,
grade_dtype: pd.api.types.CategoricalDtype = None,
replace_values: dict[str, dict] = None) -> Reports:
"""Creates a new Reports instance from a Compass reports export."""
try:
temp_df = pd.read_csv(filename,
na_values=None,
keep_default_na=False)
except EmptyDataError:
return Reports()
temp_df = temp_df.loc[(temp_df["AssessmentType"] == "Work Habits"), :]
if not year:
# attempt to determine year from filename
year_pattern = "(?P<year>2[0-9][0-9][0-9])"
m = re.search(year_pattern, filename)
if m:
year = m.group('year')
else:
raise ValueError(
"Could not determine year for reports export file.]n"
"Try including the year in the filename.")
if not semester:
semester_pattern = "(?:[Ss][Ee][Mm][A-Za-z ]*)(?P<semester>[12]|[Oo]ne|[Tt]wo)"
m = re.search(semester_pattern, filename)
if m:
semester = m.group('semester')
else:
raise ValueError(
"Could not determine semester for reports export file.\n"
"Try including Sem 1, Sem 2 in the filenames.")
time = cls._semesterDateMapper(year, semester)
temp_df["Time"] = time
temp_df["Time"] = pd.to_datetime(temp_df["Time"])
if replace_values:
temp_df.replace(replace_values, inplace=True)
if grade_dtype:
temp_df["Result"] = temp_df["Result"].astype(grade_dtype)
temp_df.dropna(subset=["Result"], inplace=True)
if grade_score_mapper:
temp_df["ResultScore"] = temp_df["Result"].apply(grade_score_mapper)
else:
temp_df["ResultScore"] = None
temp_df.rename(columns={
"Result": "ResultGrade",
"AssessmentArea": "ResultName",
"AssessmentType": "Type"
},
inplace=True)
temp_df["SubjectName"] = None
temp_df["TeacherCode"] = None
columns = [
'Time', 'ClassCode', 'StudentCode', 'ResultName', 'ResultGrade',
'ResultScore', 'Type', 'SubjectName', 'TeacherCode'
]
return cls(temp_df[columns])
@classmethod
def fromLearningTasksExport(
cls,
filename: str,
year: str = None,
grade_score_mapper: Callable[[str], float] = None,
grade_dtype: pd.api.types.CategoricalDtype = None,
replace_values: dict[str, dict] = None) -> Reports:
"""Creates a new Reports instance from a Compass Learning Tasks export."""
try:
temp_df = pd.read_csv(filename,
na_values=None,
keep_default_na=False)
except EmptyDataError:
return Reports()
temp_df = temp_df.loc[temp_df["IsIncludedInReport"], :]
temp_df = temp_df.loc[(temp_df["ComponentType"] != "Comment"), :]
temp_df = temp_df.loc[temp_df["ReportCycleName"].
isin(["Semester One", "Semester Two"]), :]
if (not year):
# attempt to determine year from filename
year_pattern = "(?P<year>2[0-9][0-9][0-9])"
m = re.search(year_pattern, filename)
if m:
year = m.group('year')
else:
raise ValueError(
"Could not determine year for Learning Tasks export file.")
temp_df["Year"] = year
temp_df["Time"] = temp_df.apply(
lambda x: cls._semesterDateMapper(x["Year"], x["ReportCycleName"]),
axis=1,
result_type='reduce')
temp_df['Time'] = pd.to_datetime(temp_df['Time'])
temp_df["Type"] = "Academic" # differentiate from work habits
if replace_values:
temp_df.replace(replace_values, inplace=True)
if grade_dtype:
temp_df["Result"] = temp_df["Result"].astype(grade_dtype)
temp_df.dropna(subset=["Result"], inplace=True)
if grade_score_mapper:
temp_df["ResultScore"] = temp_df["Result"].apply(grade_score_mapper)
else:
temp_df["ResultScore"] = None
temp_df.rename(columns={
'Code': 'ClassCode',
'TaskName': 'ResultName',
'Result': 'ResultGrade',
"TeacherImportIdentifier": "TeacherCode"
},
inplace=True)
class_details_columns = ['Time', 'ClassCode', 'TeacherCode']
data_columns = [
'Time', 'ClassCode', 'StudentCode', 'ResultName', 'ResultGrade',
'ResultScore', 'Type', 'SubjectName', 'TeacherCode'
]
return cls(temp_df[data_columns], temp_df[class_details_columns])
@classmethod
def fromProgressReportsExport(
cls,
filename: str,
progress_report_items: list[str],
year: str = None,
term: str = None,
grade_score_mapper: Callable[[str], float] = None,
grade_dtype: pd.api.types.CategoricalDtype = None,
replace_values: dict[str, dict] = None) -> Reports:
"""Creates a new Reports instance from a Compass progress reports export."""
try:
temp_df = pd.read_csv(filename,
na_values=None,
keep_default_na=False)
except EmptyDataError:
return Reports()
temp_df.rename(columns={
"Id": "StudentCode",
"Subject": "ClassCode",
"Teacher": "TeacherCode"
},
inplace=True)
# unpivot progress report items
temp_df = temp_df.melt(
id_vars=["StudentCode", "ClassCode", "TeacherCode"],
value_vars=[
x for x in progress_report_items if x in temp_df.columns
],
var_name="ResultName",
value_name="ResultGrade")
if not year:
year_pattern = "(?P<year>2[0-9][0-9][0-9])"
m = re.search(year_pattern, filename)
if m:
year = m.group('year')
else:
raise ValueError("Could not determine year from filename.")
if not term:
term_pattern = "(?:[Tt][Ee][Rr][A-Za-z ]*)(?P<term>[1234])"
m = re.search(term_pattern, filename)
if m:
term = m.group('term')
else:
raise ValueError("Could not determine term from filename.")
time = cls._termDateMapper(year, term)
temp_df["Time"] = time
temp_df['Time'] = pd.to_datetime(temp_df['Time'])
temp_df["Type"] = "Work Habits"
if replace_values:
temp_df.replace(replace_values, inplace=True)
if grade_dtype:
temp_df["ResultGrade"] = temp_df["ResultGrade"].astype(grade_dtype)
temp_df.dropna(subset=["ResultGrade"], inplace=True)
if grade_score_mapper:
temp_df["ResultScore"] = temp_df["ResultGrade"].apply(
grade_score_mapper)
else:
temp_df["ResultScore"] = None
temp_df["SubjectName"] = None
class_details_columns = ['Time', 'ClassCode', 'TeacherCode']
data_columns = [
'Time', 'ClassCode', 'StudentCode', 'ResultName', 'ResultGrade',
'ResultScore', 'Type', 'SubjectName', 'TeacherCode'
]
return cls(temp_df[data_columns], temp_df[class_details_columns])
@classmethod
def _semesterDateMapper(cls, year: str, semester: str) -> str:
if semester in ["Semester One", "1", "One", "one"]:
return str(year) + "-06-30"
elif semester in ["Semester Two", "2", "Two", "two"]:
return str(year) + "-12-31"
else:
return str(year) + "-12-31"
@classmethod
def _termDateMapper(cls, year: str, term: str) -> str:
if term in ["1", "One", "one"]:
return str(year) + "-03-31"
elif term in ["2", "Two", "two"]:
return str(year) + "-06-30"
elif term in ["3", "Three", "three"]:
return str(year) + "-09-30"
elif term in ["4", "Four", "four"]:
return str(year) + "-12-31"
else:
return None
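    # For example, _termDateMapper("2021", "2") returns "2021-06-30", and
    # _semesterDateMapper("2021", "Semester One") also returns "2021-06-30".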
def addLearningTasksExport(
self,
filename: str,
grade_score_mapper: Callable[[str], float] = None,
grade_dtype: pd.api.types.CategoricalDtype = None,
replace_values: dict[str, dict] = None) -> None:
"""Adds data from a Compass Learning Tasks export."""
temp = Reports.fromLearningTasksExport(
filename,
grade_score_mapper=grade_score_mapper,
grade_dtype=grade_dtype,
replace_values=replace_values)
self.data = pd.concat([self.data, temp.data], ignore_index=True)
self.data.drop_duplicates(
subset=["Time", "StudentCode", "ClassCode", "ResultName"],
inplace=True)
self.class_details = pd.concat([
self.class_details,
temp.class_details.dropna(subset=["TeacherCode"])
],
ignore_index=True)
self.class_details.drop_duplicates(inplace=True)
def addReportsExport(self,
filename: str,
grade_score_mapper: Callable[[str], float] = None,
grade_dtype: pd.api.types.CategoricalDtype = None,
replace_values: dict[str, dict] = None) -> None:
"""Adds data from a Compass reports export."""
temp = Reports.fromReportsExport(filename,
grade_score_mapper=grade_score_mapper,
grade_dtype=grade_dtype,
replace_values=replace_values)
self.data = | pd.concat([self.data, temp.data], ignore_index=True) | pandas.concat |
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
date_range(start=pd.NaT, end="2016-01-01", freq="D")
def test_date_range_multiplication_overflow(self):
# GH#24255
# check that overflows in calculating `addend = periods * stride`
# are caught
with tm.assert_produces_warning(None):
# we should _not_ be seeing a overflow RuntimeWarning
dti = date_range(start="1677-09-22", periods=213503, freq="D")
assert dti[0] == Timestamp("1677-09-22")
assert len(dti) == 213503
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("1969-05-04", periods=200000000, freq="30000D")
def test_date_range_unsigned_overflow_handling(self):
# GH#24255
# case where `addend = periods * stride` overflows int64 bounds
# but not uint64 bounds
dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
dti2 = date_range(start=dti[0], periods=len(dti), freq="D")
assert dti2.equals(dti)
dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")
assert dti3.equals(dti)
def test_date_range_int64_overflow_non_recoverable(self):
# GH#24255
# case with start later than 1970-01-01, overflow int64 but not uint64
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start="1970-02-01", periods=106752 * 24, freq="H")
# case with end before 1970-01-01, overflow int64 but not uint64
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1969-11-14", periods=106752 * 24, freq="H")
def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
# cases where stride * periods overflow int64 and stride/endpoint
# have different signs
start = Timestamp("2262-02-23")
end = Timestamp("1969-11-14")
expected = date_range(start=start, end=end, freq="-1H")
assert expected[0] == start
assert expected[-1] == end
dti = date_range(end=end, periods=len(expected), freq="-1H")
tm.assert_index_equal(dti, expected)
start2 = Timestamp("1970-02-01")
end2 = Timestamp("1677-10-22")
expected2 = date_range(start=start2, end=end2, freq="-1H")
assert expected2[0] == start2
assert expected2[-1] == end2
dti2 = date_range(start=start2, periods=len(expected2), freq="-1H")
tm.assert_index_equal(dti2, expected2)
def test_date_range_out_of_bounds(self):
# GH#14187
msg = "Cannot generate range"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("2016-01-01", periods=100000, freq="D")
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1763-10-12", periods=100000, freq="D")
def test_date_range_gen_error(self):
rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")
assert len(rng) == 4
@pytest.mark.parametrize("freq", ["AS", "YS"])
def test_begin_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
freq=freq,
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["A", "Y"])
def test_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["BA", "BY"])
def test_business_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq(self):
# GH 11018
rng = date_range("2011-12-31", freq="-2A", periods=3)
exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2A"
rng = date_range("2011-01-31", freq="-2M", periods=3)
exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2M"
def test_date_range_bms_bug(self):
# #1645
rng = date_range("1/1/2000", periods=10, freq="BMS")
ex_first = Timestamp("2000-01-03")
assert rng[0] == ex_first
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq="2D")
offset = timedelta(2)
values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)
tm.assert_index_equal(rng, values)
rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")
the_time = time(8, 15)
for val in rng:
assert val.time() == the_time
def test_date_range_fy5252(self):
dr = date_range(
start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"),
)
assert dr[0] == Timestamp("2013-01-31")
assert dr[1] == Timestamp("2014-01-30")
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start, end, periods=10, freq="s")
def test_date_range_convenience_periods(self):
# GH 20808
result = date_range("2018-04-24", "2018-04-27", periods=3)
expected = DatetimeIndex(
["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],
freq=None,
)
tm.assert_index_equal(result, expected)
# Test if spacing remains linear if tz changes to dst in range
result = date_range(
"2018-04-01 01:00:00",
"2018-04-01 04:00:00",
tz="Australia/Sydney",
periods=3,
)
expected = DatetimeIndex(
[
Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),
Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),
Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),
]
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start,end,result_tz",
[
["20180101", "20180103", "US/Eastern"],
[datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],
[Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
"US/Eastern",
],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
None,
],
],
)
def test_date_range_linspacing_tz(self, start, end, result_tz):
# GH 20983
result = date_range(start, end, periods=3, tz=result_tz)
expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_date_range_businesshour(self):
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH")
rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
"2014-07-08 11:00",
"2014-07-08 12:00",
"2014-07-08 13:00",
"2014-07-08 14:00",
"2014-07-08 15:00",
"2014-07-08 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
def test_range_misspecified(self):
# GH #1095
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(periods=10)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(periods=10, freq="H")
with pytest.raises(ValueError, match=msg):
date_range()
def test_compat_replace(self):
# https://github.com/statsmodels/statsmodels/issues/3349
# replace should take ints/longs for compat
result = date_range(
Timestamp("1960-04-01 00:00:00", freq="QS-JAN"), periods=76, freq="QS-JAN"
)
assert len(result) == 76
def test_catch_infinite_loop(self):
offset = offsets.DateOffset(minute=5)
# blow up, don't loop forever
msg = "Offset <DateOffset: minute=5> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)
@pytest.mark.parametrize("periods", (1, 2))
def test_wom_len(self, periods):
# https://github.com/pandas-dev/pandas/issues/20517
res = date_range(start="20110101", periods=periods, freq="WOM-1MON")
assert len(res) == periods
def test_construct_over_dst(self):
# GH 20854
pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=True
)
pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=False
)
expect_data = [
Timestamp("2010-11-07 00:00:00", tz="US/Pacific"),
pre_dst,
pst_dst,
]
expected = DatetimeIndex(expect_data, freq="H")
result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific")
| tm.assert_index_equal(result, expected) | pandas._testing.assert_index_equal |
import os
import pandas as pd
from pandas import DataFrame
from tqdm.autonotebook import tqdm
def group_seeds(dirname):
seeds = []
for f in os.listdir(dirname):
num, _ = f.split('.csv')
try:
num = int(num)
seeds.append(num)
except Exception:
pass
out = None
for seed in sorted(seeds):
df = | pd.read_csv(f'{dirname}/{seed}.csv') | pandas.read_csv |
import time
import numpy as np
import pandas as pd
import geopandas as gpd
from sqlalchemy import extract, select, func
from sqlalchemy.sql import or_, and_
import datetime
from shapely.geometry import Point
from src.data.processing_func import (get_direction, extract_geo_sections)
def extract_jps(meta, date_begin, date_end, periods=None, weekends=False,
summary=False, skip=None, limit=20000, return_count=False):
start = time.time()
jps = meta.tables["JamPerSection"]
jam = meta.tables["Jam"]
sctn = meta.tables["Section"]
mongo_record = meta.tables["MongoRecord"]
query_count = select([func.count()])
query_all = select([mongo_record.c.MgrcDateStart,
jps.c.JpsId,
jps.c.SctnId,
jam.c.JamId,
jam.c.JamIndLevelOfTraffic,
jam.c.JamQtdLengthMeters,
jam.c.JamSpdMetersPerSecond,
jam.c.JamTimeDelayInSeconds,
jam.c.JamDscCoordinatesLonLat])
queries = [query_count, query_all]
queries = [q.select_from(mongo_record.join(jam.join(jps), isouter=True)).\
where(mongo_record.c.MgrcDateStart.between(date_begin, date_end)) for q in queries]
if not weekends:
queries = [q.where(extract("isodow", mongo_record.c.MgrcDateStart).in_(list(range(1,6))))
for q in queries]
if periods:
or_list=[]
for t in periods:
or_list.append(and_(extract("hour", mongo_record.c.MgrcDateStart)>=t[0],
extract("hour", mongo_record.c.MgrcDateStart)<t[1]
)
)
queries = [q.where(or_(*or_list)) for q in queries]
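        # e.g. periods=[(7, 10), (17, 20)] keeps only jams whose start hour lies
        # in [7, 10) or [17, 20); the tuples here are illustrative.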
query_count, query_all = queries
if return_count:
size = query_count.execute().scalar()
return size
query_all = query_all.order_by(mongo_record.c.MgrcDateStart, jps.c.JpsId).offset(skip).limit(limit)
df_jps = | pd.read_sql(query_all, meta.bind) | pandas.read_sql |
#----------------------------------------------------------------------------------------------
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import modelling as ml
import os
import altair as alt
import altair
import itertools
import statsmodels.api as sm
from scipy import stats
import sys
from streamlit import caching
import SessionState
import platform
import base64
from io import BytesIO
from pygam import LinearGAM, LogisticGAM, s
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from factor_analyzer import FactorAnalyzer
from factor_analyzer.factor_analyzer import calculate_bartlett_sphericity
from factor_analyzer.factor_analyzer import calculate_kmo
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
#sys.tracebacklimit = 0
# Show altair tooltip when full screen
st.markdown('<style>#vg-tooltip-element{z-index: 1000051}</style>',unsafe_allow_html=True)
#Session state
session_state = SessionState.get(id = 0)
# Analysis type
analysis_type = st.selectbox("What kind of analysis would you like to conduct?", ["Regression", "Multi-class classification", "Data decomposition"], key = session_state.id)
st.header("**Multivariate data**")
if analysis_type == "Regression":
st.markdown("Get your data ready for powerfull methods: Artificial Neural Networks, Boosted Regression Trees, Random Forest, Generalized Additive Models, Multiple Linear Regression, and Logistic Regression! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Multi-class classification":
st.markdown("Get your data ready for powerfull multi-class classification methods! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Data decomposition":
st.markdown("Decompose your data with Principal Component Analysis or Factor Analysis! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA IMPORT
# File upload section
df_dec = st.sidebar.radio("Get data", ["Use example dataset", "Upload data"])
uploaded_data=None
if df_dec == "Upload data":
#st.subheader("Upload your data")
#uploaded_data = st.sidebar.file_uploader("Make sure that dot (.) is a decimal separator!", type=["csv", "txt"])
separator_expander=st.sidebar.beta_expander('Upload settings')
with separator_expander:
a4,a5=st.beta_columns(2)
with a4:
dec_sep=a4.selectbox("Decimal sep.",['.',','], key = session_state.id)
with a5:
col_sep=a5.selectbox("Column sep.",[';', ',' , '|', '\s+', '\t','other'], key = session_state.id)
if col_sep=='other':
col_sep=st.text_input('Specify your column separator', key = session_state.id)
a4,a5=st.beta_columns(2)
with a4:
thousands_sep=a4.selectbox("Thousands x sep.",[None,'.', ' ','\s+', 'other'], key = session_state.id)
if thousands_sep=='other':
thousands_sep=st.text_input('Specify your thousands separator', key = session_state.id)
with a5:
encoding_val=a5.selectbox("Encoding",[None,'utf_8','utf_8_sig','utf_16_le','cp1140','cp1250','cp1251','cp1252','cp1253','cp1254','other'], key = session_state.id)
if encoding_val=='other':
encoding_val=st.text_input('Specify your encoding', key = session_state.id)
# Error handling for separator selection:
if dec_sep==col_sep:
st.sidebar.error("Decimal and column separators cannot be identical!")
elif dec_sep==thousands_sep:
st.sidebar.error("Decimal and thousands separators cannot be identical!")
elif col_sep==thousands_sep:
st.sidebar.error("Column and thousands separators cannot be identical!")
uploaded_data = st.sidebar.file_uploader("Default separators: decimal '.' | column ';'", type=["csv", "txt"])
if uploaded_data is not None:
df = pd.read_csv(uploaded_data, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
df_name=os.path.splitext(uploaded_data.name)[0]
st.sidebar.success('Loading data... done!')
elif uploaded_data is None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
else:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
st.sidebar.markdown("")
#Basic data info
n_rows = df.shape[0]
n_cols = df.shape[1]
#++++++++++++++++++++++++++++++++++++++++++++
# SETTINGS
settings_expander=st.sidebar.beta_expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4)
st.caption("**Help**")
sett_hints = st.checkbox('Show learning hints', value=False)
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False)
sett_theme = st.selectbox('Theme', ["Light", "Dark"])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
reset_clicked = st.sidebar.button("Reset all your input")
if reset_clicked:
session_state.id = session_state.id + 1
st.sidebar.markdown("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA PREPROCESSING & VISUALIZATION
# Check if enough data is available
if n_rows > 0 and n_cols > 0:
st.empty()
else:
st.error("ERROR: Not enough data!")
return
data_exploration_container = st.beta_container()
with data_exploration_container:
st.header("**Data screening and processing**")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA SUMMARY
# Main panel for data summary (pre)
#----------------------------------
dev_expander_dsPre = st.beta_expander("Explore raw data info and stats ", expanded = False)
with dev_expander_dsPre:
# Default data description:
if uploaded_data == None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from the Gallup World Poll surveys from 2018 to 2020. For more details see the [World Happiness Report 2021] (https://worldhappiness.report/).")
st.markdown("**Citation:**")
st.markdown("Helliwell, <NAME>., <NAME>, <NAME>, and <NAME>, eds. 2021. World Happiness Report 2021. New York: Sustainable Development Solutions Network.")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("Country")
col2.write("country name")
col1,col2=st.beta_columns(2)
col1.write("Year ")
col2.write("year ranging from 2005 to 2020")
col1,col2=st.beta_columns(2)
col1.write("Ladder")
col2.write("happiness score or subjective well-being with the best possible life being a 10, and the worst possible life being a 0")
col1,col2=st.beta_columns(2)
col1.write("Log GDP per capita")
col2.write("in purchasing power parity at constant 2017 international dollar prices")
col1,col2=st.beta_columns(2)
col1.write("Social support")
col2.write("the national average of the binary responses (either 0 or 1) to the question regarding relatives or friends to count on")
col1,col2=st.beta_columns(2)
col1.write("Healthy life expectancy at birth")
col2.write("based on the data extracted from the World Health Organization’s Global Health Observatory data repository")
col1,col2=st.beta_columns(2)
col1.write("Freedom to make life choices")
col2.write("national average of responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Generosity")
col2.write("residual of regressing national average of response to the question regarding money donations in the past month on GDP per capita")
col1,col2=st.beta_columns(2)
col1.write("Perceptions of corruption")
col2.write("the national average of the survey responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Positive affect")
col2.write("the average of three positive affect measures (happiness, laugh and enjoyment)")
col1,col2=st.beta_columns(2)
col1.write("Negative affect (worry, sadness and anger)")
col2.write("the average of three negative affect measures (worry, sadness and anger)")
st.markdown("")
if analysis_type == "Multi-class classification":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from Fisher's Iris data set. See [here] (https://archive.ics.uci.edu/ml/datasets/iris) for more information.")
st.markdown("**Citation:**")
st.markdown("<NAME>. (1936). The use of multiple measurements in taxonomic problems. Annals of Eugenics, 7(2): 179–188. doi: [10.1111/j.1469-1809.1936.tb02137.x] (https://doi.org/10.1111%2Fj.1469-1809.1936.tb02137.x)")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("class_category")
col2.write("Numerical category for 'class': Iris Setosa (0), Iris Versicolour (1), and Iris Virginica (2)")
col1,col2=st.beta_columns(2)
col1.write("class")
col2.write("Iris Setosa, Iris Versicolour, and Iris Virginica")
col1,col2=st.beta_columns(2)
col1.write("sepal length")
col2.write("sepal length in cm")
col1,col2=st.beta_columns(2)
col1.write("sepal width")
col2.write("sepal width in cm")
col1,col2=st.beta_columns(2)
col1.write("petal length")
col2.write("petal length in cm")
col1,col2=st.beta_columns(2)
col1.write("petal width")
col2.write("petal width in cm")
st.markdown("")
# Show raw data & data info
df_summary = fc.data_summary(df)
if st.checkbox("Show raw data ", value = False, key = session_state.id):
st.write(df)
st.write("Data shape: ", n_rows, " rows and ", n_cols, " columns")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl=st.checkbox("Show duplicates and NAs info ", value = False, key = session_state.id)
if check_nasAnddupl:
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0])
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(np.where(df.isnull())[0])))))
# Show variable info
if st.checkbox('Show variable info ', value = False, key = session_state.id):
st.write(df_summary["Variable types"])
# Show summary statistics (raw data)
if st.checkbox('Show summary statistics (raw data) ', value = False, key = session_state.id):
st.write(df_summary["ALL"].style.set_precision(user_precision))
# Download link for summary statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_summary["Variable types"].to_excel(excel_file, sheet_name="variable_info")
df_summary["ALL"].to_excel(excel_file, sheet_name="summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Summary statistics__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
#++++++++++++++++++++++
# DATA PROCESSING
# Settings for data processing
#-------------------------------------
#st.subheader("Data processing")
dev_expander_dm_sb = st.beta_expander("Specify data processing preferences", expanded = False)
with dev_expander_dm_sb:
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
n_rows_wNAs_pre_processing = "No"
if n_rows_wNAs > 0:
n_rows_wNAs_pre_processing = "Yes"
a1, a2, a3 = st.beta_columns(3)
else: a1, a3 = st.beta_columns(2)
sb_DM_dImp_num = None
sb_DM_dImp_other = None
sb_DM_delRows=None
sb_DM_keepRows=None
with a1:
#--------------------------------------------------------------------------------------
# DATA CLEANING
st.markdown("**Data cleaning**")
# Delete rows
delRows =st.selectbox('Delete rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if delRows!='-':
if delRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_delRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif delRows=='equal':
sb_DM_delRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if delRows=='greater':
sb_DM_delRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.warning("WARNING: No row is deleted!")
elif delRows=='greater or equal':
sb_DM_delRows=df.index[df.index >= row_1]
if row_1 == 0:
st.error("ERROR: All rows are deleted!")
return
elif delRows=='smaller':
sb_DM_delRows=df.index[df.index < row_1]
if row_1 == 0:
st.warning("WARNING: No row is deleted!")
elif delRows=='smaller or equal':
sb_DM_delRows=df.index[df.index <= row_1]
if row_1 == len(df)-1:
st.error("ERROR: All rows are deleted!")
return
if sb_DM_delRows is not None:
df = df.loc[~df.index.isin(sb_DM_delRows)]
no_delRows=n_rows-df.shape[0]
# Keep rows
keepRows =st.selectbox('Keep rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if keepRows!='-':
if keepRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_keepRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif keepRows=='equal':
sb_DM_keepRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if keepRows=='greater':
sb_DM_keepRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.error("ERROR: No row is kept!")
return
elif keepRows=='greater or equal':
sb_DM_keepRows=df.index[df.index >= row_1]
if row_1 == 0:
st.warning("WARNING: All rows are kept!")
elif keepRows=='smaller':
sb_DM_keepRows=df.index[df.index < row_1]
if row_1 == 0:
st.error("ERROR: No row is kept!")
return
elif keepRows=='smaller or equal':
sb_DM_keepRows=df.index[df.index <= row_1]
if sb_DM_keepRows is not None:
df = df.loc[df.index.isin(sb_DM_keepRows)]
no_keptRows=df.shape[0]
# Delete columns
sb_DM_delCols = st.multiselect("Select columns to delete ", df.columns, key = session_state.id)
df = df.loc[:,~df.columns.isin(sb_DM_delCols)]
# Keep columns
sb_DM_keepCols = st.multiselect("Select columns to keep", df.columns, key = session_state.id)
if len(sb_DM_keepCols) > 0:
df = df.loc[:,df.columns.isin(sb_DM_keepCols)]
# Delete duplicates if any exist
if df[df.duplicated()].shape[0] > 0:
sb_DM_delDup = st.selectbox("Delete duplicate rows ", ["No", "Yes"], key = session_state.id)
if sb_DM_delDup == "Yes":
n_rows_dup = df[df.duplicated()].shape[0]
df = df.drop_duplicates()
elif df[df.duplicated()].shape[0] == 0:
sb_DM_delDup = "No"
# Delete rows with NA if any exist
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
if n_rows_wNAs > 0:
sb_DM_delRows_wNA = st.selectbox("Delete rows with NAs ", ["No", "Yes"], key = session_state.id)
if sb_DM_delRows_wNA == "Yes":
df = df.dropna()
elif n_rows_wNAs == 0:
sb_DM_delRows_wNA = "No"
# Filter data
st.markdown("**Data filtering**")
filter_var = st.selectbox('Filter your data by a variable...', list('-')+ list(df.columns), key = session_state.id)
if filter_var !='-':
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if df[filter_var].dtypes=="float64":
filter_format="%.8f"
else:
filter_format=None
user_filter=st.selectbox('Select values that are ...', options=['greater','greater or equal','smaller','smaller or equal', 'equal','between'], key = session_state.id)
if user_filter=='between':
filter_1=st.number_input('Lower limit is', format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
filter_2=st.number_input('Upper limit is', format=filter_format, value=df[filter_var].max(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
#reclassify values:
if filter_1 < filter_2 :
df = df[(df[filter_var] > filter_1) & (df[filter_var] < filter_2)]
if len(df) == 0:
st.error("ERROR: No data available for the selected limits!")
return
elif filter_1 >= filter_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif user_filter=='equal':
filter_1=st.multiselect('to... ', options=df[filter_var].values, key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
else:
filter_1=st.number_input('than... ',format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
#reclassify values:
if user_filter=='greater':
df = df[df[filter_var] > filter_1]
elif user_filter=='greater or equal':
df = df[df[filter_var] >= filter_1]
elif user_filter=='smaller':
df= df[df[filter_var]< filter_1]
elif user_filter=='smaller or equal':
df = df[df[filter_var] <= filter_1]
if len(df) == 0:
st.error("ERROR: No data available for the selected value!")
return
elif len(df) == n_rows:
st.warning("WARNING: Data are not filtered for this value!")
else:
filter_1=st.multiselect('Filter your data by a value...', (df[filter_var]).unique(), key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
if n_rows_wNAs_pre_processing == "Yes":
with a2:
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
# Select data imputation method (only if rows with NA not deleted)
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.markdown("**Data imputation**")
sb_DM_dImp_choice = st.selectbox("Replace entries with NA ", ["No", "Yes"], key = session_state.id)
if sb_DM_dImp_choice == "Yes":
# Numeric variables
sb_DM_dImp_num = st.selectbox("Imputation method for numeric variables ", ["Mean", "Median", "Random value"], key = session_state.id)
# Other variables
sb_DM_dImp_other = st.selectbox("Imputation method for other variables ", ["Mode", "Random value"], key = session_state.id)
df = fc.data_impute(df, sb_DM_dImp_num, sb_DM_dImp_other)
else:
st.markdown("**Data imputation**")
st.write("")
st.info("No NAs in data set!")
with a3:
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
st.markdown("**Data transformation**")
# Select columns for different transformation types
transform_options = df.select_dtypes([np.number]).columns
numCat_options = df.columns
sb_DM_dTrans_log = st.multiselect("Select columns to transform with log ", transform_options, key = session_state.id)
if sb_DM_dTrans_log is not None:
df = fc.var_transform_log(df, sb_DM_dTrans_log)
sb_DM_dTrans_sqrt = st.multiselect("Select columns to transform with sqrt ", transform_options, key = session_state.id)
if sb_DM_dTrans_sqrt is not None:
df = fc.var_transform_sqrt(df, sb_DM_dTrans_sqrt)
sb_DM_dTrans_square = st.multiselect("Select columns for squaring ", transform_options, key = session_state.id)
if sb_DM_dTrans_square is not None:
df = fc.var_transform_square(df, sb_DM_dTrans_square)
sb_DM_dTrans_cent = st.multiselect("Select columns for centering ", transform_options, key = session_state.id)
if sb_DM_dTrans_cent is not None:
df = fc.var_transform_cent(df, sb_DM_dTrans_cent)
sb_DM_dTrans_stand = st.multiselect("Select columns for standardization ", transform_options, key = session_state.id)
if sb_DM_dTrans_stand is not None:
df = fc.var_transform_stand(df, sb_DM_dTrans_stand)
sb_DM_dTrans_norm = st.multiselect("Select columns for normalization ", transform_options, key = session_state.id)
if sb_DM_dTrans_norm is not None:
df = fc.var_transform_norm(df, sb_DM_dTrans_norm)
sb_DM_dTrans_numCat = st.multiselect("Select columns for numeric categorization ", numCat_options, key = session_state.id)
if sb_DM_dTrans_numCat:
if not df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist():
sb_DM_dTrans_numCat_sel = st.multiselect("Select variables for manual categorization ", sb_DM_dTrans_numCat, key = session_state.id)
if sb_DM_dTrans_numCat_sel:
for var in sb_DM_dTrans_numCat_sel:
if df[var].unique().size > 5:
st.error("ERROR: Selected variable has too many categories (>5): " + str(var))
return
else:
manual_cats = pd.DataFrame(index = range(0, df[var].unique().size), columns=["Value", "Cat"])
text = "Category for "
# Save manually selected categories
for i in range(0, df[var].unique().size):
text1 = text + str(var) + ": " + str(sorted(df[var].unique())[i])
man_cat = st.number_input(text1, value = 0, min_value=0, key = session_state.id)
manual_cats.loc[i]["Value"] = sorted(df[var].unique())[i]
manual_cats.loc[i]["Cat"] = man_cat
new_var_name = "numCat_" + var
new_var = pd.DataFrame(index = df.index, columns = [new_var_name])
for c in df[var].index:
if pd.isnull(df[var][c]) == True:
new_var.loc[c, new_var_name] = np.nan
elif pd.isnull(df[var][c]) == False:
new_var.loc[c, new_var_name] = int(manual_cats[manual_cats["Value"] == df[var][c]]["Cat"])
df[new_var_name] = new_var.astype('int64')
# Exclude columns with manual categorization from standard categorization
numCat_wo_manCat = [var for var in sb_DM_dTrans_numCat if var not in sb_DM_dTrans_numCat_sel]
df = fc.var_transform_numCat(df, numCat_wo_manCat)
else:
df = fc.var_transform_numCat(df, sb_DM_dTrans_numCat)
else:
col_with_na = df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist()
st.error("ERROR: Please select columns without NAs: " + ', '.join(map(str,col_with_na)))
return
else:
sb_DM_dTrans_numCat = None
sb_DM_dTrans_mult = st.number_input("Number of variable multiplications ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_mult != 0:
multiplication_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_mult), columns=["Var1", "Var2"])
text = "Multiplication pair"
for i in range(0, sb_DM_dTrans_mult):
text1 = text + " " + str(i+1)
text2 = text + " " + str(i+1) + " "
mult_var1 = st.selectbox(text1, transform_options, key = session_state.id)
mult_var2 = st.selectbox(text2, transform_options, key = session_state.id)
multiplication_pairs.loc[i]["Var1"] = mult_var1
multiplication_pairs.loc[i]["Var2"] = mult_var2
fc.var_transform_mult(df, mult_var1, mult_var2)
sb_DM_dTrans_div = st.number_input("Number of variable divisions ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_div != 0:
division_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_div), columns=["Var1", "Var2"])
text = "Division pair"
for i in range(0, sb_DM_dTrans_div):
text1 = text + " " + str(i+1) + " (numerator)"
text2 = text + " " + str(i+1) + " (denominator)"
div_var1 = st.selectbox(text1, transform_options, key = session_state.id)
div_var2 = st.selectbox(text2, transform_options, key = session_state.id)
division_pairs.loc[i]["Var1"] = div_var1
division_pairs.loc[i]["Var2"] = div_var2
fc.var_transform_div(df, div_var1, div_var2)
data_transform=st.checkbox("Transform data in Excel?", value=False)
if data_transform==True:
st.info("Press the button to open your data in Excel. Don't forget to save your result as a csv or a txt file!")
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="data",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Data_transformation__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Transform your data in Excel</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# PROCESSING SUMMARY
if st.checkbox('Show a summary of my data processing preferences ', value = False, key = session_state.id):
st.markdown("Summary of data changes:")
#--------------------------------------------------------------------------------------
# DATA CLEANING
# Rows
if sb_DM_delRows is not None and delRows!='-' :
if no_delRows > 1:
st.write("-", no_delRows, " rows were deleted!")
elif no_delRows == 1:
st.write("-",no_delRows, " row was deleted!")
elif no_delRows == 0:
st.write("- No row was deleted!")
else:
st.write("- No row was deleted!")
if sb_DM_keepRows is not None and keepRows!='-' :
if no_keptRows > 1:
st.write("-", no_keptRows, " rows are kept!")
elif no_keptRows == 1:
st.write("-",no_keptRows, " row is kept!")
elif no_keptRows == 0:
st.write("- All rows are kept!")
else:
st.write("- All rows are kept!")
# Columns
if len(sb_DM_delCols) > 1:
st.write("-", len(sb_DM_delCols), " columns were deleted:", ', '.join(sb_DM_delCols))
elif len(sb_DM_delCols) == 1:
st.write("-",len(sb_DM_delCols), " column was deleted:", str(sb_DM_delCols[0]))
elif len(sb_DM_delCols) == 0:
st.write("- No column was deleted!")
if len(sb_DM_keepCols) > 1:
st.write("-", len(sb_DM_keepCols), " columns are kept:", ', '.join(sb_DM_keepCols))
elif len(sb_DM_keepCols) == 1:
st.write("-",len(sb_DM_keepCols), " column is kept:", str(sb_DM_keepCols[0]))
elif len(sb_DM_keepCols) == 0:
st.write("- All columns are kept!")
# Duplicates
if sb_DM_delDup == "Yes":
if n_rows_dup > 1:
st.write("-", n_rows_dup, " duplicate rows were deleted!")
elif n_rows_dup == 1:
st.write("-", n_rows_dup, "duplicate row was deleted!")
else:
st.write("- No duplicate row was deleted!")
# NAs
if sb_DM_delRows_wNA == "Yes":
if n_rows_wNAs > 1:
st.write("-", n_rows_wNAs, "rows with NAs were deleted!")
elif n_rows_wNAs == 1:
st.write("-", n_rows - n_rows_wNAs, "row with NAs was deleted!")
else:
st.write("- No row with NAs was deleted!")
# Filter
if filter_var != "-":
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if isinstance(filter_1, list):
if len(filter_1) == 0:
st.write("-", " Data was not filtered!")
elif len(filter_1) > 0:
st.write("-", " Data filtered by:", str(filter_var))
elif filter_1 is not None:
st.write("-", " Data filtered by:", str(filter_var))
else:
st.write("-", " Data was not filtered!")
elif len(filter_1)>0:
st.write("-", " Data filtered by:", str(filter_var))
elif len(filter_1) == 0:
st.write("-", " Data was not filtered!")
else:
st.write("-", " Data was not filtered!")
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.write("- Data imputation method for numeric variables:", sb_DM_dImp_num)
st.write("- Data imputation method for other variable types:", sb_DM_dImp_other)
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
# log
if len(sb_DM_dTrans_log) > 1:
st.write("-", len(sb_DM_dTrans_log), " columns were log-transformed:", ', '.join(sb_DM_dTrans_log))
elif len(sb_DM_dTrans_log) == 1:
st.write("-",len(sb_DM_dTrans_log), " column was log-transformed:", sb_DM_dTrans_log[0])
elif len(sb_DM_dTrans_log) == 0:
st.write("- No column was log-transformed!")
# sqrt
if len(sb_DM_dTrans_sqrt) > 1:
st.write("-", len(sb_DM_dTrans_sqrt), " columns were sqrt-transformed:", ', '.join(sb_DM_dTrans_sqrt))
elif len(sb_DM_dTrans_sqrt) == 1:
st.write("-",len(sb_DM_dTrans_sqrt), " column was sqrt-transformed:", sb_DM_dTrans_sqrt[0])
elif len(sb_DM_dTrans_sqrt) == 0:
st.write("- No column was sqrt-transformed!")
# square
if len(sb_DM_dTrans_square) > 1:
st.write("-", len(sb_DM_dTrans_square), " columns were squared:", ', '.join(sb_DM_dTrans_square))
elif len(sb_DM_dTrans_square) == 1:
st.write("-",len(sb_DM_dTrans_square), " column was squared:", sb_DM_dTrans_square[0])
elif len(sb_DM_dTrans_square) == 0:
st.write("- No column was squared!")
# centering
if len(sb_DM_dTrans_cent) > 1:
st.write("-", len(sb_DM_dTrans_cent), " columns were centered:", ', '.join(sb_DM_dTrans_cent))
elif len(sb_DM_dTrans_cent) == 1:
st.write("-",len(sb_DM_dTrans_cent), " column was centered:", sb_DM_dTrans_cent[0])
elif len(sb_DM_dTrans_cent) == 0:
st.write("- No column was centered!")
# standardize
if len(sb_DM_dTrans_stand) > 1:
st.write("-", len(sb_DM_dTrans_stand), " columns were standardized:", ', '.join(sb_DM_dTrans_stand))
elif len(sb_DM_dTrans_stand) == 1:
st.write("-",len(sb_DM_dTrans_stand), " column was standardized:", sb_DM_dTrans_stand[0])
elif len(sb_DM_dTrans_stand) == 0:
st.write("- No column was standardized!")
# normalize
if len(sb_DM_dTrans_norm) > 1:
st.write("-", len(sb_DM_dTrans_norm), " columns were normalized:", ', '.join(sb_DM_dTrans_norm))
elif len(sb_DM_dTrans_norm) == 1:
st.write("-",len(sb_DM_dTrans_norm), " column was normalized:", sb_DM_dTrans_norm[0])
elif len(sb_DM_dTrans_norm) == 0:
st.write("- No column was normalized!")
# numeric category
if sb_DM_dTrans_numCat is not None:
if len(sb_DM_dTrans_numCat) > 1:
st.write("-", len(sb_DM_dTrans_numCat), " columns were transformed to numeric categories:", ', '.join(sb_DM_dTrans_numCat))
elif len(sb_DM_dTrans_numCat) == 1:
st.write("-",len(sb_DM_dTrans_numCat), " column was transformed to numeric categories:", sb_DM_dTrans_numCat[0])
elif sb_DM_dTrans_numCat is None:
st.write("- No column was transformed to numeric categories!")
# multiplication
if sb_DM_dTrans_mult != 0:
st.write("-", "Number of variable multiplications: ", sb_DM_dTrans_mult)
elif sb_DM_dTrans_mult == 0:
st.write("- No variables were multiplied!")
# division
if sb_DM_dTrans_div != 0:
st.write("-", "Number of variable divisions: ", sb_DM_dTrans_div)
elif sb_DM_dTrans_div == 0:
st.write("- No variables were divided!")
st.write("")
st.write("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# UPDATED DATA SUMMARY
# Show only if changes were made
if any(v for v in [sb_DM_delCols, sb_DM_dImp_num, sb_DM_dImp_other, sb_DM_dTrans_log, sb_DM_dTrans_sqrt, sb_DM_dTrans_square, sb_DM_dTrans_cent, sb_DM_dTrans_stand, sb_DM_dTrans_norm, sb_DM_dTrans_numCat ] if v is not None) or sb_DM_delDup == "Yes" or sb_DM_delRows_wNA == "Yes" or sb_DM_dTrans_mult != 0 or sb_DM_dTrans_div != 0 or filter_var != "-" or delRows!='-' or keepRows!='-' or len(sb_DM_keepCols) > 0:
dev_expander_dsPost = st.beta_expander("Explore cleaned and transformed data info and stats ", expanded = False)
with dev_expander_dsPost:
if df.shape[1] > 0 and df.shape[0] > 0:
# Show cleaned and transformed data & data info
df_summary_post = fc.data_summary(df)
if st.checkbox("Show cleaned and transformed data ", value = False, key = session_state.id):
n_rows_post = df.shape[0]
n_cols_post = df.shape[1]
st.dataframe(df)
st.write("Data shape: ", n_rows_post, "rows and ", n_cols_post, "columns")
# Download transformed data:
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="Clean. and transf. data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "CleanedTransfData__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned and transformed data</a>
""",
unsafe_allow_html=True)
st.write("")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl2 = st.checkbox("Show duplicates and NAs info (processed) ", value = False, key = session_state.id)
if check_nasAnddupl2:
index_c = []
for c in df.columns:
for r in df.index:
if pd.isnull(df[c][r]):
index_c.append(r)
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", len(pd.unique(sorted(index_c))))
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(sorted(index_c))))))
# Show cleaned and transformed variable info
if st.checkbox("Show cleaned and transformed variable info ", value = False, key = session_state.id):
st.write(df_summary_post["Variable types"])
# Show summary statistics (cleaned and transformed data)
if st.checkbox('Show summary statistics (cleaned and transformed data) ', value = False, key = session_state.id):
st.write(df_summary_post["ALL"].style.set_precision(user_precision))
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="cleaned_data")
df_summary_post["Variable types"].to_excel(excel_file, sheet_name="cleaned_variable_info")
df_summary_post["ALL"].to_excel(excel_file, sheet_name="cleaned_summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Cleaned data summary statistics_multi_" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned data summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
else:
st.error("ERROR: No data available for preprocessing!")
return
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA VISUALIZATION
data_visualization_container = st.beta_container()
with data_visualization_container:
st.write("")
st.write("")
st.header("**Data visualization**")
dev_expander_dv = st.beta_expander("Explore visualization types ", expanded = False)
with dev_expander_dv:
if df.shape[1] > 0 and df.shape[0] > 0:
st.write('**Variable selection**')
varl_sel_options = df.columns
var_sel = st.selectbox('Select variable for visualizations', varl_sel_options, key = session_state.id)
if df[var_sel].dtypes == "float64" or df[var_sel].dtypes == "float32" or df[var_sel].dtypes == "int64" or df[var_sel].dtypes == "int32":
a4, a5 = st.beta_columns(2)
with a4:
st.write('**Scatterplot with LOESS line**')
yy_options = df.columns
yy = st.selectbox('Select variable for y-axis', yy_options, key = session_state.id)
if df[yy].dtypes == "float64" or df[yy].dtypes == "float32" or df[yy].dtypes == "int64" or df[yy].dtypes == "int32":
fig_data = pd.DataFrame()
fig_data[yy] = df[yy]
fig_data[var_sel] = df[var_sel]
fig_data["Index"] = df.index
fig = alt.Chart(fig_data).mark_circle().encode(
x = alt.X(var_sel, scale = alt.Scale(domain = [min(fig_data[var_sel]), max(fig_data[var_sel])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(yy, scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [yy, var_sel, "Index"]
)
st.altair_chart(fig + fig.transform_loess(var_sel, yy).mark_line(size = 2, color = "darkred"), use_container_width=True)
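# Note: fig.transform_loess(var_sel, yy) lets Altair/Vega-Lite fit a locally estimated scatterplot
# smoother (LOESS) on the fly; the dark red mark_line overlays that smoothed trend on the raw points.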
if sett_hints:
st.info(str(fc.learning_hints("dv_scatterplot")))
else: st.error("ERROR: Please select a numeric variable for the y-axis!")
with a5:
st.write('**Histogram**')
binNo = st.slider("Select maximum number of bins", 5, 100, 25, key = session_state.id)
fig2 = alt.Chart(df).mark_bar().encode(
x = alt.X(var_sel, title = var_sel + " (binned)", bin = alt.BinParams(maxbins = binNo), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip(var_sel, bin = alt.BinParams(maxbins = binNo))]
)
st.altair_chart(fig2, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_histogram")))
a6, a7 = st.beta_columns(2)
with a6:
st.write('**Boxplot**')
# Boxplot
boxplot_data = pd.DataFrame()
boxplot_data[var_sel] = df[var_sel]
boxplot_data["Index"] = df.index
boxplot = alt.Chart(boxplot_data).mark_boxplot(size = 100, color = "#1f77b4", median = dict(color = "darkred"),).encode(
y = alt.Y(var_sel, scale = alt.Scale(zero = False)),
tooltip = [var_sel, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(boxplot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_boxplot")))
with a7:
st.write("**QQ-plot**")
var_values = df[var_sel]
qqplot_data = pd.DataFrame()
qqplot_data[var_sel] = var_values
qqplot_data["Index"] = df.index
qqplot_data = qqplot_data.sort_values(by = [var_sel])
qqplot_data["Theoretical quantiles"] = stats.probplot(var_values, dist="norm")[0][0]
qqplot = alt.Chart(qqplot_data).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qqplot_data["Theoretical quantiles"]), max(qqplot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(var_sel, title = str(var_sel), scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [var_sel, "Theoretical quantiles", "Index"]
)
st.altair_chart(qqplot + qqplot.transform_regression('Theoretical quantiles', var_sel).mark_line(size = 2, color = "darkred"), use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("dv_qqplot")))
else: st.error("ERROR: Please select a numeric variable!")
else: st.error("ERROR: No data available for Data Visualization!")
# scatter matrix
#Check If variables are numeric
num_cols=[]
for column in df:
if df[column].dtypes in ('float', 'float64', 'float32', 'int', 'int64', 'int32'):
num_cols.append(column)
if len(num_cols)>1:
show_scatter_matrix=st.checkbox('Show scatter matrix',value=False,key= session_state.id)
if show_scatter_matrix==True:
multi_var_sel = st.multiselect('Select variables for scatter matrix', num_cols, num_cols, key = session_state.id)
if len(multi_var_sel)<2:
st.error("ERROR: Please choose at least two variables fro a scatterplot")
else:
#Plot scatter matrix:
scatter_matrix=alt.Chart(df[multi_var_sel]).mark_circle().encode(
x=alt.X(alt.repeat("column"), type='quantitative'),
y=alt.Y(alt.repeat("row"), type='quantitative')
).properties(
width=150,
height=150
).repeat(
row=multi_var_sel,
column=multi_var_sel
).interactive()
st.altair_chart(scatter_matrix, use_container_width=True)
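# Note: the scatter matrix is built with Altair's repeat operator: alt.repeat("column") and
# alt.repeat("row") re-encode the same circle chart for every pairing of the selected numeric
# variables, and .interactive() adds pan/zoom to each panel.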
#------------------------------------------------------------------------------------------
# REGRESSION
if analysis_type == "Regression":
#++++++++++++++++++++++++++++++++++++++++++++
# MACHINE LEARNING (PREDICTIVE DATA ANALYSIS)
st.write("")
st.write("")
data_machinelearning_container = st.beta_container()
with data_machinelearning_container:
st.header("**Multivariate data modelling**")
st.markdown("Go for creating predictive models of your data using classical and machine learning techniques! STATY will take care of the modelling for you, so you can put your focus on results interpretation and communication! ")
ml_settings = st.beta_expander("Specify models ", expanded = False)
with ml_settings:
# Initial status for running models
run_models = False
sb_ML_alg = "NA"
do_hypTune = "No"
do_modval = "No"
do_hypTune_no = "No hyperparameter tuning"
final_hyPara_values="None"
model_val_results = None
model_full_results = None
gam_finalPara = None
brt_finalPara = None
brt_tuning_results = None
rf_finalPara = None
rf_tuning_results = None
ann_finalPara = None
ann_tuning_results = None
MLR_intercept = None
MLR_cov_type = None
MLR_finalPara = None
MLR_model = "OLS"
LR_cov_type = None
LR_finalPara = None
LR_finalPara = None
if df.shape[1] > 0 and df.shape[0] > 0:
#--------------------------------------------------------------------------------------
# GENERAL SETTINGS
st.markdown("**Variable selection**")
# Variable categories
df_summary_model = fc.data_summary(df)
var_cat = df_summary_model["Variable types"].loc["category"]
# Response variable
response_var_options = df.columns
response_var = st.selectbox("Select response variable", response_var_options, key = session_state.id)
# Check if response variable is numeric and has no NAs
response_var_message_num = False
response_var_message_na = False
response_var_message_cat = False
if var_cat.loc[response_var] == "string/binary" or var_cat.loc[response_var] == "bool/binary":
response_var_message_num = "ERROR: Please transform the binary response variable into a numeric binary categorization in data processing preferences!"
elif var_cat.loc[response_var] == "string/categorical" or var_cat.loc[response_var] == "other" or var_cat.loc[response_var] == "string/single":
response_var_message_num = "ERROR: Please select a numeric or binary response variable!"
elif var_cat.loc[response_var] == "categorical":
response_var_message_cat = "WARNING: Non-continuous variables are treated as continuous!"
if response_var_message_num != False:
st.error(response_var_message_num)
if response_var_message_cat != False:
st.warning(response_var_message_cat)
# Continue if everything is clean for response variable
if response_var_message_num == False and response_var_message_na == False:
# Select explanatory variables
expl_var_options = df.columns
expl_var_options = expl_var_options[expl_var_options.isin(df.drop(response_var, axis = 1).columns)]
expl_var = st.multiselect("Select explanatory variables", expl_var_options, key = session_state.id)
var_list = list([response_var]) + list(expl_var)
# Check if explanatory variables are numeric
expl_var_message_num = False
expl_var_message_na = False
if any(a for a in df[expl_var].dtypes if a != "float64" and a != "float32" and a != "int64" and a != "int32"):
expl_var_not_num = df[expl_var].select_dtypes(exclude=["int64", "int32", "float64", "float32"]).columns
expl_var_message_num = "ERROR: Please exclude non-numeric variables: " + ', '.join(map(str,list(expl_var_not_num)))
# Check if NAs are present and delete them automatically (delete before run models button)
if np.where(df[var_list].isnull())[0].size > 0:
st.warning("WARNING: Your modelling data set includes NAs. Rows with NAs are automatically deleted!")
if expl_var_message_num != False:
st.error(expl_var_message_num)
elif expl_var_message_na != False:
st.error(expl_var_message_na)
# Continue if everything is clean for explanatory variables and at least one was selected
elif expl_var_message_num == False and expl_var_message_na == False and len(expl_var) > 0:
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.markdown("**Specify modelling algorithms**")
# Select algorithms based on chosen response variable
# Binary (has to be integer or float)
if var_cat.loc[response_var] == "binary":
algorithms = ["Multiple Linear Regression", "Logistic Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "binary"
# Multi-class (has to be integer, currently treated as continuous response)
elif var_cat.loc[response_var] == "categorical":
algorithms = ["Multiple Linear Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "continuous"
# Continuous
elif var_cat.loc[response_var] == "numeric":
algorithms = ["Multiple Linear Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "continuous"
alg_list = list(algorithms)
sb_ML_alg = st.multiselect("Select modelling techniques", alg_list, alg_list)
# MLR + binary info message
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression") and response_var_type == "binary":
st.warning("WARNING: For Multiple Linear Regression only the full model output will be determined.")
st.markdown("**Model-specific settings**")
# Multiple Linear Regression settings
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
MLR_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "covType"])
MLR_intercept = "Yes"
MLR_cov_type = "non-robust"
MLR_finalPara["intercept"] = MLR_intercept
MLR_finalPara["covType"] = MLR_cov_type
if st.checkbox("Adjust settings for Multiple Linear Regression"):
col1, col2 = st.beta_columns(2)
with col1:
MLR_intercept = st.selectbox("Include intercept", ["Yes", "No"])
with col2:
MLR_cov_type = st.selectbox("Covariance type", ["non-robust", "HC0", "HC1", "HC2", "HC3"])
MLR_finalPara["intercept"] = MLR_intercept
MLR_finalPara["covType"] = MLR_cov_type
st.write("")
# Logistic Regression settings
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
LR_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "covType"])
LR_intercept = "Yes"
LR_cov_type = "non-robust"
LR_finalPara["intercept"] = LR_intercept
LR_finalPara["covType"] = LR_cov_type
if st.checkbox("Adjust settings for Logistic Regression"):
col1, col2 = st.beta_columns(2)
with col1:
LR_intercept = st.selectbox("Include intercept ", ["Yes", "No"])
with col2:
LR_cov_type = st.selectbox("Covariance type", ["non-robust", "HC0"])
LR_finalPara["intercept"] = LR_intercept
LR_finalPara["covType"] = LR_cov_type
st.write("")
# Generalized Additive Models settings
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
gam_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "number of splines", "spline order", "lambda"])
gam_finalPara["intercept"] = "Yes"
gam_finalPara["number of splines"] = 20
gam_finalPara["spline order"] = 3
gam_finalPara["lambda"] = 0.6
gam_lam_search = "No"
if st.checkbox("Adjust settings for Generalized Additive Models"):
gam_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "number of splines", "spline order", "lambda"])
col1, col2 = st.beta_columns(2)
with col1:
gam_intercept = st.selectbox("Include intercept ", ["Yes", "No"])
gam_finalPara["intercept"] = gam_intercept
with col2:
gam_lam_search = st.selectbox("Search for lambda ", ["No", "Yes"])
if gam_lam_search == "Yes":
ls_col1, ls_col2, ls_col3 = st.beta_columns(3)
with ls_col1:
ls_min = st.number_input("Minimum lambda value", value=0.001, step=1e-3, min_value=0.001, format="%.3f")
with ls_col2:
ls_max = st.number_input("Maximum lambda value", value=100.000, step=1e-3, min_value=0.002, format="%.3f")
with ls_col3:
ls_number = st.number_input("Lambda values per variable", value=50, min_value=2)
if ls_number**len(expl_var) > 10000:
st.warning("WARNING: Your grid has " + str(ls_number**len(expl_var)) + " combinations. Please note that searching for lambda will take a lot of time!")
else:
st.info("Your grid has " + str(ls_number**len(expl_var)) + " combinations.")
if gam_lam_search == "No":
gam_col1, gam_col2, gam_col3 = st.beta_columns(3)
if gam_lam_search == "Yes":
gam_col1, gam_col2= st.beta_columns(2)
gam_nos_values = []
gam_so_values = []
gam_lam_values = []
for gset in range(0,len(expl_var)):
var_name = expl_var[gset]
with gam_col1:
nos = st.number_input("Number of splines (" + var_name + ")", value = 20, min_value=1)
gam_nos_values.append(nos)
with gam_col2:
so = st.number_input("Spline order (" + var_name + ")", value = 3, min_value=3)
gam_so_values.append(so)
if gam_lam_search == "No":
with gam_col3:
lam = st.number_input("Lambda (" + var_name + ")", value = 0.6, min_value=0.001, step=1e-3, format="%.3f")
gam_lam_values.append(lam)
if nos <= so:
st.error("ERROR: Please make sure that the number of splines is greater than the spline order for "+ str(expl_var[gset]) + "!")
return
if gam_lam_search == "Yes":
lam = np.round(np.linspace(ls_min, ls_max, ls_number),3)
if len(expl_var) == 1:
gam_lam_values = lam
else:
gam_lam_values = [lam] * len(expl_var)
gam_finalPara.at["value", "number of splines"] = gam_nos_values
gam_finalPara.at["value","spline order"] = gam_so_values
gam_finalPara.at["value","lambda"] = gam_lam_values
st.write("")
# Save hyperparameter values for machine learning methods
final_hyPara_values = {}
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_finalPara["number of trees"] = [100]
rf_finalPara["maximum tree depth"] = [None]
rf_finalPara["maximum number of features"] = [len(expl_var)]
rf_finalPara["sample rate"] = [0.99]
final_hyPara_values["rf"] = rf_finalPara
if st.checkbox("Adjust settings for Random Forest "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_finalPara["number of trees"] = st.number_input("Number of trees", value=100, step=1, min_value=1)
with col3:
rf_mtd_sel = st.selectbox("Specify maximum tree depth ", ["No", "Yes"])
if rf_mtd_sel == "No":
rf_finalPara["maximum tree depth"] = [None]
if rf_mtd_sel == "Yes":
rf_finalPara["maximum tree depth"] = st.slider("Maximum tree depth ", value=20, step=1, min_value=1, max_value=50)
if len(expl_var) >1:
with col4:
rf_finalPara["maximum number of features"] = st.slider("Maximum number of features ", value=len(expl_var), step=1, min_value=1, max_value=len(expl_var))
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
else:
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
final_hyPara_values["rf"] = rf_finalPara
st.write("")
# Boosted Regression Trees settings
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_finalPara["number of trees"] = [100]
brt_finalPara["learning rate"] = [0.1]
brt_finalPara["maximum tree depth"] = [3]
brt_finalPara["sample rate"] = [1]
final_hyPara_values["brt"] = brt_finalPara
if st.checkbox("Adjust settings for Boosted Regression Trees "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
brt_finalPara["number of trees"] = st.number_input("Number of trees ", value=100, step=1, min_value=1)
with col2:
brt_finalPara["learning rate"] = st.slider("Learning rate ", value=0.1, min_value=0.001, max_value=0.1 , step=1e-3, format="%.3f")
with col3:
brt_finalPara["maximum tree depth"] = st.slider("Maximum tree depth ", value=3, step=1, min_value=1, max_value=30)
with col4:
brt_finalPara["sample rate"] = st.slider("Sample rate ", value=1.0, step=0.01, min_value=0.5, max_value=1.0)
final_hyPara_values["brt"] = brt_finalPara
st.write("")
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_finalPara = pd.DataFrame(index = ["value"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "hidden layer sizes", "learning rate", "L² regularization"])
ann_finalPara["weight optimization solver"] = ["adam"]
ann_finalPara["maximum number of iterations"] = [200]
ann_finalPara["activation function"] = ["relu"]
ann_finalPara["hidden layer sizes"] = [(100,)]
ann_finalPara["learning rate"] = [0.001]
ann_finalPara["L² regularization"] = [0.0001]
final_hyPara_values["ann"] = ann_finalPara
if st.checkbox("Adjust settings for Artificial Neural Networks "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
ann_finalPara["weight optimization solver"] = st.selectbox("Weight optimization solver ", ["adam"])
with col2:
ann_finalPara["activation function"] = st.selectbox("Activation function ", ["relu", "identity", "logistic", "tanh"])
with col3:
ann_finalPara["maximum number of iterations"] = st.slider("Maximum number of iterations ", value=200, step=1, min_value=10, max_value=1000)
with col4:
ann_finalPara["learning rate"] = st.slider("Learning rate ", min_value=0.0001, max_value=0.01, value=0.001, step=1e-4, format="%.4f")
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers", [1, 2, 3])
if number_hidden_layers == 1:
number_nodes1 = st.slider("Number of nodes in hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,)]
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,)]
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
number_nodes3 = st.slider("Number of neurons in third hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,number_nodes3,)]
with col6:
ann_finalPara["L² regularization"] = st.slider("L² regularization ", min_value=0.00001, max_value=0.001, value=0.0001, step=1e-5, format="%.5f")
#--------------------------------------------------------------------------------------
# HYPERPARAMETER TUNING SETTINGS
if len(sb_ML_alg) >= 1:
# Depending on algorithm selection different hyperparameter settings are shown
if any(a for a in sb_ML_alg if a == "Random Forest") or any(a for a in sb_ML_alg if a == "Boosted Regression Trees") or any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
# General settings
st.markdown("**Hyperparameter-tuning settings**")
do_hypTune = st.selectbox("Use hyperparameter-tuning", ["No", "Yes"])
# Save hyperparameter values for all algorithms
hyPara_values = {}
# No hyperparameter-tuning
if do_hypTune == "No":
do_hypTune_no = "Default hyperparameter values are used!"
# Hyperparameter-tuning
elif do_hypTune == "Yes":
st.warning("WARNING: Hyperparameter-tuning can take a lot of time! For tips, please [contact us](mailto:<EMAIL>?subject=Staty-App).")
# Further general settings
hypTune_method = st.selectbox("Hyperparameter-search method", ["random grid-search", "grid-search", "Bayes optimization", "sequential model-based optimization"])
col1, col2 = st.beta_columns(2)
with col1:
hypTune_nCV = st.slider("Select number for n-fold cross-validation", 2, 10, 5)
if hypTune_method == "random grid-search" or hypTune_method == "Bayes optimization" or hypTune_method == "sequential model-based optimization":
with col2:
hypTune_iter = st.slider("Select number of iterations for search", 20, 1000, 20)
else:
hypTune_iter = False
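# Note: for plain grid-search the number of candidate combinations is fixed by the grid itself, so
# no iteration count is requested; random grid-search, Bayes optimization and sequential model-based
# optimization instead evaluate hypTune_iter candidate settings, each with hypTune_nCV-fold
# cross-validation.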
st.markdown("**Model-specific tuning settings**")
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_tunePara["number of trees"] = [50, 500]
rf_tunePara["maximum tree depth"] = [None, None]
rf_tunePara["maximum number of features"] = [1, len(expl_var)]
rf_tunePara["sample rate"] = [0.8, 0.99]
hyPara_values["rf"] = rf_tunePara
if st.checkbox("Adjust tuning settings for Random Forest"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_tunePara["number of trees"] = st.slider("Range for number of trees ", 50, 1000, [50, 500])
with col3:
rf_mtd_choice = st.selectbox("Specify maximum tree depth", ["No", "Yes"])
if rf_mtd_choice == "Yes":
rf_tunePara["maximum tree depth"] = st.slider("Range for maximum tree depth ", 1, 50, [2, 10])
else:
rf_tunePara["maximum tree depth"] = [None, None]
with col4:
if len(expl_var) > 1:
rf_tunePara["maximum number of features"] = st.slider("Range for maximum number of features", 1, len(expl_var), [1, len(expl_var)])
else:
rf_tunePara["maximum number of features"] = [1,1]
with col2:
rf_tunePara["sample rate"] = st.slider("Range for sample rate ", 0.5, 0.99, [0.8, 0.99])
hyPara_values["rf"] = rf_tunePara
# Boosted Regression Trees settings
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_tunePara["number of trees"] = [50, 500]
brt_tunePara["learning rate"] = [0.001, 0.010]
brt_tunePara["maximum tree depth"] = [2, 10]
brt_tunePara["sample rate"] = [0.8, 1.0]
hyPara_values["brt"] = brt_tunePara
if st.checkbox("Adjust tuning settings for Boosted Regression Trees"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
brt_tunePara["number of trees"] = st.slider("Range for number of trees", 50, 1000, [50, 500])
with col2:
brt_tunePara["learning rate"] = st.slider("Range for learning rate", 0.001, 0.1, [0.001, 0.02], step=1e-3, format="%.3f")
with col3:
brt_tunePara["maximum tree depth"] = st.slider("Range for maximum tree depth", 1, 30, [2, 10])
with col4:
brt_tunePara["sample rate"] = st.slider("Range for sample rate", 0.5, 1.0, [0.8, 1.0])
hyPara_values["brt"] = brt_tunePara
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "number of hidden layers", "nodes per hidden layer", "learning rate","L² regularization"])# "learning rate schedule", "momentum", "epsilon"])
ann_tunePara["weight optimization solver"] = list([["adam"], "NA"])
ann_tunePara["maximum number of iterations"] = [100, 200]
ann_tunePara["activation function"] = list([["relu"], "NA"])
ann_tunePara["number of hidden layers"] = list([1, "NA"])
ann_tunePara["nodes per hidden layer"] = [50, 100]
ann_tunePara["learning rate"] = [0.0001, 0.002]
ann_tunePara["L² regularization"] = [0.00001, 0.0002]
hyPara_values["ann"] = ann_tunePara
if st.checkbox("Adjust tuning settings for Artificial Neural Networks"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
weight_opt_list = st.selectbox("Weight optimization solver ", ["adam"])
if len(weight_opt_list) == 0:
weight_opt_list = ["adam"]
st.warning("WARNING: Default value used 'adam'")
ann_tunePara["weight optimization solver"] = list([[weight_opt_list], "NA"])
with col2:
ann_tunePara["maximum number of iterations"] = st.slider("Maximum number of iterations (epochs) ", 10, 1000, [100, 200])
with col3:
act_func_list = st.multiselect("Activation function ", ["identity", "logistic", "tanh", "relu"], ["relu"])
if len(act_func_list) == 0:
act_func_list = ["relu"]
st.warning("WARNING: Default value used 'relu'")
ann_tunePara["activation function"] = list([act_func_list, "NA"])
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers ", [1, 2, 3])
ann_tunePara["number of hidden layers"] = list([number_hidden_layers, "NA"])
# Cases for hidden layers
if number_hidden_layers == 1:
ann_tunePara["nodes per hidden layer"] = st.slider("Number of nodes in hidden layer ", 5, 500, [50, 100])
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
number_nodes3 = st.slider("Number of neurons in third hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0], number_nodes3[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1], number_nodes3[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
with col6:
if weight_opt_list == "adam":
ann_tunePara["learning rate"] = st.slider("Range for learning rate ", 0.0001, 0.01, [0.0001, 0.002], step=1e-4, format="%.4f")
with col4:
ann_tunePara["L² regularization"] = st.slider("L² regularization parameter ", 0.0, 0.001, [0.00001, 0.0002], step=1e-5, format="%.5f")
hyPara_values["ann"] = ann_tunePara
#--------------------------------------------------------------------------------------
# VALIDATION SETTINGS
st.markdown("**Validation settings**")
do_modval= st.selectbox("Use model validation", ["No", "Yes"])
if do_modval == "Yes":
col1, col2 = st.beta_columns(2)
# Select training/ test ratio
with col1:
train_frac = st.slider("Select training data size", 0.5, 0.95, 0.8)
# Select number for validation runs
with col2:
val_runs = st.slider("Select number for validation runs", 5, 100, 10)
#--------------------------------------------------------------------------------------
# PREDICTION SETTINGS
st.markdown("**Model predictions**")
do_modprednew = st.selectbox("Use model prediction for new data", ["No", "Yes"])
if do_modprednew == "Yes":
# Upload new data
new_data_pred = st.file_uploader(" ", type=["csv", "txt"])
if new_data_pred is not None:
# Read data
if uploaded_data is not None:
df_new = pd.read_csv(new_data_pred, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
else:
df_new = pd.read_csv(new_data_pred, sep = ";|,|\t",engine='python')
st.success('Loading data... done!')
# Transform columns if any were transformed
# Log-transformation
if sb_DM_dTrans_log is not None:
# List of log-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_log:
if "log_"+tv in expl_var:
tv_list.append(tv)
# Check if log-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for log-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_log(df_new, tv_list)
# Sqrt-transformation
if sb_DM_dTrans_sqrt is not None:
# List of sqrt-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_sqrt:
if "sqrt_"+tv in expl_var:
tv_list.append(tv)
# Check if sqrt-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for sqrt-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_sqrt(df_new, tv_list)
# Square-transformation
if sb_DM_dTrans_square is not None:
# List of square-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_square:
if "square_"+tv in expl_var:
tv_list.append(tv)
# Check if square-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for square-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_square(df_new, tv_list)
# Standardization
if sb_DM_dTrans_stand is not None:
# List of standardized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_stand:
if "stand_"+tv in expl_var:
tv_list.append(tv)
# Check if standardized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for standardization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use mean and standard deviation of original data for standardization
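# Note: reusing the mean and standard deviation of the *original* (modelling) data keeps the new
# data on the same scale as the data the models were fitted on and avoids leaking information from
# the prediction data into the transformation.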
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if df[tv].std() != 0:
new_var_name = "stand_" + tv
new_var = (df_new[tv] - df[tv].mean())/df[tv].std()
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be standardized!")
return
# Normalization
if sb_DM_dTrans_norm is not None:
# List of normalized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_norm:
if "norm_"+tv in expl_var:
tv_list.append(tv)
# Check if normalized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for normalization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use min and max of original data for normalization
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if (df[tv].max()-df[tv].min()) != 0:
new_var_name = "norm_" + tv
new_var = (df_new[tv] - df[tv].min())/(df[tv].max()-df[tv].min())
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be normalized!")
return
# Categorization
if sb_DM_dTrans_numCat is not None:
# List of categorized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_numCat:
if "numCat_"+tv in expl_var:
tv_list.append(tv)
# Check if categorized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for categorization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use same categories as for original data
for tv in tv_list:
new_var_name = "numCat_" + tv
new_var = pd.DataFrame(index = df_new.index, columns = [new_var_name])
for r in df_new.index:
if df.loc[df[tv] == df_new[tv][r]].empty == False:
new_var.loc[r, new_var_name] = df["numCat_" + tv][df.loc[df[tv] == df_new[tv][r]].index[0]]
else:
st.error("ERROR: Category is missing for the value in row: "+ str(r) + ", variable: " + str(tv))
return
df_new[new_var_name] = new_var.astype('int64')
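# Note: instead of re-encoding categories from scratch, each new value is looked up in the original
# data and assigned the numeric code created there ("numCat_" column), so the category-to-number
# mapping stays identical between modelling and prediction data.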
# Multiplication
if sb_DM_dTrans_mult != 0:
# List of multiplied variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_mult):
mult_name = "mult_" + str(multiplication_pairs.loc[tv]["Var1"]) + "_" + str(multiplication_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(multiplication_pairs.loc[tv]["Var1"]))
tv_list.append(str(multiplication_pairs.loc[tv]["Var2"]))
# Check if multiplied explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for multiplication in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_mult):
df_new = fc.var_transform_mult(df_new, multiplication_pairs.loc[var]["Var1"], multiplication_pairs.loc[var]["Var2"])
# Division
if sb_DM_dTrans_div != 0:
# List of divided variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_div):
mult_name = "div_" + str(division_pairs.loc[tv]["Var1"]) + "_" + str(division_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(division_pairs.loc[tv]["Var1"]))
tv_list.append(str(division_pairs.loc[tv]["Var2"]))
# Check if divided explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for division in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_div):
df_new = fc.var_transform_div(df_new, division_pairs.loc[var]["Var1"], division_pairs.loc[var]["Var2"])
# Check if explanatory variables are available as columns
expl_list = []
for expl_incl in expl_var:
if expl_incl not in df_new.columns:
expl_list.append(expl_incl)
if expl_list:
st.error("ERROR: Some variables are missing in new data: "+ ', '.join(expl_list))
return
else:
st.info("All variables are available for predictions!")
# Check if NAs are present and delete them automatically
if df_new.iloc[list(pd.unique(np.where(df_new.isnull())[0]))].shape[0] == 0:
st.empty()
else:
df_new = df_new[expl_var].dropna()
st.warning("WARNING: Your new data set includes NAs. Rows with NAs are automatically deleted!")
df_new = df_new[expl_var]
# Modelling data set
df = df[var_list]
# Check if NAs are present and delete them automatically
if np.where(df[var_list].isnull())[0].size > 0:
df = df.dropna()
#--------------------------------------------------------------------------------------
# SETTINGS SUMMARY
st.write("")
# Show modelling data
if st.checkbox("Show modelling data"):
st.write(df)
st.write("Data shape: ", df.shape[0], " rows and ", df.shape[1], " columns")
# Download link for modelling data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="modelling_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "Modelling data__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download modelling data</a>
""",
unsafe_allow_html=True)
st.write("")
# Show prediction data
if do_modprednew == "Yes":
if new_data_pred is not None:
if st.checkbox("Show new data for predictions"):
st.write(df_new)
st.write("Data shape: ", df_new.shape[0], " rows and ", df_new.shape[1], " columns")
# Download link for forecast data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_new.to_excel(excel_file, sheet_name="new_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "New data for predictions__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download new data for predictions</a>
""",
unsafe_allow_html=True)
st.write("")
# Show machine learning summary
if st.checkbox('Show a summary of machine learning settings', value = False):
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.write("Algorithms summary:")
st.write("- Models:", ', '.join(sb_ML_alg))
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
# st.write("- Multiple Linear Regression model: ", MLR_model)
st.write("- Multiple Linear Regression including intercept: ", MLR_intercept)
st.write("- Multiple Linear Regression covariance type: ", MLR_cov_type)
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
st.write("- Logistic Regression including intercept: ", LR_intercept)
st.write("- Logistic Regression covariance type: ", LR_cov_type)
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.write("- Generalized Additive Models parameters: ")
st.write(gam_finalPara)
if any(a for a in sb_ML_alg if a == "Random Forest") and do_hypTune == "No":
st.write("- Random Forest parameters: ")
st.write(rf_finalPara)
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees") and do_hypTune == "No":
st.write("- Boosted Regression Trees parameters: ")
st.write(brt_finalPara)
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks") and do_hypTune == "No":
st.write("- Artificial Neural Networks parameters: ")
st.write(ann_finalPara)
st.write("")
#--------------------------------------------------------------------------------------
# SETTINGS
# Hyperparameter settings summary
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks" or a == "Boosted Regression Trees" or a == "Random Forest"):
st.write("Hyperparameter-tuning settings summary:")
if do_hypTune == "No":
st.write("- ", do_hypTune_no)
st.write("")
if do_hypTune == "Yes":
st.write("- Search method:", hypTune_method)
st.write("- ", hypTune_nCV, "-fold cross-validation")
if hypTune_method == "random grid-search" or hypTune_method == "Bayes optimization" or hypTune_method == "sequential model-based optimization":
st.write("- ", hypTune_iter, "iterations in search")
st.write("")
# Random Forest summary
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.write("Random Forest tuning settings summary:")
st.write(rf_tunePara)
# Boosted Regression Trees summary
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.write("Boosted Regression Trees tuning settings summary:")
st.write(brt_tunePara)
# Artificial Neural Networks summary
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.write("Artificial Neural Networks tuning settings summary:")
st.write(ann_tunePara.style.format({"L² regularization": "{:.5}"}))
#st.caption("** Learning rate is only used in adam")
st.write("")
# General settings summary
st.write("General settings summary:")
st.write("- Response variable type: ", response_var_type)
# Modelling formula
if expl_var != False:
st.write("- Modelling formula:", response_var, "~", ' + '.join(expl_var))
if do_modval == "Yes":
# Train/ test ratio
if train_frac != False:
st.write("- Train/ test ratio:", str(round(train_frac*100)), "% / ", str(round(100-train_frac*100)), "%")
# Validation runs
if val_runs != False:
st.write("- Validation runs:", str(val_runs))
st.write("")
#--------------------------------------------------------------------------------------
# RUN MODELS
# Models are run on button click
st.write("")
run_models = st.button("Run models")
st.write("")
if run_models:
# Check if new data available
if do_modprednew == "Yes":
if new_data_pred is None:
st.error("ERROR: Please upload new data for additional model predictions or select 'No'!")
return
#Hyperparameter
if do_hypTune == "Yes":
# Tuning
model_tuning_results = ml.model_tuning(df, sb_ML_alg, hypTune_method, hypTune_iter, hypTune_nCV, hyPara_values, response_var_type, response_var, expl_var)
# Save final hyperparameters
# Random Forest
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_tuning_results = model_tuning_results["rf tuning"]
rf_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_finalPara["number of trees"] = [rf_tuning_results.loc["value"]["number of trees"]]
if [rf_tuning_results.loc["value"]["maximum tree depth"]][0] == "None":
rf_finalPara["maximum tree depth"] = None
else:
rf_finalPara["maximum tree depth"] = [rf_tuning_results.loc["value"]["maximum tree depth"]]
rf_finalPara["maximum number of features"] = [rf_tuning_results.loc["value"]["maximum number of features"]]
rf_finalPara["sample rate"] = [rf_tuning_results.loc["value"]["sample rate"]]
final_hyPara_values["rf"] = rf_finalPara
# Boosted Regression Trees
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_tuning_results = model_tuning_results["brt tuning"]
brt_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_finalPara["number of trees"] = [brt_tuning_results.loc["value"]["number of trees"]]
brt_finalPara["learning rate"] = [brt_tuning_results.loc["value"]["learning rate"]]
brt_finalPara["maximum tree depth"] = [brt_tuning_results.loc["value"]["maximum tree depth"]]
brt_finalPara["sample rate"] = [brt_tuning_results.loc["value"]["sample rate"]]
final_hyPara_values["brt"] = brt_finalPara
# Artificial Neural Networks
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_tuning_results = model_tuning_results["ann tuning"]
ann_finalPara = pd.DataFrame(index = ["value"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "hidden layer sizes", "learning rate", "L² regularization"]) #"learning rate schedule", "momentum", "epsilon"])
ann_finalPara["weight optimization solver"] = [ann_tuning_results.loc["value"]["weight optimization solver"]]
ann_finalPara["maximum number of iterations"] = [ann_tuning_results.loc["value"]["maximum number of iterations"]]
ann_finalPara["activation function"] = [ann_tuning_results.loc["value"]["activation function"]]
ann_finalPara["hidden layer sizes"] = [ann_tuning_results.loc["value"]["hidden layer sizes"]]
ann_finalPara["learning rate"] = [ann_tuning_results.loc["value"]["learning rate"]]
#ann_finalPara["learning rate schedule"] = [ann_tuning_results.loc["value"]["learning rate schedule"]]
#ann_finalPara["momentum"] = [ann_tuning_results.loc["value"]["momentum"]]
ann_finalPara["L² regularization"] = [ann_tuning_results.loc["value"]["L² regularization"]]
#ann_finalPara["epsilon"] = [ann_tuning_results.loc["value"]["epsilon"]]
final_hyPara_values["ann"] = ann_finalPara
# Lambda search for GAM
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
if gam_lam_search == "Yes":
st.info("Lambda search")
my_bar = st.progress(0.0)
progress = 0
Y_data_gam = df[response_var]
X_data_gam = df[expl_var]
nos = gam_finalPara["number of splines"][0]
so = gam_finalPara["spline order"][0]
lams = gam_lam_values
if response_var_type == "continuous":
if gam_finalPara["intercept"][0] == "Yes":
gam_grid = LinearGAM(n_splines = nos, spline_order = so, fit_intercept = True).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if gam_finalPara["intercept"][0] == "No":
gam_grid = LinearGAM(n_splines = nos, spline_order = so, fit_intercept = False).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if response_var_type == "binary":
if gam_finalPara["intercept"][0] == "Yes":
gam_grid = LogisticGAM(n_splines = nos, spline_order = so, fit_intercept = True).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if gam_finalPara["intercept"][0] == "No":
gam_grid = LogisticGAM(n_splines = nos, spline_order = so, fit_intercept = False).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
progress += 1
my_bar.progress(progress/1)
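# Note: LinearGAM/LogisticGAM.gridsearch() (pyGAM) refits the model over the supplied grid of
# smoothing parameters and keeps the best lambda (selected via GCV/UBRE by default); the chosen
# values are written back into gam_finalPara so the full model uses them.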
# Model validation
if do_modval == "Yes":
model_val_results = ml.model_val(df, sb_ML_alg, MLR_model, train_frac, val_runs, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara, MLR_finalPara, LR_finalPara)
# Full model (depending on prediction for new data)
if do_modprednew == "Yes":
if new_data_pred is not None:
model_full_results = ml.model_full(df, df_new, sb_ML_alg, MLR_model, MLR_finalPara, LR_finalPara, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara)
if do_modprednew == "No":
df_new = pd.DataFrame()
model_full_results = ml.model_full(df, df_new, sb_ML_alg, MLR_model, MLR_finalPara, LR_finalPara, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara)
# Success message
st.success('Models run successfully!')
else: st.error("ERROR: No data available for Modelling!")
#++++++++++++++++++++++
# ML OUTPUT
# Show only if models were run (no further widgets after run models or the full page reloads)
if run_models == True:
st.write("")
st.write("")
st.header("**Model outputs**")
#--------------------------------------------------------------------------------------
# FULL MODEL OUTPUT
full_output = st.beta_expander("Full model output", expanded = False)
with full_output:
if model_full_results is not None:
st.markdown("**Correlation Matrix & 2D-Histogram**")
# Define variable selector
var_sel_cor = alt.selection_single(fields=['variable', 'variable2'], clear=False,
init={'variable': response_var, 'variable2': response_var})
# Calculate correlation data
corr_data = df[[response_var] + expl_var].corr().stack().reset_index().rename(columns={0: "correlation", 'level_0': "variable", 'level_1': "variable2"})
corr_data["correlation_label"] = corr_data["correlation"].map('{:.2f}'.format)
# Basic plot
base = alt.Chart(corr_data).encode(
x = alt.X('variable2:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12)),
y = alt.Y('variable:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12))
)
# Correlation values to insert
text = base.mark_text().encode(
text='correlation_label',
color = alt.condition(
alt.datum.correlation > 0.5,
alt.value('white'),
alt.value('black')
)
)
# Correlation plot
corr_plot = base.mark_rect().encode(
color = alt.condition(var_sel_cor, alt.value('#86c29c'), 'correlation:Q', legend = alt.Legend(title = "Bravais-Pearson correlation coefficient", orient = "top", gradientLength = 350), scale = alt.Scale(scheme='redblue', reverse = True, domain = [-1,1]))
).add_selection(var_sel_cor)
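# Note: var_sel_cor is an Altair single selection bound to the (variable, variable2) fields;
# clicking a cell of the correlation matrix highlights it and, via transform_filter below, drives
# which variable pair the 2D-histogram displays.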
# Calculate values for 2d histogram
value_columns = df[[response_var] + expl_var]
df_2dbinned = pd.concat([fc.compute_2d_histogram(var1, var2, df) for var1 in value_columns for var2 in value_columns])
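# Note: fc.compute_2d_histogram presumably bins each variable pair and returns the counts per 2D
# bin (an assumption about the helper); concatenating the results for all pairs yields one long
# DataFrame that the selection above can filter down to a single pair.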
# 2d binned histogram plot
scat_plot = alt.Chart(df_2dbinned).transform_filter(
var_sel_cor
).mark_rect().encode(
alt.X('value2:N', sort = alt.EncodingSortField(field='raw_left_value2'), axis = alt.Axis(title = "Horizontal variable", labelFontSize = 12)),
alt.Y('value:N', axis = alt.Axis(title = "Vertical variable", labelFontSize = 12), sort = alt.EncodingSortField(field='raw_left_value', order = 'descending')),
alt.Color('count:Q', scale = alt.Scale(scheme='reds'), legend = alt.Legend(title = "Count", orient = "top", gradientLength = 350))
)
# Combine all plots
correlation_plot = alt.vconcat((corr_plot + text).properties(width = 400, height = 400), scat_plot.properties(width = 400, height = 400)).resolve_scale(color = 'independent')
corr_plot1 = (corr_plot + text).properties(width = 400, height = 400)
correlation_plot = correlation_plot.properties(padding = {"left": 50, "top": 5, "right": 5, "bottom": 50})
# hist_2d_plot = scat_plot.properties(height = 350)
if response_var_type == "continuous":
st.altair_chart(correlation_plot, use_container_width = True)
if response_var_type == "binary":
st.altair_chart(correlation_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_cor")))
st.write("")
#-------------------------------------------------------------
# Continuous response variable
if response_var_type == "continuous":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
# Regression information
fm_mlr_reg_col1, fm_mlr_reg_col2 = st.beta_columns(2)
with fm_mlr_reg_col1:
st.write("Regression information:")
st.table(model_full_results["MLR information"].style.set_precision(user_precision))
# Regression statistics
with fm_mlr_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["MLR statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_regStat")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["MLR coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_coef")))
st.write("")
# ANOVA
st.write("ANOVA:")
st.table(model_full_results["MLR ANOVA"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_ANOVA")))
st.write("")
# Heteroskedasticity tests
if MLR_intercept == "Yes":
st.write("Heteroskedasticity tests:")
st.table(model_full_results["MLR hetTest"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_hetTest")))
st.write("")
# Variable importance (via permutation)
fm_mlr_reg2_col1, fm_mlr_reg2_col2 = st.beta_columns(2)
with fm_mlr_reg2_col1:
st.write("Variable importance (via permutation):")
mlr_varImp_table = model_full_results["MLR variable importance"]
st.table(mlr_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_mlr_reg2_col2:
st.write("")
st.write("")
st.write("")
mlr_varImp_plot_data = model_full_results["MLR variable importance"]
mlr_varImp_plot_data["Variable"] = mlr_varImp_plot_data.index
mlr_varImp = alt.Chart(mlr_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(mlr_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_varImp")))
st.write("")
# Graphical output
fm_mlr_figs_col1, fm_mlr_figs_col2 = st.beta_columns(2)
with fm_mlr_figs_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["MLR fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_mlr_figs_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_fitted_data["Fitted"] = model_full_results["MLR fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_obsResVsFit")))
st.write("")
fm_mlr_figs1_col1, fm_mlr_figs1_col2 = st.beta_columns(2)
with fm_mlr_figs1_col1:
st.write("Normal QQ-plot:")
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
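                                # scipy's stats.probplot returns ((theoretical quantiles, ordered values), fit parameters),
                                # so [0][0] extracts the theoretical normal quantiles matching the sorted standardized residuals.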
qq_plot = alt.Chart(qq_plot_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_qqplot")))
with fm_mlr_figs1_col2:
st.write("Scale-Location:")
scale_location_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
scale_location_data["SqrtStandResiduals"] = np.sqrt(abs((residuals - residuals.mean())/residuals.std()))
scale_location_data["Fitted"] = model_full_results["MLR fitted"]
scale_location_data["Index"] = df.index
scale_location = alt.Chart(scale_location_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(scale_location_data["Fitted"]), max(scale_location_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("SqrtStandResiduals", title = "sqrt(|stand. residuals|)", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["SqrtStandResiduals", "Fitted", "Index"]
)
scale_location_plot = scale_location + scale_location.transform_loess("Fitted", "SqrtStandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(scale_location_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_scaleLoc")))
st.write("")
fm_mlr_figs2_col1, fm_mlr_figs2_col2 = st.beta_columns(2)
with fm_mlr_figs2_col1:
st.write("Residuals vs Leverage:")
residuals_leverage_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_leverage_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
residuals_leverage_data["Leverage"] = model_full_results["MLR leverage"]
residuals_leverage_data["Index"] = df.index
residuals_leverage = alt.Chart(residuals_leverage_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Leverage", title = "leverage", scale = alt.Scale(domain = [min(residuals_leverage_data["Leverage"]), max(residuals_leverage_data["Leverage"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals","Leverage", "Index"]
)
residuals_leverage_plot = residuals_leverage + residuals_leverage.transform_loess("Leverage", "StandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_leverage_plot, use_container_width = True)
with fm_mlr_figs2_col2:
st.write("Cook's distance:")
cooksD_data = pd.DataFrame()
cooksD_data["CooksD"] = model_full_results["MLR Cooks distance"]
cooksD_data["Index"] = df.index
cooksD = alt.Chart(cooksD_data, height = 200).mark_bar(size = 2).encode(
x = alt.X("Index", title = "index", scale = alt.Scale(domain = [-1, max(cooksD_data["Index"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("CooksD", title = "Cook's distance", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["CooksD", "Index"]
)
st.altair_chart(cooksD, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_resVsLev_cooksD")))
# Download link for MLR output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["MLR information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["MLR statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["MLR coefficients"].to_excel(excel_file, sheet_name="coefficients")
model_full_results["MLR ANOVA"].to_excel(excel_file, sheet_name="ANOVA")
model_full_results["MLR hetTest"].to_excel(excel_file, sheet_name="heteroskedasticity_tests")
mlr_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
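                            # Encode the in-memory Excel workbook as base64 so it can be embedded as a data-URI download link via st.markdown below.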
b64 = base64.b64encode(excel_file)
dl_file_name= "MLR full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Multiple Linear Regression full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# GAM specific output
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.markdown("**Generalized Additive Models**")
fm_gam_reg_col1, fm_gam_reg_col2 = st.beta_columns(2)
# Regression information
with fm_gam_reg_col1:
st.write("Regression information:")
st.table(model_full_results["GAM information"].style.set_precision(user_precision))
# Regression statistics
with fm_gam_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["GAM statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_regStat")))
st.write("")
# Feature significance
st.write("Feature significance:")
st.table(model_full_results["GAM feature significance"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_featSig")))
st.write("")
# Variable importance (via permutation)
fm_gam_figs1_col1, fm_gam_figs1_col2 = st.beta_columns(2)
with fm_gam_figs1_col1:
st.write("Variable importance (via permutation):")
gam_varImp_table = model_full_results["GAM variable importance"]
st.table(gam_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_gam_figs1_col2:
st.write("")
st.write("")
st.write("")
gam_varImp_plot_data = model_full_results["GAM variable importance"]
gam_varImp_plot_data["Variable"] = gam_varImp_plot_data.index
gam_varImp = alt.Chart(gam_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(gam_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_gam_figs3_col1, fm_gam_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_gam = pd.DataFrame(columns = [pd_var])
pd_data_gam[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam["Partial dependence"] = model_full_results["GAM partial dependence"][pd_var]["pd_values"]
pd_data_gam["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_data_gam["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam = alt.Chart(pd_data_gam, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%", "Partial dependence", "Lower 95%"] + [pd_var]
)
pd_data_ticks_gam = pd.DataFrame(columns = [pd_var])
pd_data_ticks_gam[pd_var] = df[pd_var]
pd_data_ticks_gam["y"] = [model_full_results["GAM partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_gam = alt.Chart(pd_data_ticks_gam, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_gam[pd_var].min(), pd_data_ticks_gam[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
pd_data_gam_lower = pd.DataFrame(columns = [pd_var])
pd_data_gam_lower[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_lower["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_chart_gam_lower = alt.Chart(pd_data_gam_lower, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Lower 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Lower 95%"] + [pd_var]
)
pd_data_gam_upper = pd.DataFrame(columns = [pd_var])
pd_data_gam_upper[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_upper["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam_upper = alt.Chart(pd_data_gam_upper, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Upper 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%"] + [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_gam_figs3_col1:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_gam_figs3_col2:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_partDep")))
st.write("")
# Further graphical output
fm_gam_figs4_col1, fm_gam_figs4_col2 = st.beta_columns(2)
with fm_gam_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["GAM fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_gam_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Generalized Additive Models"]
residuals_fitted_data["Fitted"] = model_full_results["GAM fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_obsResVsFit")))
# Download link for GAM output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["GAM information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["GAM statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["GAM feature significance"].to_excel(excel_file, sheet_name="feature_significance")
gam_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "GAM full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Generalized Additive Models full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
fm_rf_reg_col1, fm_rf_reg_col2 = st.beta_columns(2)
# Regression information
with fm_rf_reg_col1:
st.write("Regression information:")
st.table(model_full_results["RF information"].style.set_precision(user_precision))
# Regression statistics
with fm_rf_reg_col2:
st.write("Regression statistics:")
rf_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE"], columns = ["Value"])
rf_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Random Forest"]
rf_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Random Forest"]
rf_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Random Forest"]
rf_error_est.loc["Residual SE"] = model_full_results["RF Residual SE"]
st.table(rf_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_regStat")))
st.write("")
# Variable importance (via permutation)
fm_rf_figs1_col1, fm_rf_figs1_col2 = st.beta_columns(2)
with fm_rf_figs1_col1:
st.write("Variable importance (via permutation):")
rf_varImp_table = model_full_results["RF variable importance"]
st.table(rf_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_rf_figs1_col2:
st.write("")
st.write("")
st.write("")
rf_varImp_plot_data = model_full_results["RF variable importance"]
rf_varImp_plot_data["Variable"] = rf_varImp_plot_data.index
rf_varImp = alt.Chart(rf_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(rf_varImp, use_container_width = True)
st.write("")
fm_rf_figs2_col1, fm_rf_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_rf_figs2_col1:
st.write("Feature importance (impurity-based):")
rf_featImp_table = model_full_results["RF feature importance"]
st.table(rf_featImp_table.style.set_precision(user_precision))
st.write("")
with fm_rf_figs2_col2:
st.write("")
st.write("")
st.write("")
rf_featImp_plot_data = model_full_results["RF feature importance"]
rf_featImp_plot_data["Variable"] = rf_featImp_plot_data.index
rf_featImp = alt.Chart(rf_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(rf_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_rf_figs3_col1, fm_rf_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_rf = pd.DataFrame(columns = [pd_var])
pd_data_rf[pd_var] = model_full_results["RF partial dependence"][pd_var][1][0]
pd_data_rf["Partial dependence"] = model_full_results["RF partial dependence"][pd_var][0][0]
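                                # The [1][0]/[0][0] indexing above assumes the stored partial dependence follows sklearn's
                                # partial_dependence return convention: (averaged predictions, grid values per feature).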
pd_chart_rf = alt.Chart(pd_data_rf, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_rf = pd.DataFrame(columns = [pd_var])
pd_data_ticks_rf[pd_var] = df[pd_var]
pd_data_ticks_rf["y"] = [model_full_results["RF partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_rf = alt.Chart(pd_data_ticks_rf, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_rf[pd_var].min(), pd_data_ticks_rf[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_rf_figs3_col1:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_rf_figs3_col2:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_partDep")))
st.write("")
# Further graphical output
fm_rf_figs4_col1, fm_rf_figs4_col2 = st.beta_columns(2)
with fm_rf_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["RF fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_rf_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Random Forest"]
residuals_fitted_data["Fitted"] = model_full_results["RF fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_obsResVsFit")))
# Download link for RF output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["RF information"].to_excel(excel_file, sheet_name="regression_information")
rf_error_est.to_excel(excel_file, sheet_name="regression_statistics")
rf_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
rf_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "RF full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Random Forest full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# BRT specific output
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
fm_brt_reg_col1, fm_brt_reg_col2 = st.beta_columns(2)
# Regression information
with fm_brt_reg_col1:
st.write("Regression information:")
st.table(model_full_results["BRT information"].style.set_precision(user_precision))
# Regression statistics
with fm_brt_reg_col2:
st.write("Regression statistics:")
brt_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE"], columns = ["Value"])
brt_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Boosted Regression Trees"]
brt_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Boosted Regression Trees"]
brt_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Boosted Regression Trees"]
brt_error_est.loc["Residual SE"] = model_full_results["BRT Residual SE"]
st.table(brt_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_regStat")))
st.write("")
# Training score (MSE vs. number of trees)
st.write("Training score:")
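                            # The stored train score is presumably GradientBoostingRegressor.train_score_, i.e. the in-sample loss after each boosting iteration.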
train_score = pd.DataFrame(index = range(model_full_results["BRT train score"].shape[0]), columns = ["Training MSE"])
train_score["Training MSE"] = model_full_results["BRT train score"]
train_score["Trees"] = train_score.index+1
train_score_plot = alt.Chart(train_score, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Trees", title = "trees", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [train_score["Trees"].min(), train_score["Trees"].max()])),
y = alt.Y("Training MSE", title = "training MSE", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Training MSE", "Trees"]
)
st.altair_chart(train_score_plot, use_container_width = True)
st.write("")
# Variable importance (via permutation)
fm_brt_figs1_col1, fm_brt_figs1_col2 = st.beta_columns(2)
with fm_brt_figs1_col1:
st.write("Variable importance (via permutation):")
brt_varImp_table = model_full_results["BRT variable importance"]
st.table(brt_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_brt_figs1_col2:
st.write("")
st.write("")
st.write("")
brt_varImp_plot_data = model_full_results["BRT variable importance"]
brt_varImp_plot_data["Variable"] = brt_varImp_plot_data.index
brt_varImp = alt.Chart(brt_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(brt_varImp, use_container_width = True)
st.write("")
fm_brt_figs2_col1, fm_brt_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_brt_figs2_col1:
st.write("Feature importance (impurity-based):")
brt_featImp_table = model_full_results["BRT feature importance"]
st.table(brt_featImp_table.style.set_precision(user_precision))
st.write("")
with fm_brt_figs2_col2:
st.write("")
st.write("")
st.write("")
brt_featImp_plot_data = model_full_results["BRT feature importance"]
brt_featImp_plot_data["Variable"] = brt_featImp_plot_data.index
brt_featImp = alt.Chart(brt_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(brt_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_brt_figs3_col1, fm_brt_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_brt = pd.DataFrame(columns = [pd_var])
pd_data_brt[pd_var] = model_full_results["BRT partial dependence"][pd_var][1][0]
pd_data_brt["Partial dependence"] = model_full_results["BRT partial dependence"][pd_var][0][0]
pd_chart_brt = alt.Chart(pd_data_brt, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_brt = pd.DataFrame(columns = [pd_var])
pd_data_ticks_brt[pd_var] = df[pd_var]
pd_data_ticks_brt["y"] = [model_full_results["BRT partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_brt = alt.Chart(pd_data_ticks_brt, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_brt[pd_var].min(), pd_data_ticks_brt[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_brt_figs3_col1:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_brt_figs3_col2:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_partDep")))
st.write("")
# Further graphical output
fm_brt_figs4_col1, fm_brt_figs4_col2 = st.beta_columns(2)
with fm_brt_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["BRT fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_brt_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Boosted Regression Trees"]
residuals_fitted_data["Fitted"] = model_full_results["BRT fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_obsResVsFit")))
# Download link for BRT output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["BRT information"].to_excel(excel_file, sheet_name="regression_information")
brt_error_est.to_excel(excel_file, sheet_name="regression_statistics")
brt_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
brt_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "BRT full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Boosted Regression Trees full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
fm_ann_reg_col1, fm_ann_reg_col2 = st.beta_columns(2)
# Regression information
with fm_ann_reg_col1:
st.write("Regression information:")
st.table(model_full_results["ANN information"].style.set_precision(user_precision))
# Regression statistics
with fm_ann_reg_col2:
st.write("Regression statistics:")
ann_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE", "Best loss"], columns = ["Value"])
ann_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Artificial Neural Networks"]
ann_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Artificial Neural Networks"]
ann_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Artificial Neural Networks"]
ann_error_est.loc["Residual SE"] = model_full_results["ANN Residual SE"]
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
ann_error_est.loc["Best loss"] = model_full_results["ANN loss"]
st.table(ann_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_regStat")))
st.write("")
# Loss curve (loss vs. number of iterations (epochs))
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
st.write("Loss curve:")
loss_curve = pd.DataFrame(index = range(len(model_full_results["ANN loss curve"])), columns = ["Loss"])
loss_curve["Loss"] = model_full_results["ANN loss curve"]
loss_curve["Iterations"] = loss_curve.index+1
loss_curve_plot = alt.Chart(loss_curve, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Iterations", title = "iterations (epochs)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [loss_curve["Iterations"].min(), loss_curve["Iterations"].max()])),
y = alt.Y("Loss", title = "loss", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Loss", "Iterations"]
)
st.altair_chart(loss_curve_plot, use_container_width = True)
st.write("")
fm_ann_figs1_col1, fm_ann_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_ann_figs1_col1:
st.write("Variable importance (via permutation):")
ann_varImp_table = model_full_results["ANN variable importance"]
st.table(ann_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_ann_figs1_col2:
st.write("")
st.write("")
st.write("")
ann_varImp_plot_data = model_full_results["ANN variable importance"]
ann_varImp_plot_data["Variable"] = ann_varImp_plot_data.index
ann_varImp = alt.Chart(ann_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(ann_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_ann_figs2_col1, fm_ann_figs2_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_ann = pd.DataFrame(columns = [pd_var])
pd_data_ann[pd_var] = (model_full_results["ANN partial dependence"][pd_var][1][0]*(df[pd_var].std()))+df[pd_var].mean()
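                                # The ANN is apparently fitted on standardized inputs, so the partial dependence grid is mapped back to original units via x*std + mean.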
pd_data_ann["Partial dependence"] = model_full_results["ANN partial dependence"][pd_var][0][0]
pd_chart_ann = alt.Chart(pd_data_ann, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_ann = pd.DataFrame(columns = [pd_var])
pd_data_ticks_ann[pd_var] = df[pd_var]
pd_data_ticks_ann["y"] = [model_full_results["ANN partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_ann = alt.Chart(pd_data_ticks_ann, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_ann[pd_var].min(), pd_data_ticks_ann[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_ann_figs2_col1:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_ann_figs2_col2:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_partDep")))
st.write("")
# Further graphical output
fm_ann_figs3_col1, fm_ann_figs3_col2 = st.beta_columns(2)
with fm_ann_figs3_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["ANN fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_ann_figs3_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Artificial Neural Networks"]
residuals_fitted_data["Fitted"] = model_full_results["ANN fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_obsResVsFit")))
# Download link for ANN output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["ANN information"].to_excel(excel_file, sheet_name="regression_information")
ann_error_est.to_excel(excel_file, sheet_name="regression_statistics")
ann_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "ANN full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Artificial Neural Networks full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# Performance metrics across all models
st.markdown("**Model comparison**")
st.write("Performance metrics:")
model_comp_sort_enable = (model_full_results["model comparison"]).transpose()
st.write(model_comp_sort_enable.style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompPerf")))
st.write("")
model_full_res = pd.DataFrame(index = ["min", "25%-Q", "median", "75%-Q", "max"], columns = sb_ML_alg)
for m in sb_ML_alg:
model_full_res.loc["min"][m] = model_full_results["residuals"][m].min()
model_full_res.loc["25%-Q"][m] = model_full_results["residuals"][m].quantile(q = 0.25)
model_full_res.loc["median"][m] = model_full_results["residuals"][m].quantile(q = 0.5)
model_full_res.loc["75%-Q"][m] = model_full_results["residuals"][m].quantile(q = 0.75)
model_full_res.loc["max"][m] = model_full_results["residuals"][m].max()
st.write("Residuals distribution:")
st.write((model_full_res).transpose().style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompRes")))
st.write("")
# Download link for model comparison output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_comp_sort_enable.to_excel(excel_file, sheet_name="performance_metrics")
model_full_res.transpose().to_excel(excel_file, sheet_name="residuals_distribution")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Model comparison full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download model comparison output</a>
""",
unsafe_allow_html=True)
st.write("")
#-------------------------------------------------------------
# Binary response variable
if response_var_type == "binary":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
# Regression information
fm_mlr_reg_col1, fm_mlr_reg_col2 = st.beta_columns(2)
with fm_mlr_reg_col1:
st.write("Regression information:")
st.table(model_full_results["MLR information"].style.set_precision(user_precision))
# Regression statistics
with fm_mlr_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["MLR statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_regStat")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["MLR coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_coef")))
st.write("")
# ANOVA
st.write("ANOVA:")
st.table(model_full_results["MLR ANOVA"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_ANOVA")))
st.write("")
# Heteroskedasticity tests
if MLR_intercept == "Yes":
st.write("Heteroskedasticity tests:")
st.table(model_full_results["MLR hetTest"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_hetTest")))
st.write("")
# Variable importance (via permutation)
fm_mlr_reg2_col1, fm_mlr_reg2_col2 = st.beta_columns(2)
with fm_mlr_reg2_col1:
st.write("Variable importance (via permutation):")
mlr_varImp_table = model_full_results["MLR variable importance"]
st.table(mlr_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_mlr_reg2_col2:
st.write("")
st.write("")
st.write("")
mlr_varImp_plot_data = model_full_results["MLR variable importance"]
mlr_varImp_plot_data["Variable"] = mlr_varImp_plot_data.index
mlr_varImp = alt.Chart(mlr_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(mlr_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_varImp")))
st.write("")
# Graphical output
fm_mlr_figs_col1, fm_mlr_figs_col2 = st.beta_columns(2)
with fm_mlr_figs_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["MLR fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_mlr_figs_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = | pd.DataFrame() | pandas.DataFrame |
# Load libraries
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
import numpy as np
from sklearn import tree
from sklearn import preprocessing
from sklearn.tree import export_graphviz
import matplotlib.pyplot as plt
#reading Data
path = "house-votes-84.data"
data=pd.read_csv(path,names=['Class Name','handicapped-infants','water-project-cost-sharing'
,'adoption-of-the-budget-resolution','physician-fee-freeze',
'el-salvador-aid','religious-groups-in-schools','anti-satellite-test-ban',
'aid-to-nicaraguan-contras','mx-missile','immigration','synfuels-corporation-cutback'
,'education-spending','superfund-right-to-sue','crime','duty-free-exports','export-administration-act-south-africa'],
na_values=["?"])
colNames = list(data.columns)
#replace missing values ('?') with the most frequent value (mode) of each column
data = data.replace('?', np.NaN)
for i in colNames:
data[i] = data[i].fillna(data[i].mode()[0])
#Encode categorical (string) data to numeric values
data2 = data.copy()
for i in colNames:
#creating label encoder
le = preprocessing.LabelEncoder()
#convert string into numbers
data2[i] = le.fit_transform(data2[i])
#features (all vote columns)
X =data2.iloc[ : ,1:]
#class label (party affiliation)
y =data2.iloc[ : ,0:1]
testSize = [0.6,0.5,0.4,0.3,0.2]
maxAccuracy = 0
accuracyList = []
bestCLF = None
iterationNumber = 0
testSizeIndex = 0
treeNodesCount=[]
for ieration in range(1,4):
List = []
nodes=[]
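    # per-iteration accumulators: test accuracy and fitted-tree node count for each test-size split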
for i in range(5):
#split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testSize[i])
        #create the decision tree classifier
clf = tree.DecisionTreeClassifier()
#fit the data
clf = clf.fit(X_train,y_train)
#predict test data
y_predict = clf.predict(X_test)
List.append(metrics.accuracy_score(y_test, y_predict))
nodes.append(clf.tree_.node_count)
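        # clf.tree_.node_count = total number of nodes in the fitted tree (used below to relate tree size to accuracy)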
if List[i] > maxAccuracy:
maxAccuracy = List[i]
bestCLF = clf
iterationNumber = ieration
testSizeIndex = testSize[i]
print("Accuracy with : ", testSize[i]," and run time number : ",ieration," = ",List[i])
print("Max Accuracy of ",ieration," st Time = ",max(List))
print("Min Accuracy of ",ieration," st Time = ",min(List))
print("Mean Accuracy of ",ieration,"'st Time = ",sum(List)/len(List))
print("Max Size of ",ieration,"'st Time = ",max(nodes))
print("Min Size of ",ieration,"'st Time = ",min(nodes))
print("Mean Size of ",ieration,"'st Time = ",int(sum(nodes)/len(nodes)))
accuracyList.append(List)
treeNodesCount.append(nodes)
print("---------------------------------------------------------")
print("The max accuracy of iteration:",iterationNumber," , testSize: ",testSizeIndex," = ",maxAccuracy)
#plot Nodes number against Accuracy of final tree iteration
SizesAndAccurcy = []
z = 0
for i in testSize:
List = []
List.append(str(i))
for j in range(3):
List.append(accuracyList[j][z])
z += 1
SizesAndAccurcy.append(List)
fig,ax=plt.subplots(figsize=(7,5))
ax.plot(treeNodesCount[iterationNumber-1],accuracyList[iterationNumber-1])
ax.set_xlabel('Nodes Number')
ax.set_ylabel('Accuracy')
ax.set_title('Nodes Number vs Accuracy')
plt.show()
#plot Accuracy against test size of each iteration
df = | pd.DataFrame(SizesAndAccurcy, columns=['Test Size', 'Itreation 1', 'Itreation 2', 'Itreation 3']) | pandas.DataFrame |
from datetime import timedelta
from math import ceil
import pandas as pd
from pyparsing import col
import scipy.sparse
from sklearn.decomposition import PCA
from sklearn.feature_extraction import DictVectorizer
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from .lib import Lib
from .vectorizer import Vectorizer
from wordcloud import WordCloud, STOPWORDS
from matplotlib import pyplot as plt
from sklearn.preprocessing import minmax_scale
from sklearn.compose import ColumnTransformer
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator as SG
from sklearn.datasets import load_iris, load_boston
from collections import Counter
from .chart import Chart
import numpy as np
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
class DataFrame:
"""
"""
__vectorizer = None
__generator = None
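    # Class-level caches, presumably reused by vectorization and time-series helper methods defined later in the class (not shown here).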
def __init__(self, data_link=None, columns_names_as_list=None, data_types_in_order=None, delimiter=',',
data_type='csv', has_header=True, line_index=None, skip_empty_line=False, sheet_name='Sheet1'):
if data_link is not None:
if data_type == 'csv':
if has_header is True:
self.__dataframe = pd.read_csv(data_link, encoding='utf-8', delimiter=delimiter,
low_memory=False, error_bad_lines=False, skip_blank_lines=False)
else:
self.__dataframe = pd.read_csv(data_link, encoding='utf-8', delimiter=delimiter,
low_memory=False, error_bad_lines=False, skip_blank_lines=False,
header=None)
elif data_type == 'json':
self.__dataframe = | pd.read_json(data_link, encoding='utf-8') | pandas.read_json |
# -*- coding: utf-8 -*-
"""
Created on Sun May 22 10:30:01 2016
SC process signups functions
@author: tkc
"""
#%%
import pandas as pd
import numpy as np
from datetime import datetime, date
import re, glob, math
from openpyxl import load_workbook # writing to Excel
from PIL import Image, ImageDraw, ImageFont
import tkinter as tk
import pkg.SC_config as cnf # _OUTPUT_DIR and _INPUT_DIR
#%%
def combinephrases(mylist):
''' Combine list of phrases using commas & and '''
if len(mylist)==1:
return str(mylist[0])
elif len(mylist)==2:
tempstr=str(mylist[0])+ ' and ' +str(mylist[1])
return tempstr
else:
rest=mylist[:-1]
rest=[str(i) for i in rest]
last=mylist[-1]
tempstr=', '.join(rest) +' and ' + str(last)
        return tempstr
#%%
def writetoxls(df, sheetname, xlsfile):
''' Generic write of given df to specified tab of given xls file '''
book=load_workbook(xlsfile)
writer=pd.ExcelWriter(xlsfile, engine='openpyxl', datetime_format='mm/dd/yy', date_format='mm/dd/yy')
writer.book=book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
df.to_excel(writer,sheet_name=sheetname,index=False) # this overwrites existing file
writer.save() # saves xls file with all modified data
return
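# Example usage (hypothetical sheet/file names):
# writetoxls(signups, 'Signups', cnf._INPUT_DIR + '\\SC_signups.xlsx')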
def loadtransfers(df, signups):
''' Load transferred players and add to signups (then run player ID);
transfers added as normal players but need fake billing entries
'''
df=df.rename(columns={'Fname':'First','Lname':'Last','Street':'Address','Parish':'Parish of Registration'})
df=df.rename(columns={'Phone':'Phone1','Birthdate':'DOB','Sex':'Gender','Open/Closed':'Ocstatus'})
# Replace Girl, Boy with m f
df.loc[:,'Gender']=df.Gender.replace('F','Girl')
df.loc[:,'Gender']=df.Gender.replace('M','Boy')
# Manually enter sport
print('Enter sport for transferred players')
sport=input()
df.loc[:,'Sport']=sport
df=df.dropna(subset=['First']) # remove blank rows if present
mycols=[col for col in df if col in signups]
df=df[mycols]
df=formatnamesnumbers(df)
# place date/transfer in timestamp
mystamp=datetime.strftime(datetime.now(),'%m/%d/%y')+' transfer'
df.loc[:,'Timestamp']=mystamp
mycols=signups.columns
signups=signups.append(df, ignore_index=True)
signups=signups[mycols]
return signups
def packagetransfers(teams, Mastersignups, famcontact, players, season, year, acronyms, messfile):
''' Package roster and contact info by sport- school and save as separate xls files
also generate customized e-mails in single log file (for cut and paste send to appropriate persons)
args:
teams - loaded team list
mastersignups - signups w/ team assignment
players -player DB
famcontact - family contact db
season - Fall, Winter or Spring
year - starting sports year (i.e. 2019 for 2019-20 school year)
acronyms - school/parish specific abbreviations
messfile - e-mail message template w/ blanks
returns:
'''
teams=teams[pd.notnull(teams['Team'])]
transferteams=np.ndarray.tolist(teams[teams['Team'].str.contains('#')].Team.unique())
transSU=Mastersignups[Mastersignups['Team'].isin(transferteams)]
# ensure that these are from correct season/year
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
transSU=transSU.loc[(transSU['Sport'].isin(sportlist)) & (transSU['Year']==year)] # season is not in mastersignups... only individual sports
# get family contact info from famcontacts
transSU=pd.merge(transSU, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
transSU=pd.merge(transSU, players, how='left', on=['Plakey'], suffixes=('','_r2'))
# get division from Teams xls (for roster)
transSU=pd.merge(transSU, teams, how='left', on=['Team'], suffixes=('','_r3')) # effectively adds other team info for roster toall players
transSU.loc[:,'Role']='Player' # add column for role
# transSU['Open/Closed']='Closed'
# Sort by grade pre-split
transSU.loc[:,'Grade']=transSU.Grade.replace('K',0)
transSU.loc[:,'Grade']=transSU.Grade.apply(int)
transSU=transSU.sort_values(['Grade'], ascending=True)
    transSU.loc[:,'Grade']=transSU.Grade.replace(0,'K') # restore 'K' after sorting (it was temporarily coded as 0 above)
# Column for sorting by transferred to school
transSU.loc[:,'Transchool']=transSU['Team'].str.split('#').str[0]
grouped=transSU.groupby(['Sport','Transchool'])
for [sport, school], group in grouped:
# prepare roster tab
xlsname=cnf._OUTPUT_DIR+'\\Cabrini_to_'+school+'_'+sport+'_'+str(year)+'.xlsx'
writer=pd.ExcelWriter(xlsname, engine='openpyxl')
Transferroster=organizeroster(group)
Transferroster=Transferroster.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
Transferroster=replaceacro(Transferroster,acronyms)
Transferroster.to_excel(writer,sheet_name='roster',index=False)
# prep contacts tab
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Team']
Transfercontacts=group[mycols]
Transfercontacts.to_excel(writer, sheet_name='contacts', index=False)
writer.save()
# Now generate list of e-mails for all schools/directors
logfile='transfers_director_emails_log.txt'
with open(logfile,'w+') as emaillog:
# Read generic file to sport director
with open(messfile, 'r') as file:
blankmessage=file.read()
for [sport, school], group in grouped:
plagroup=group.groupby(['Grade', 'Gender'])
platypes=[] # list of # of players by grade, gender
gradedict={'K':'K', 1:'1st', 2:'2nd',3:'3rd',4:'4th',5:'5th',6:'6th', 7:'7th',8:'8th'}
genderdict={'f':'girls', 'F':'girls','m':'boys','M':'boys'}
for [grade, gender], group in plagroup:
numplays=str(int(group['Grade'].count()))
grname=gradedict.get(grade)
genname=genderdict.get(gender)
platypes.append(numplays+' '+grname+' '+genname)
plalist=combinephrases(platypes)
thismess=blankmessage.replace('$SCHOOL', school)
thismess=thismess.replace('$SPORT', sport)
thismess=thismess.replace('$PLALIST', plalist)
emaillog.write(thismess)
emaillog.write('\n\n')
return
def findcards():
'''Search ID cards folder and return player # and file link
cards resized to 450x290 pix jpg in photoshop (scripts-image processor)
keys are either player number as string or coach CYC ID, vals are links to files'''
cardlist=glob.glob('%s\\IDcards\\*.jpg' %cnf._OUTPUT_DIR, recursive=True)
# construct list of [card #, filename]
nums=[i.split('\\')[-1] for i in cardlist]
nums=[i.split('_')[0] if '_' in i else i.split('--')[0] for i in nums ]
cards={} # dict for card numbers/filenames
for i,num in enumerate(nums):
cards.update({num: cardlist[i]})
return cards
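# Usage sketch (hedged): findcards takes no args and scans cnf._OUTPUT_DIR\IDcards for jpgs;
# keys of the returned dict are player numbers (or coach CYC IDs) as strings.
''' TESTING
cards=findcards()
print(len(cards), 'ID card images found')
print(cards.get('1234','no card on file'))  # '1234' is an illustrative player number
'''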
def makethiscard(IDlist, team):
    ''' From a team's list of ID-card file links (or player names where a card is missing),
    open each card image and composite them into a single sheet image '''
# make the master image and determine image array size
margin=10 # pix on all sides
if len(IDlist)<11: # use 2 x 5 array (horiz)
wide=2
high=5
elif len(IDlist)<13: # 4w x 3 h (vert)
wide=4
high=3
elif len(IDlist)<22: # 3x by 5-7 high (horiz); max 21
wide=3
high=math.ceil(len(IDlist)/3)
else: # more than 21 ... yikes
wide=3
high=math.ceil(len(IDlist)/3)
cardimage = Image.new('RGB', (450*wide+2*margin, 300*high+2*margin), "white") # blank image of correct size
draw=ImageDraw.Draw(cardimage) # single draw obj for adding missing card names
ttfont=ImageFont.truetype('arial.ttf', size=36)
for i,fname in enumerate(IDlist):
        row=i//high # floor division gives horizontal position (which column of cards)
        col=i%high # modulo gives vertical position within that column
xpos=margin+row*450
ypos=margin+col*300
try:
thiscard=Image.open(fname)
thiscard=thiscard.resize((450, 300), Image.ANTIALIAS)
cardimage.paste(im=thiscard, box=(xpos, ypos)) # paste w/ xpos,ypos as upper left
except: # occurs when "first last" present instead of file name/path
# blankcard=Image.new('RGB', (450, 300)) # make blank image as placeholder
draw.text((xpos+50,ypos+100),fname,font=ttfont, fill="red")
return cardimage
''' TESTING
i=0 team=teamlist[i]
'''
def makeCYCcards(df, players, teams, coaches, season, year, **kwargs):
''' From mastersignups and teams, output contact lists for all teams/all sports separately
team assignments must be finished
args:
df -- mastersignups dataframe
players - player info dataframe
teams - this year's teams csv
coaches - full coach CYC info list
season - Fall, Winter or Spring
kwargs:
showmissing - True (shows missing player's name); False- skip missing player
otherSchools - default False (also make card sheets for transferred teams/players)
kwargs={'showmissing':False}
missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':True} )
missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':False} )
'''
# Slice by sport: Basketball (null for winter?), Soccer, Volleyball, Baseball, T-ball, Softball, Track)
cards=findcards() # dictionary with number: filename combo for existing CYC cards
df=df[(df['Year']==year)]
df=df.reset_index(drop=True)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df[df['Sport'].isin(sportlist)] # season is not in mastersignups... only individual sports
# Make list of teams that need cards (all track and others >1st grade)
def processGrade(val):
if val=='K':
return 0
else:
return int(val)
teams.loc[:,'Grade'] = teams['Grade'].apply(lambda x:processGrade(x))
if not kwargs.get('otherSchools', False):
# all transfer teams with contain # (e.g. SMOS#3G) so remove these
# dropped by default
teams = teams[~teams['Team'].str.contains('#')]
# need track teams or any team from grades 2+
cardTeamList= teams[ (teams['Grade']>1) | (teams['Sport']=='Track') ]['Team'].unique()
df=df[ df['Team'].isin(cardTeamList) ]
df=df.sort_values(['Last'])
# plakeys as string will be easiest for below matching
df.loc[:,'Plakey']=df['Plakey'].astype(int)
df.loc[:,'Plakey']=df['Plakey'].astype(str)
def getName(gr, pk):
# get name from plakey as string
match=gr[gr['Plakey']==pk]
name=match.iloc[0]['First'] + ' ' + match.iloc[0]['Last']
return name
teamgrouped = df.groupby(['Team'])
missinglist=[] # list of plakeys with missing card
for team, gr in teamgrouped:
# keys in card dict are strings
IDlist = [str(int(i)) for i in gr.Plakey.unique()]
missinglist.extend([i for i in gr.Plakey.unique() if i not in cards.keys() ])
if not kwargs.get('showmissing', False):
# Shows only valid cards, drops missing names
IDlist = [ cards.get(i) for i in IDlist if i in cards.keys() ]
filename='Cards_'+ team +'.jpg'
else: # show cards and missing name when card image not in IDcards folder
IDlist = [cards.get(i) if i in cards.keys() else getName(gr, i) for i in IDlist ]
filename='Cards_'+ team +'_all.jpg'
# get team's coaches
IDlist.extend(getcoachIDs(team, teams, coaches, cards)) # add coach ID image file or first/last if missing
cardimage =makethiscard(IDlist, team) # directly saved
# save the card file
cardimage.save(cnf._OUTPUT_DIR+'\\'+filename)
missingcards=players[players['Plakey'].isin(missinglist)]
missingcards=missingcards.sort_values(['Grade','Last'])
return missingcards
def getcoachIDs(team, teams, coaches, cards):
''' Returns CYC IDs for all team's coaches '''
thisteam=teams[teams['Team']==team]
IDlist=[]
if len(thisteam)!=1:
print(team, 'not found in current teams list')
return IDlist # blank list
thisteam=thisteam.dropna(subset=['Coach ID'])
if len(thisteam)!=1:
print('Coach ID not found for', team)
return IDlist # blank list
if thisteam.iloc[0]['Coach ID']!='': # possibly blank
thisID=thisteam.iloc[0]['Coach ID'].strip()
if thisID in cards:
IDlist.append(cards.get(thisID,'')) # file path to this coach's ID
else: # get first/last
thiscoach=coaches[coaches['Coach ID']==thisID]
if len(thiscoach)==1:
IDlist.append(thiscoach.iloc[0]['Fname']+' '+thiscoach.iloc[0]['Lname'])
else:
print("Couldn't find coach ", thisID)
thisteam=thisteam.dropna(subset=['AssistantIDs'])
if len(thisteam)==1: # grab asst IDs if they exist
asstIDs=thisteam.iloc[0]['AssistantIDs']
asstIDs=[str(s).strip() for s in asstIDs.split(",")]
for i, asstID in enumerate(asstIDs):
if asstID in cards:
IDlist.append(cards.get(asstID,'')) # found assistant coaches ID card image
else: # can't find ... get assistant first last
thisasst=coaches[coaches['Coach ID']==asstID] # matching asst coach row
if len(thisasst)==1:
IDlist.append(thisasst.iloc[0]['Fname']+' '+thisasst.iloc[0]['Lname'])
else:
print("Couldn't find coach ", asstID)
return IDlist
def autocsvbackup(df, filename, newback=True):
    ''' Pass df (e.g. players) and a basename (e.g. "family_contact"); finds the list of existing
    backups and keeps ones of certain ages based on a targetdates list;
    newback=True always forces a new backup here (unclear why newback=False was ever needed)
'''
# TODO fix this!
pass
return
def parseDate(val):
'''
    Convert an assorted date string to datetime.date; any trailing time substring
    (e.g. 221100 or 22:11:00, sometimes w/ UTC) is stripped and ignored.
    Handled date formats: 1) 03/01/2018, 3/1/2018, 3/1/18 or 03/01/18
    2) 2019-01-01 or 2019-1-1  3) 01-01-2019 (year last)
'''
if not isinstance(val, str):
return val
else:
if ' ' in val: # Remove time substring (but will fail for 3 Oct 2019)
val=val.split(' ')[0] # strip time substring if present
patterns=['\d{1,2}/\d{1,2}/\d{2,4}', '\d{4}-\d{1,2}-\d{1,2}', '\d{1,2}-\d{1,2}-\d{4}']
for i, patt in enumerate(patterns):
match=re.search(r'%s' %patt, val)
if match:
if i==0: # Extract 03/16/2018 (or rarely 28/10/2019 style)
try:
(mo,dy,yr)=[int(i) for i in val.split('/')]
if yr<100 and len(str(yr))==2: # handle 2 digit year
yr=int('20'+str(yr))
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return None
if i==1: # extract 2017-01-01 style (year first)
try:
(yr,mo,dy)=[int(i) for i in val.split('-')]
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return val
if i==2: # extract 01-01-2019 style (year last)
try:
(mo,dy,yr)=[int(i) for i in val.split('-')]
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return val
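# Quick sanity checks for parseDate (illustrative values only); each call should return a
# datetime.date, or fall through to the original string/None if parsing fails.
''' TESTING
parseDate('03/16/2018')      # month/day/year with 4-digit year
parseDate('3/1/18 22:11:00') # 2-digit year; trailing time substring is stripped
parseDate('2019-01-01')      # ISO-style, year first
parseDate('01-01-2019')      # year-last dashed variant
'''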
def loadProcessPlayerInfo():
    '''Loads and processes players & family contacts (but not the signup file);
    takes no arguments; returns (players, famcontact) dataframes
    '''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
famcontact=formatnamesnumbers(famcontact)
return players, famcontact
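# Usage sketch (hedged): requires players.csv and family_contact.csv under cnf._INPUT_DIR.
''' TESTING
players, famcontact = loadProcessPlayerInfo()
print(len(players), 'players;', len(famcontact), 'families')
'''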
def loadProcessGfiles(gsignups, season, year):
'''Loads and processes players, family contacts and signup file, gets active
season and year
args:
gsignups -- google signups
        season - 'Fall', 'Winter', or 'Spring'
year - 4 digit int (uses fall value all school year.. ie. 2018-19 year is always
2018)
'''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
if season=='Winter':
gsignups['Sport']='Basketball'
# TODO determine where multiple sports converted to separate lines
duplicated=gsignups[gsignups.duplicated(subset=['First', 'Last','Grade','Sport'])]
if len(duplicated)>0:
print('Remove duplicate signups for %s' %", ".join(duplicated.Last.unique().tolist()))
gsignups=gsignups.drop_duplicates(subset=['First', 'Last','Grade','Sport'])
gsignups.loc[:,'Sport']=gsignups['Sport'].str.replace('Volleyball','VB')
#gsignups.loc[:,'Sport']=gsignups.loc[:,'Sport'].str.replace('Volleyball','VB').copy()
#gsignups.loc[:,'Sport']=gsignups['Sport'].replace({'Volleyball':'VB'}, regex=True).copy()
missing=[i for i in ['Famkey','Plakey'] if i not in gsignups.columns]
for col in missing: # add blank vals
gsignups.loc[gsignups.index, col]=np.nan
# convert assorted DOB strings to datetime.date
if not isinstance(gsignups.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
gsignups.loc[:,'DOB']=gsignups.DOB.apply(lambda x: parseDate(x))
# Get year from signup file name
outputduplicates(gsignups) # quick check of duplicates output in console window (already removed from signups)
gsignups=formatnamesnumbers(gsignups) # format phone numbers, names to title case, standardize schools, etc.
famcontact=formatnamesnumbers(famcontact)
def processGkey(val):
''' Some plakey/famkey copied to drive... must convert nan(float), whitespace or
number as string to either nan or int
'''
if isinstance(val, str):
val=''.join(val.split(' '))
if val=='':
return np.nan
else:
try:
return int(val)
except:
return np.nan
else:
return np.nan
# ensure gsignups has only int or nan (no whitespace)
gsignups.loc[:,'Plakey']=gsignups['Plakey'].apply(lambda x: processGkey(x))
gsignups.loc[:,'Famkey']=gsignups['Famkey'].apply(lambda x: processGkey(x))
return players, famcontact, gsignups
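# Usage sketch (hedged): gsignups is a dataframe already read from the google-drive signup
# sheet (the read itself happens elsewhere); season/year must match the current sports season.
''' TESTING
players, famcontact, gsignups = loadProcessGfiles(gsignups, 'Fall', 2019)
'''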
def loadprocessfiles(signupfile):
'''Loads and processes players, family contacts and signup file, gets active
season and year '''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if type(players.DOB[0])!=pd.Timestamp: # sometimes direct import to pd timestamp works, other times not
try:
            players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x)) # convert each DOB to datetime.date
except:
print('Failure converting player DOB to datetime/timestamp')
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
# read this season's sports signup file and rename columns
if signupfile.endswith('.csv'):
SUraw=pd.read_csv(signupfile)
elif 'xls' in signupfile:
try:
SUraw=pd.read_excel(signupfile, sheetname='Raw') # may or may not have plakey/famkey
except:
SUraw=pd.read_excel(signupfile)
if SUraw.shape[1]==30 and 'Plakey' in SUraw.columns:
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School','Grade',
'Address','Zip','Parish','Sport','AltPlacement','Ocstatus','Pfirst1',
'Plast1','Phone1','Text1','Email','Othercontact','Coach','Pfirst2','Plast2',
'Phone2','Text2','Email2','Coach2','Unisize','Unineed','Plakey','Famkey']
elif SUraw.shape[1]==28 and 'Plakey' in SUraw.columns:
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School','Grade',
'Address','Zip','Parish','Sport','AltPlacement','Ocstatus','Pfirst1',
'Plast1','Phone1','Text1','Email','Othercontact','Coach','Pfirst2','Plast2',
'Phone2','Text2','Email2','Coach2','Plakey','Famkey']
elif SUraw.shape[1]==26 and 'Plakey' not in SUraw.columns: # Raw value without plakey and famkey
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School',
'Grade','Address','Zip','Parish','Sport','AltPlacement','Ocstatus',
'Pfirst1','Plast1','Phone1','Text1','Email','Othercontact','Coach',
'Pfirst2','Plast2','Phone2','Text2','Email2','Coach2']
elif SUraw.shape[1]==28 and 'Plakey' not in SUraw.columns: # Raw value without plakey and famkey
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School',
'Grade','Address','Zip','Parish','Sport','AltPlacement','Ocstatus',
'Pfirst1','Plast1','Phone1','Text1','Email','Othercontact','Coach',
'Pfirst2','Plast2','Phone2','Text2','Email2','Coach2','Unisize','Unineed']
SUraw.loc[SUraw.index,'Plakey']=np.nan # add if absent
SUraw.loc[SUraw.index,'Famkey']=np.nan
signups=SUraw.drop_duplicates(subset=['First', 'Last','Grade','Sport'])
signups['Sport'].replace({'Volleyball':'VB'},inplace=True, regex=True)
# Get year from signup file name
season=re.match(r'(\D+)', signupfile).group(0) # season at string beginning followed by year (non-digit)
if '\\' in season: # remove file path problem
season=season.split('\\')[-1]
year=int(re.search(r'(\d{4})', signupfile).group(0)) # full year should be only number string in signups file
outputduplicates(SUraw) # quick check of duplicates output in console window (already removed from signups)
signups=formatnamesnumbers(signups) # format phone numbers, names to title case, standardize schools, etc.
famcontact=formatnamesnumbers(famcontact)
return players, famcontact, signups, season, year
def findavailablekeys(df, colname, numkeys):
'''Pass df and colname, return a defined number of available keys list
used for players, families, signups, etc.
'''
# list comprehension
allnums=[i for i in range(1,len(df))]
usedkeys=df[colname].unique()
usedkeys=np.ndarray.tolist(usedkeys)
availkeys=[i for i in allnums if i not in usedkeys]
if len(availkeys)<numkeys: # get more keys starting at max+1
needed=numkeys-len(availkeys)
for i in range(0,needed):
nextval=int(max(usedkeys)+1) # if no interior vals are available find next one
availkeys.append(nextval+i)
availkeys=availkeys[:numkeys] # truncate and only return the requested number of needed keys
return availkeys
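# Usage sketch (hedged): grab unused key values before adding new players/families.
''' TESTING
newplakeys=findavailablekeys(players, 'Plakey', 5)  # five unused player keys
newfamkeys=findavailablekeys(famcontact, 'Famkey', 2)
'''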
def organizeroster(df):
''' Renaming, reorg, delete unnecessary columns for CYC roster output
already split by sport and year'''
df=df.rename(columns={'First':'Fname','Last':'Lname','Address':'Street','Parish_registration':'Parish of Registration'})
df=df.rename(columns={'Parish_residence':'Parish of Residence','Phone1':'Phone','DOB':'Birthdate','Gender':'Sex'})
df=df.rename(columns={'Email1':'Email'})
    # replace Girl, Boy with F, M
    df.loc[:,'Sex']=df.Sex.replace('Girl','F').replace('Boy','M')
df.loc[:,'Sex']=df.Sex.str.upper() # ensure uppercase
# Convert date format to 8/25/2010 string format
mycols=['Fname', 'Lname', 'Street', 'City', 'State', 'Zip', 'Phone', 'Email', 'Birthdate', 'Sex', 'Role', 'Division', 'Grade', 'Team', 'School', 'Parish of Registration', 'Parish of Residence', 'Coach ID']
df=df[mycols] # put back in desired order
df=df.sort_values(['Team'])
return df
'''TESTING row=tempplay.iloc[7]
signups=signups[signups['Last']=='Elston']
'''
def processdatachanges(signups, players, famcontact, year):
'''Pass SC signups subset from google drive, update address for more up-to-date
contact information, new address, etc.
must start here if troubleshooting
args:
signups -- online signups file (normally google drive)
players - player DOB, grade, etc
famcontact- family contact info
year - sports year (int); e.g. 2019 for 2019-20 school year
'''
# Using all entries from signups (manual and gdrive)
# Updates from paper signups should be done directly to famcontact and players csv files (skip entirely)
'''
signups.Timestamp=pd.to_datetime(signups.Timestamp, errors='coerce') # converts to naT or timestamp
gdsignups=signups.dropna(subset=['Timestamp']) # drops manual entries (no google drive timestamp)
'''
# merge w/ players and update grade, recalc grade adjustment, and school
# must use left merge to keep correct indices from players df (inner causes reindexing)
players=players.reset_index(drop=True)
tempplay=pd.merge(players, signups, how='inner', on=['Plakey'], suffixes=('','_n'))
tempplay=tempplay.dropna(subset=['Gender_n']) # this drops all without a google drive entry
for index, row in tempplay.iterrows():
upkwargs={}
# Skip approval for grade updates
if row.Grade!=row.Grade_n: # grade discrepancy between players.csv and current signup
match=players[players['Plakey']==row.Plakey]
if len(match)==1:
thisind=match.index[0]
# update player grade (no approval)
players.loc[thisind,'Grade']=row.Grade_n # set to new value from current signup file
print (row.First," ",row.Last," grade changed to ", row.Grade_n)
if row.School!=row.School_n and str(row.School_n)!='nan':
upkwargs.update({'school':True})
# Check for DOB inconsistency between google drive and players.csv
if row.DOB!=row.DOB_n: # don't change grade adjustment if DOB discrepancy
if row.DOB_n.year!=year: # skip birthday instead of DOB error
upkwargs.update({'DOB':True})
else: # recalculate grade adjustment
# Direct adjustment to gradeadj in players (if indicated)
players=updategradeadjust(row, players, year)
if 'school' in upkwargs or 'DOB' in upkwargs:
# Interactively approve school or DOB changes
players=updateplayer_tk(row, players, **upkwargs)
autocsvbackup(players,'players', newback=True) # run autobackup script
outname=cnf._OUTPUT_DIR+'\\players.csv'
players.to_csv(outname,index=False) # direct save of changes from google drive info
# now update new info into family contacts
# faminfo=gdsignups.drop_duplicates(subset=['Famkey']) # only process first kid from family
faminfo=signups.drop_duplicates(subset=['Famkey'])
famcontact=prepcontacts(famcontact)
faminfo=prepcontacts(faminfo)
tempfam=pd.merge(famcontact, faminfo, how='inner', on=['Famkey'], suffixes=('','_n')) # same indices as famcontact
tempfam=tempfam.dropna(subset=['Zip_n']) # drops those without timestamped google drive entry
for index,row in tempfam.iterrows():
# Update/reshuffle phone, email, parent list, parish of registration (direct to famcontact)
famcontact=update_contact(row, famcontact) # update/reshuffle phone,text (list of lists)
autocsvbackup(famcontact,'family_contact', newback=True) # run autobackup script
outname=cnf._INPUT_DIR+'\\family_contact.csv'
famcontact.to_csv(outname, index=False)
return players, famcontact
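# Usage sketch (hedged): run after loadProcessGfiles; interactive tk windows appear for any
# school/DOB or family-contact changes needing approval; players.csv and family_contact.csv
# are autosaved inside the call.
''' TESTING
players, famcontact = processdatachanges(gsignups, players, famcontact, 2019)
'''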
def updatefamcon_tk(row, famcontact, **upkwargs):
''' Interactive approval of family contact changes
changes directly made to famcontacts (but not yet autosaved)
upkwargs: phone, email, address
'''
root = tk.Tk()
root.title('Update family contact info')
choice=tk.StringVar() # must be define outside of event called functions
rownum=0
mytxt='Family: '+row.Family+' # '+str(row.Plakey)
tk.Label(root, text=mytxt).grid(row=rownum, column=0)
tk.Label(root, text='Deselect to remove').grid(row=rownum, column=1)
rownum+=1
# Use listbox of common schools?
if 'parlist' in upkwargs: # indicates new parent found
colnum=0
parlist=upkwargs.get('parlist',[])
# Checkboxes to add new parent
if 'newpar1' in upkwargs:
addpar1=tk.BooleanVar()
addpar1.set(True)
try:
mytext='Add parent: '+ (' '.join(upkwargs.get('newpar1',[]))+'?')
except:
print('Error adding parent 1', )
mytext=''
tk.Checkbutton(root, variable=addpar1, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
if 'newpar2' in upkwargs:
addpar2=tk.BooleanVar()
addpar2.set(True)
try:
mytext='Add parent: '+ (' '.join(upkwargs.get('newpar2',[]))+'?')
except:
mytext=''
tk.Checkbutton(root, variable=addpar2, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
# Checkbutton and boolvar for each parent (default true)
pbools=[] # List of bools for parent inclusion
for i in range(0,len(parlist)):
pbools.append(tk.BooleanVar())
pbools[i].set(True)
tempstr=parlist[i]
tk.Checkbutton(root, variable=pbools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
rownum+=1
if 'emails' in upkwargs: # indicates new parent found
emaillist=upkwargs.get('emails',[])
# Checkboxes to add new parent
colnum=0
if 'email1' in upkwargs:
addemail1=tk.BooleanVar()
addemail1.set(True)
email1=tk.StringVar()
email1.set(upkwargs.get('email1',''))
tk.Checkbutton(root, variable=addemail1, text='Add new email1').grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=email1).grid(row=rownum, column=colnum)
rownum+=1
if 'email2' in upkwargs:
addemail2=tk.BooleanVar()
addemail2.set(True)
email2=tk.StringVar()
email2.set(upkwargs.get('email2',''))
tk.Checkbutton(root, variable=addemail2, text='Add new email2').grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=email2).grid(row=rownum, column=colnum)
colnum+=1
# Checkbutton and boolvar for each email (default true)
        ebools=[] # List of bools for email inclusion
for i in range(0,len(emaillist)):
ebools.append(tk.BooleanVar())
tempstr=emaillist[i]
ebools[i].set(True)
tk.Checkbutton(root, variable=ebools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
rownum+=1
if 'phones' in upkwargs: # indicates new parent found
phlist=upkwargs.get('phones',[])
# Checkboxes to add new parent
colnum=0
if 'phone1' in upkwargs:
addphone1=tk.BooleanVar()
addphone1.set(True)
try:
                mytext='Add phone/text: '+ ', '.join([str(i) for i in upkwargs.get('phone1',[])])
except:
mytext=''
tk.Checkbutton(root, variable=addphone1, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
if 'phone2' in upkwargs:
addphone2=tk.BooleanVar()
addphone2.set(True)
try:
mytext='Add phone/text: '+ ', '.join(upkwargs.get('phone2',[]))
except:
mytext=''
tk.Checkbutton(root, variable=addphone2, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
# Checkbutton and boolvar for each email (default true)
        phbools=[] # List of bools for phone inclusion
for i in range(0,len(phlist)):
phbools.append(tk.BooleanVar())
tempstr=phlist[i]
phbools[i].set(True)
tk.Checkbutton(root, variable=phbools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
if 'address' in upkwargs:
colnum=0
tk.Label(root, text='Possible change of address').grid(row=rownum, column=colnum)
rownum+=1
newaddrbool=tk.BooleanVar()
newaddr=tk.StringVar()
newaddrbool.set(False)
newaddr.set(row.Address_n)
newzip=tk.StringVar()
try:
newzip.set(int(row.Zip_n))
except:
print('Non-standard zip value',str(row.Zip_n))
tk.Checkbutton(root, variable=newaddrbool, text='Change address?').grid(row=rownum, column=colnum)
colnum+=1
tk.Label(root, text='Current address').grid(row=rownum, column=colnum)
colnum=0
rownum+=1
tk.Entry(root, textvariable=newaddr).grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=newzip).grid(row=rownum, column=colnum)
colnum+=1
tempstr=str(row.Address)+' '+str(row.Zip)
tk.Label(root, text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
# Now set up select/close buttons
def skip(event):
choice.set('skip')
root.destroy()
def change(event):
choice.set('change')
root.destroy()
f=tk.Button(root, text='Skip')
f.bind('<Button-1>', skip)
f.grid(row=rownum, column=0)
g=tk.Button(root, text='Change')
g.bind('<Button-1>', change)
g.grid(row=rownum, column=1)
root.mainloop()
mychoice=choice.get()
if mychoice=='change':
# Find matching row for family (needed for all changes below)
famkey=row.Famkey
match=famcontact[famcontact['Famkey']==famkey]
if len(match)==1:
thisind=match.index[0]
else:
print('Problem finding unique entry for famkey', str(famkey))
return famcontact # return unaltered
# Reconstruct parent list
if 'parlist' in upkwargs:
newparlist=[] # constructing entirely new parent list from checkbox choices
if 'newpar1' in upkwargs:
if addpar1.get():
newparlist.append(upkwargs.get('newpar1',[np.nan,np.nan]))
#TODO fix nan error
print('Added parent',' '.join(upkwargs.get('newpar1')),' to ',str(row.Family))
for i, val in enumerate(pbools):
if pbools[i].get():
newparlist.append(parlist[i]) # [first, last] format
if 'newpar2' in upkwargs:
if addpar2.get():
newparlist.append(upkwargs.get('newpar2',[np.nan,np.nan]))
print('Added parent 2',' '.join(upkwargs.get('newpar2')),' to ',str(row.Family))
# Now direct update of parents in this family's famcontact entry
newparlist=newparlist[0:3] # limit to 3 entries
while len(newparlist)<3:
newparlist.append([np.nan,np.nan]) # pad with nan entries if necessary
# now reset parent name entries
for i in range(1,4): # reset 3 existing parents entries
fname='Pfirst'+str(i)
lname='Plast'+str(i)
famcontact.loc[thisind, fname] = newparlist[i-1][0]
famcontact.loc[thisind, lname] = newparlist[i-1][1]
# Reconstruct email list
if 'emails' in upkwargs:
newemaillist=[]
if 'email1' in upkwargs:
if addemail1.get():
newemaillist.append(email1.get())
print('Added email1', email1.get(), ' to ', str(row.Family))
for i, val in enumerate(ebools):
if ebools[i].get():
newemaillist.append(emaillist[i])
if 'email2' in upkwargs:
if addemail2.get():
# insert in 2nd position
newemaillist.insert(1, email2.get())
print('Added email2', email2.get(), ' to ', str(row.Family))
# Now update emails in famcontact entry
# Direct update of parent list
newemaillist=newemaillist[0:3] # limit to 3 entries
while len(newemaillist)<3:
newemaillist.append(np.nan) # pad with nan entries if necessary
# now reset parent name entries
for i in range(1,4): # reset 3 existing parents entries
colname='Email'+str(i)
famcontact.loc[thisind, colname]= newemaillist[i-1]
# Reconstruct phone list
if 'phones' in upkwargs:
newphlist=[]
if 'phone1' in upkwargs:
if addphone1.get():
newphlist.append(upkwargs.get('phone1', [np.nan,np.nan]))
print('Added phone1', ','.join(upkwargs.get('phone1',[])), ' to ', str(row.Family))
for i, val in enumerate(phbools):
if phbools[i].get():
newphlist.append(phlist[i])
# added at end... probably should go
if 'phone2' in upkwargs:
if addphone2.get():
# insert in 2nd position
newphlist.insert(1, upkwargs.get('phone2',[np.nan,np.nan]))
print('Added phone2', ','.join(upkwargs.get('phone2',[])), ' to ', str(row.Family))
# Now update phone, text in famcontact entry
newphlist=newphlist[0:4] # limit to 4 entries
while len(newphlist)<4:
newphlist.append([np.nan, np.nan]) # pad with nan entries if necessary
# now reset parent name entries
for i in range(1,5): # reset max 4 phone entries
phname='Phone'+str(i)
textname='Text'+str(i)
famcontact.loc[thisind, phname] = newphlist[i-1][0]
famcontact.loc[thisind, textname] = newphlist[i-1][1]
# Handle change of address (direct change if approved)
# Also change associated zip code and reset parish of residence
if 'address' in upkwargs:
            if newaddrbool.get(): # BooleanVar object itself is always truthy; need .get()
print('Address changed for ', str(row.Family))
famcontact.loc[thisind, 'Address'] = newaddr.get()
# Reset parish of residence to nan (manually find and replace)
famcontact.loc[thisind, 'Parish_residence'] = np.nan
try:
famcontact.loc[thisind,'Zip']=int(newzip.get())
except:
print('Problem converting zip code ', newzip.get())
# TODO ... handle parish of registration
return famcontact
def update_contact(row, famcontact):
'''Update phone and textable list from google drive entries;
google drive raw entries first processed in process_data_changes (then update
contacts is called)
row is a merge of existing famcontact info and new signup info
existing entries from fam_contact listed first;
pass/modify/return series for family; reorder/replace numbers
has fairly long list of changes made w/o interactive approval:
1) changing order of email or phone numbers (e.g. swap phone1 and phone2)
2) add phone2 (or email2) if current phone2(email2) is nan
3) change order of parents (new parent1)
All other changes done w/ interactive approval using update_famcon_tk
'''
# [phone, text, order]
thisfam=row.Family
match=famcontact[famcontact['Famkey']==row.Famkey]
if len(match)==1:
thisind=match.index[0] # correct index for updating this family in famcontacts
else:
print(str(row.Family), " not found in famcontacts.. shouldn't happen")
return famcontact
upkwargs={} # empty dict for monitoring all changes
# check for possible change in address (housenum as trigger)
match1=re.search(r'\d+', row.Address)
match2=re.search(r'\d+', row.Address_n)
if match1 and match2:
num1=match1.group(0)
num2=match2.group(0)
if num1!=num2: # change in address number strongly suggestive of actual change
upkwargs.update({'address':True})
else:
print('No address # found for', str(thisfam))
phonelist=[] # list of lists with number and textable Y/N
for i in range(1,5): # get 4 existing phone entries (phone1, phone2, etc.)
phname='Phone'+str(i)
txtname='Text'+str(i)
if str(row[phname])!='nan':
phonelist.append([row[phname],row[txtname]]) # as phone and text y/N
# New google drive entries will be Phone1_n.. look for phone/text pair in existing list
if str(row.Phone1_n)!='nan' and [row.Phone1_n,row.Text1_n] in phonelist: # new ones phone is required entry
# default move of phone1, text1 to top of list - no confirmation
if [row.Phone1_n,row.Text1_n]!=phonelist[0]: # move if not in first position
phonelist.insert(0,phonelist.pop(phonelist.index([row.Phone1_n,row.Text1_n])))
print('Phone 1 changed for ', str(thisfam))
upkwargs.update({'phchange':True})
if str(row.Phone1_n)!='nan' and [row.Phone1_n,row.Text1_n] not in phonelist: # new ones phone is required entry
if [row.Phone1_n, np.nan] in phonelist: # remove if # present but w/o text indication (no confirm)
phonelist.remove([row.Phone1_n,np.nan])
phonelist.insert(0,[row.Phone1_n,row.Text1_n]) # insert in first position
print('Updated phone 1 to', row.Phone1_n,' for ',str(thisfam))
upkwargs.update({'phchange':True})
else:
# phone1 change to be confirmed
upkwargs.update({'phone1':[row.Phone1_n,row.Text1_n]})
upkwargs.update({'phones': phonelist})
if str(row.Phone2_n)!='nan': # check for phone2 entry (with _n suffix)
if [row.Phone2_n,row.Text2_n] not in phonelist: # add second phone to 2nd position if not present
if [row.Phone2_n,np.nan] in phonelist: # remove if # present but w/o text indication
phonelist.remove([row.Phone2_n,np.nan])
phonelist.insert(1,[row.Phone2_n,row.Text2_n])
print ('Updated phone 2 to ', str(row.Phone2_n), 'for ', str(thisfam))
upkwargs.update({'phchange':True})
else: # get approval for phone 2 addition
upkwargs.update({'phone2':[row.Phone2_n,row.Text2_n]})
upkwargs.update({'phones': phonelist})
# Construct existing list of known email addresses
emaillist=[]
for i in range(1,4): # get 3 existing email entries
emailname='Email'+str(i)
if str(row[emailname])!='nan':
emaillist.append(row[emailname].lower())
# Find new email1 entry in google drive data
if str(row.Email)!='nan' and '@' in row.Email: # real primary gd named email
        if row.Email.lower() in emaillist: # already known: move to first position (no confirmation)
            if row.Email.lower()!=emaillist[0]: # check if in first position already
                emaillist.insert(0,emaillist.pop(emaillist.index(row.Email.lower())))
upkwargs.update({'emchange':True})
print ('Updated email 1 ', str(row.Email.lower()), 'for family', str(thisfam))
else: # confirm email1 if not present
upkwargs.update({'email1':row.Email})
upkwargs.update({'emails':emaillist})
# look for new email in email2 position and add
if str(row.Email2_n)!='nan' and '@' in row.Email2_n:
if row.Email2_n.lower() not in emaillist: # add second email to 2nd position if not present
upkwargs.update({'email2':row.Email2_n})
upkwargs.update({'emails':emaillist})
# Update list of parent names (max 3 entries)
parlist=[] # construct existing list from family contacts
# skip if all nan for entered parents (non-gd entry)
for i in range(1,4): # construct existing parents list
fname='Pfirst'+str(i)
lname='Plast'+str(i)
if str(row[fname])!='nan':
parlist.append([row[fname],row[lname]]) # list of lists [first, last]
if str(row.Pfirst1_n)!='nan': # skip if parent name is nan
        if [row.Pfirst1_n,row.Plast1_n] in parlist: # parent already known
            if [row.Pfirst1_n,row.Plast1_n]!=parlist[0]: # check if already in first
                # move to first position while removing the other entry (everything else requires approval)
                parlist.insert(0,parlist.pop(parlist.index([row.Pfirst1_n,row.Plast1_n])))
                upkwargs.update({'parchange':True})
        else: # parent not in list (confirm)
            upkwargs.update({'newpar1':[row.Pfirst1_n,row.Plast1_n]})
            upkwargs.update({'parlist':parlist})
if str(row.Pfirst2_n)!='nan': # Check for parent 2 entry
if [row.Pfirst2_n,row.Plast2_n] not in parlist: # add second phone to 2nd position if not present
upkwargs.update({'newpar2':[row.Pfirst2_n,row.Plast2_n]})
upkwargs.update({'parlist':parlist})
# Save auto-changes in phone to family contacts
if 'phchange' in upkwargs: # Record altered phonelist in famcontacts
if 'phones' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'phones': phonelist}) # ensure most current copy
        phonelist=phonelist[0:4] # limit to 4 phone entries
while len(phonelist)<4:
phonelist.append([np.nan,np.nan]) # pad with nan entries if necessary
for i in range(1,5): # reset 4 existing phone entries
phname='Phone'+str(i)
txtname='Text'+str(i)
famcontact.loc[thisind, phname] = phonelist[i-1][0] # first of tuple is phone
famcontact.loc[thisind, txtname] = phonelist[i-1][1] # 2nd of tuple is text y/n
del upkwargs['phchange']
print('automatic phone changes for', thisfam)
# Save auto-changes in emails to family contacts
if 'emchange' in upkwargs: # Record altered phonelist in famcontacts
if 'emails' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'emails': emaillist}) # ensure most current copy
        emaillist=emaillist[0:3] # limit to 3 email entries
        while len(emaillist)<3:
            emaillist.append(np.nan) # pad with nan entries if necessary
        for i in range(1,4): # reset 3 existing email entries
emname='Email'+str(i)
famcontact.loc[thisind, emname] =emaillist[i-1]
del upkwargs['emchange']
print('automatic email changes for', thisfam)
if 'parchange' in upkwargs: # Record altered parents list in famcontacts
if 'parlist' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'parlist': parlist}) # ensure most current copy
        parlist=parlist[0:3] # limit to 3 parent entries
        while len(parlist)<3:
            parlist.append([np.nan,np.nan]) # pad with nan entries if necessary (3 total)
        for i in range(1,4): # reset 3 existing parent entries
fname='Pfirst'+str(i)
lname='Plast'+str(i)
try:
famcontact.loc[thisind, fname] =parlist[i-1][0]
famcontact.loc[thisind, lname] =parlist[i-1][1]
except:
print('Error updating parents for', thisfam)
del upkwargs['parchange']
print('automatic parent changes for', thisfam)
# now check for any changes needing interactive approval
if len(upkwargs)>0: # something needs interactive approval
famcontact=updatefamcon_tk(row, famcontact, **upkwargs)
return famcontact
def updateplayer_tk(row, players, **upkwargs):
''' Interactive approval of player info updates (except date)
changes directly made to players (but not yet autosaved)
called by processdatachanges
'''
commonschools=['Cabrini','Soulard','SLPS','Charter','Private']
root = tk.Tk()
root.title('Update player info')
choice=tk.StringVar() # must be define outside of event called functions
rownum=0
mytxt='Player:'+row.First+' '+row.Last+' # '+str(row.Plakey)
tk.Label(root, text=mytxt).grid(row=rownum, column=0)
rownum+=1
# Use listbox of common schools?
if 'DOB' in upkwargs: # indicates discrepancy
DOB1=date(row.DOB)
DOB2=date(row.DOB_n)
# create and display DOB variables
def ChooseDOB1(event):
DOB.set(datetime.strftime(DOB1,'%m/%d/%y'))
def ChooseDOB2(event):
DOB.set(datetime.strftime(DOB2,'%m/%d/%y'))
DOB=tk.StringVar()
DOB.set(datetime.strftime(DOB1,'%m/%d/%y')) # defaults to original
tk.Label(root, text='Update date of birth?').grid(row=rownum, column=0)
mytxt='current DOB:'+datetime.strftime(DOB1,'%m/%d/%y')
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', ChooseDOB1)
b.grid(row=rownum, column=1)
mytxt='New DOB:'+datetime.strftime(DOB2,'%m/%d/%y')
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', ChooseDOB2)
b.grid(row=rownum, column=2)
tk.Entry(master=root, textvariable=DOB).grid(row=rownum, column=3)
rownum+=1
if 'school' in upkwargs:
school=tk.StringVar()
school.set(row.School) # default to existing value
tk.Label(root, text='Update school?').grid(row=rownum, column=0)
rownum+=1
def newschool(event):
school.set(row.School_n)
def oldschool(event):
school.set(row.School)
def pickschool(event):
# double-click to pick standard school choice
items=lb.curselection()[0] # gets selected position in list
school.set(commonschools[items])
tk.Entry(root, textvariable=school).grid(row=rownum, column=2)
mytxt='new school:'+str(row.School_n)
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', newschool)
b.grid(row=rownum, column=1)
mytxt='existing school:'+str(row.School)
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', oldschool)
b.grid(row=rownum, column=0)
# also include selectable listbox of common school choices
lb=tk.Listbox(master=root, selectmode=tk.SINGLE)
lb.bind("<Double-Button-1>", pickschool)
lb.grid(row=rownum, column=3)
for i,sch in enumerate(commonschools):
lb.insert(tk.END, sch)
rownum+=1
# Now set up select/close buttons
def skip(event):
choice.set('skip')
root.destroy()
def change(event):
choice.set('change')
root.destroy()
f=tk.Button(root, text='Skip')
f.bind('<Button-1>', skip)
f.grid(row=rownum, column=0)
g=tk.Button(root, text='Change')
g.bind('<Button-1>', change)
g.grid(row=rownum, column=1)
root.mainloop()
mychoice=choice.get()
if mychoice=='change':
try:
# make changes directly to players after finding correct index using plakey
plakey=row.Plakey
match=players[players['Plakey']==plakey]
thisind=match.index[0]
if 'school' in upkwargs:
players.loc[thisind,'School']= school.get()
if 'DOB' in upkwargs:
newDOB=datetime.strptime(DOB.get(),'%m/%d/%y')
players.loc[thisind,'DOB']= newDOB
except:
print('Error updating info for', row.Plakey, row.First, row.Last)
return players
def prepcontacts(df):
''' Prepare for update contacts/ matching with google drive info
avoids possible problems/spaces in manually entered info '''
mycols=['Pfirst1', 'Plast1','Pfirst2', 'Plast2', 'Pfirst3', 'Plast3',
'Phone1', 'Text1','Phone2', 'Text2', 'Phone3', 'Text3', 'Phone4',
'Text4', 'Email1','Email2', 'Email3']
for i, col in enumerate(mycols):
try:
df.loc[:,col]=df[col].str.strip()
except: # maybe only nan or not present (i.e. in signups)
pass
mycols=['Text1','Text2','Text3']
for i, col in enumerate(mycols):
try:
df.loc[:,col]=df[col].str.replace('No','N', case=False)
df.loc[:,col]=df[col].str.replace('Yes','Y', case=False)
except:
pass
return df
def findyearseason(df):
''' Pass raw signups and determine year and sports season '''
# get year from system clock and from google drive timestamp
now=datetime.now()
val=df.Timestamp[0] # grab first timestamp
    if not isinstance(val, datetime): # if not a timestamp (i.e. manual string entry) find one
        for index, row in df.iterrows():
            val=df.Timestamp[index]
            if isinstance(val, datetime):
                break
year=val.year # use year value from signup timestamps
if now.year!=val.year:
print ('Possible year discrepancy: Signups are from ',str(val.year))
# now find sports season
mask = np.column_stack([df['Sport'].str.contains("occer", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Fall'
mask = np.column_stack([df['Sport'].str.contains("rack", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Spring'
mask = np.column_stack([df['Sport'].str.contains("asket", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Winter'
return season, year
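# Usage sketch (hedged): infers the season from the sports named in the raw signups and the
# year from the google-drive timestamps.
''' TESTING
season, year = findyearseason(gsignups)
print(season, year)
'''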
def outputduplicates(df):
'''Prints out names of players with duplicated entries into console... can then delete from google drive signups '''
tempdf=df[df.duplicated(['First','Last','Sport'])] # series with 2nd of duplicated entries as True
firsts=tempdf.First.tolist()
lasts=tempdf.Last.tolist()
for f,l in zip(firsts, lasts):
print('Duplicated signup for player: {} {}'.format(f,l))
return
def formatphone(df):
''' Convert all entered phone numbers in dfs phone columns to 314-xxx-xxxx string and standardize text field '''
def phoneFormat(val):
# lambda function phone number reformatting
if not isinstance(val, str):
return val
# replace/remove any white space
val="".join(val.split(' '))
if val=='': # blank phone causes problems
return np.nan
if not re.search(r'(\d+-\d+-\d+)', val):
val=re.sub("[^0-9]", "", val) # substitute blank for non-number
if len(val)==7:
return '314'+val
elif len(val)==11 and val.startswith('1'): # remove starting 1 if present
return val[1:11]
elif len(val)!=10: # sometimes has ---
# print('Bad number: ',val)
return val
else:
return val[0:3]+'-'+val[3:6]+'-'+val[6:10]
else:
return val # already good
# find phone columns (named phone, phone2, etc.)
phlist=[str(s) for s in df.columns if 'Phone' in s]
for col in phlist:
df.loc[:,col]=df[col].apply(lambda x: phoneFormat(x))
# now change yes in any text field to Y
txtlist=[str(s) for s in df.columns if 'Text' in s]
for col in txtlist:
df.loc[:,col]=df[col].replace('yes','Y')
df.loc[:,col]=df[col].replace('Yes','Y')
return df
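# Illustrative behavior of the internal phoneFormat helper (values are made up):
#   '3141234567'     -> '314-123-4567'   (bare 10 digits get dashes)
#   '(314) 123 4567' -> '314-123-4567'   (non-digits stripped first)
#   '314-123-4567'   -> returned unchanged (already dashed)
''' TESTING
famcontact=formatphone(famcontact)
'''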
def standardizeschool(df):
''' can pass any frame with school column and standardize name as Cabrini and Soulard'''
schstr='frances' + '|' + 'cabrini' + '|' + 'sfca' # multiple school matching string
tempdf=df[df['School'].str.contains(schstr, na=False, case=False)]
df.loc[tempdf.index,'School']='Cabrini'
tempdf = df[df['School'].str.contains('soulard', na=False, case=False)]
df.loc[tempdf.index,'School']='Soulard'
tempdf = df[df['School'].str.contains('public', na=False, case=False)]
df.loc[tempdf.index,'School']='Public'
schstr='city garden' + '|' + 'citygarden' # multiple school matching string
tempdf = df[df['School'].str.contains(schstr, na=False, case=False)]
df.loc[tempdf.index,'School']='City Garden'
return df
def formatnamesnumbers(df):
'''Switch names to title case, standardize gender, call phone/text reformat and standardize school name'''
def titleStrip(val):
try:
return val.title().strip()
except:
return val
processCols=['First','Last','Family','Pfirst1','Plast1','Pfirst2','Plast2','Email','Email2']
processCols=[i for i in processCols if i in df.columns]
for col in processCols:
df.loc[:, col]=df[col].apply(lambda x: titleStrip(x))
if 'Gender' in df:
df.loc[:,'Gender']=df.Gender.replace('Girl','f')
df.loc[:,'Gender']=df.Gender.replace('Boy','m')
if 'Grade' in df:
df.loc[:,'Grade']=df.Grade.replace('K',0)
df.loc[:,'Grade']=df.Grade.replace('pK',0)
try:
df.loc[:,'Grade']=df.Grade.astype(int)
except:
print('Player grade likely missing from raw signup file... enter manually')
df=formatphone(df) # call phone reformatting string
if 'School' in df:
df=standardizeschool(df) # use "Cabrini" and "Soulard" as school names
return df
def graduate_players(players, year):
''' Recalc grade based on grade adjustment, school year (run once per year in fall) and age.
some player grades will already have been updated (generally google drive entries)... however recalc shouldn't
change grade '''
players.loc[:,'Grade']=players.Grade.replace('K',0)
for index,row in players.iterrows():
# replace K with zero
grade=int(players.iloc[index]['Grade']) # get currently listed grade
gradeadj=players.iloc[index]['Gradeadj']
dob=players.iloc[index]['DOB']
if str(gradeadj)=='nan' or str(dob)=='NaT': # skip grade update if info is missing
continue
dob=date(dob)
# calculate current age at beginning of this school on 8/1
age=date(year,8,1)-dob
age = (age.days + age.seconds/86400)/365.2425
# assign grade based on age (and grade adjustment)
newgrade=int(age)+int(gradeadj)-5
if grade!=newgrade:
first=players.iloc[index]['First']
last=players.iloc[index]['Last']
print('Grade changed from',grade,'to',newgrade,'for', first, last)
players.loc[index, 'Grade'] = newgrade
players.loc[:,'Grade']=players.Grade.replace(0,'K')
return players
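# Usage sketch (hedged): run once per year in the fall, before team assignments; grade
# changes are printed but players is not autosaved here (path below is illustrative).
''' TESTING
players = graduate_players(players, 2019)
players.to_csv(cnf._INPUT_DIR+'\\players.csv', index=False)  # save manually after review
'''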
def removeEmptyFams(players, famcontact):
'''
Remove empty families (with no remaining players)
'''
# Remove families with no active players
plaset=[int(i) for i in list(players.Famkey.unique())]
famset=[int(i) for i in list(famcontact.Famkey.unique())]
# Empty families
emptykey=[i for i in famset if i not in plaset]
empty=famcontact[famcontact['Famkey'].isin(emptykey)]
print('Remove empty families:')
for ind, row in empty.iterrows():
print(row.Family, ':',row.Pfirst1, row.Plast1)
choice=input("Remove empty families (Y,N)?\n")
if choice.upper()=='Y':
famcontact=famcontact[~famcontact['Famkey'].isin(emptykey)]
outname=cnf._INPUT_DIR+'\\family_contact.csv'
famcontact.to_csv(outname, index=False)
return famcontact
def removeHSkids(players):
''' Drop graduated players (9th graders) from list '''
grlist=[i for i in range(0,9)]
grlist.append('K')
Hs=players.loc[~(players.Grade.isin(grlist))]
for ind, row in Hs.iterrows():
print(row.First, row.Last)
choice=input('Remove above HS players (Y/N)?\n')
if choice.upper()=='Y':
players=players.loc[(players.Grade.isin(grlist))]
print('HS Players removed but not autosaved')
return players
def estimategrade(df, year):
'''Estimate grade for this sports season based on DOB.. not commonly used '''
for index, row in df.iterrows():
grade=df.loc[index]['Grade']
if str(grade)=='nan': # skips any players who already have assigned grade
dob=df.loc[index]['DOB']
dob=date(dob) # convert to datetime date from timestamp
first=df.loc[index]['First']
last=df.loc[index]['Last']
if str(dob)=='nan':
print ('DOB missing for ', first,' ', last)
continue # skip to next if dob entry is missing
currage=date(year,8,1) - dob
currage = (currage.days + currage.seconds/86400)/365.2425 # age on first day of school/ sports season
gradeest=int(currage-5)
if gradeest==0:
gradeest='K'
print(first, last, 'probably in grade', gradeest)
df.loc[index,'Grade']=gradeest
return df
def updateoldteams(teams, year):
''' Load old teams after copy to teams tab in teams_coaches, then auto-update year-grade
must be manually saved with saveteams... then any adjustments made manually in Excel'''
# check to ensure teams are not already updated
if teams.iloc[0]['Year']==year:
print('Teams already updated for ', year,' school year')
return teams # pass back unaltered
# temporarily make the K to 0 replacements
teams.Grade=teams.Grade.replace('K',0)
    teams.loc[:,'Graderange']=teams['Graderange'].astype(str) # convert all to string
teams.loc[:,'Year']=year
teams.loc[:,'Grade']+=1
for index, row in teams.iterrows():
grade=teams.loc[index]['Grade']
div=teams.loc[index]['Division'] # division must match grade
div=div.replace('K','0') # replace any Ks in string
newdiv=''.join([s if not s.isdigit() else str(grade) for s in div]) # find replace for unknown # w/ new grade
teams.loc[index,'Division'] = newdiv
cycname=teams.loc[index]['Team'] # update grade portion of team name
if cycname.startswith('K'):
newcycname='1'+ cycname[1:]
teams.loc[index,'Team'] = newcycname
elif cycname[0].isdigit(): # now update teams beginning w/ numbers
newcycname=str(grade)+ cycname[1:]
teams.loc[index,'Team']= newcycname
# update grade ranges
grrange=teams.loc[index]['Graderange'] # should be all numbers
grrange=grrange.replace('K','0')
newrange=''.join([str(int(i)+1) for i in grrange])
teams.loc[index,'Graderange'] = newrange # grade range stored as string, right?
# no auto-save... save with saveteams after checking for proper changes
return teams
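# Usage sketch (hedged): teams is last year's team list copied into the Teams tab; review
# the returned frame and save manually (e.g. with saveteams) before further adjustments.
''' TESTING
teams = updateoldteams(teams, 2019)
'''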
def splitcoaches(df):
''' Pass CYC teams list, split and duplicate rows with comma separated vals in colname for extra coaches'''
df.loc[:,'Role']='Coach' # add col for head or asst (first entry for head coach)
# df['Open/Closed']='Closed'
assistants=df.dropna(subset=['AssistantIDs']) # drop teams w/ no asst coaches
for index, rows in assistants.iterrows():
val=assistants.loc[index,'AssistantIDs']
asstcoaches=[str(s) for s in val.split(',')] #list of assistants for single team
for i,asst in enumerate(asstcoaches):
newrow=assistants.loc[index] # duplicate entry as series
asst=asst.strip() # strip leading, trailing blanks
newrow.loc['Coach ID'] = asst # set this asst coaches ID
newrow.loc['Role'] = 'Assistant Coach'
df=df.append(newrow)
df=df.sort_values(['Team'],ascending=True)
return df
def addcoachestoroster(teams, coaches):
'''Creates roster entries for coaches for each CYC team
pass teams and coaches (with coach roster info)
needed roster cols are all below (except sport used in output parsing)
args: teams -- team table w/ head and asst coach CYC ids
coaches - coaches table with CYC Id (key) and associated info
returns: coachroster --separate df to be appended to main player roster
'''
# Add team coaches (match by CYC-IDs)
thismask = teams['Team'].str.contains('-', case=False, na=False) # finds this season's CYC level teams
CYCcoach=teams.loc[thismask] # also has associated sport
CYCcoach=splitcoaches(CYCcoach) # makes new row for all assistant coaches on CYC teams
CYCcoach=pd.merge(CYCcoach, coaches, how='left', on=['Coach ID'], suffixes=('','_r'))
mycols=['Sport','Fname', 'Lname', 'Street', 'City', 'State', 'Zip', 'Phone', 'Email', 'Birthdate', 'Sex', 'Role', 'Division', 'Grade', 'Team', 'School', 'Parish of Registration', 'Parish of Residence', 'Coach ID']
for col in [col for col in mycols if col not in CYCcoach.columns]:
CYCcoach[col]='' # birthdate generally missing
CYCcoach=CYCcoach[mycols] # put back in desired order
# drop duplicates on CYC ID, team (sometimes occurs during merge)
CYCcoach=CYCcoach.drop_duplicates(['Coach ID','Team'])
return CYCcoach
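# Usage sketch (hedged): coaches is the CYC coach table keyed on 'Coach ID'; the returned
# frame is appended to the player roster before writing the CYC roster file
# (playerroster below is an illustrative name for that roster dataframe).
''' TESTING
coachroster = addcoachestoroster(teams, coaches)
fullroster = pd.concat([playerroster, coachroster], ignore_index=True)
'''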
def countteamplayers(df, teams, season, year):
''' For each team, summarize number of players (subset those that are younger or older) and list of names
passing mastersignups'''
df=df[df['Year']==year] # removes possible naming ambiguity
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season,[])
df=df[df['Sport'].isin(sportlist)] # only this sports season
df.Grade=df.Grade.replace('K',0)
df.Grade=df.Grade.astype('int')
teams.loc[:,'Grade']=teams.Grade.replace('K',0)
teams.loc[:,'Grade']=teams.Grade.astype('int')
teams.loc[:,'Playerlist']=teams.Playerlist.astype('str')
for index, row in teams.iterrows():
teamname=teams.loc[index]['Team']
match=df[df['Team']==teamname] # all players on this team from master_signups
teams.loc[index,'Number'] = len(match) # total number of players
# compose player list (First L.) and add to teams
playerlist=[]
for ind, ro in match.iterrows():
first=match.loc[ind]['First']
last=match.loc[ind]['Last']
strname=first+' ' +last[0]
playerlist.append(strname)
players=", ".join(playerlist)
teams.loc[index,'Playerlist'] = players
# count players above or below grade level
thisgrade=int(teams.loc[index]['Grade'])
teams.loc[index,'Upper'] = (match.Grade > thisgrade).sum()
teams.loc[index,'Lower'] = (match.Grade < thisgrade).sum()
writetoxls(teams, 'Teams', 'Teams_coaches.xlsx')
return teams
def writecontacts(df, famcontact, players, season, year):
''' From mastersignups and teams, output contact lists for all teams/all sports separately '''
# Slice by sport: Basketball (null for winter?), Soccer, Volleyball, Baseball, T-ball, Softball, Track)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year)] # season is not in mastersignups... only individual sports
'''# for transfers to same school (but different grades), combine all into single list for given school
for index,row in df.iterrows():
if str(df.loc[index]['Team'])!='nan': # avoids nan team screwups
if '#' in df.loc[index]['Team']: # this combines Ambrose#2B, Ambrose#3G to single tab
df=df.set_value(index,'Team',df.loc[index]['Team'].split('#')[0])
'''
# get family contact info from famcontacts
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
# Sort by grade pre-split
df.loc[:,'Grade']=df.Grade.replace('K',0)
df.loc[:,'Grade']=df.Grade.apply(int)
df=df.sort_values(['Grade'], ascending=True)
    df.loc[:,'Grade']=df.Grade.replace(0,'K') # restore K after sorting
df.loc[:,'Team']=df.Team.replace(np.nan,'None') # still give contacts if team not yet assigned
df.loc[:,'Team']=df.Team.replace('','None')
# Standard sport contacts output for soccer, VB, basketball
if season!='Spring':
for i, sport in enumerate(sportlist):
fname=cnf._OUTPUT_DIR+'\\'+sport+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
Thissport=df[df['Sport']==sport]
teamlist= Thissport.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# Combine transfers to same school
transchools=[s.split('#')[0] for s in teamlist if '#' in s]
teamlist=[s for s in teamlist if '#' not in s]
teamlist.extend(transchools) # all to same school as single "team"
# now can organize contacts (and drop sport)
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Team', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Plakey', 'Famkey', 'Family']
Thissport=Thissport[mycols] # drop columns and rearrange
for i, team in enumerate(teamlist):
thisteam=Thissport[Thissport['Team'].str.contains(team)]
thisteam.to_excel(writer,sheet_name=team,index=False) # this overwrites existing file
writer.save()
else: # handle spring special case
Balls=df[df['Sport']!='Track'] # all ball-bat sports together
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Team', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Plakey', 'Famkey', 'Family']
Balls=Balls[mycols]
teamlist= Balls.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# Combine transfers
transchools=[s.split('#')[0] for s in teamlist if '#' in s]
teamlist=[s for s in teamlist if '#' not in s]
teamlist.extend(transchools) # all to same school as single "team"
fname=cnf._OUTPUT_DIR+'\\'+'Batball'+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
# create a separate tab for each team and write the contacts
for i, team in enumerate(teamlist):
thisteam=Balls[Balls['Team'].str.contains(team)]
thisteam.to_excel(writer,sheet_name=team,index=False) # this overwrites existing file
writer.save() # overwrites existing
# Entire track team as single file
Track=df[df['Sport']=='Track']
Track=Track[mycols] # drop columns and rearrange
fname=cnf._OUTPUT_DIR+'\\'+'Track'+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
Track.to_excel(writer,sheet_name='Track',index=False)
writer.save()
return
def makegoogcont(df, famcontact, players, season, year):
'''Create and save a google contacts file for all Cabrini teams
save to csv '''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year)] # season is not in mastersignups... only individual sports
'''# for transfers to same school (but different grades), combine all into single list for given school
for index,row in df.iterrows():
if str(df.loc[index]['Team'])!='nan': # avoids nan team screwups
if '#' in df.loc[index]['Team']: # this combines Ambrose#2B, Ambrose#3G to single tab
df=df.set_value(index,'Team',df.loc[index]['Team'].split('#')[0])
'''
# get family contact info from famcontacts
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
# Drop any players not yet assigned
df=df.dropna(subset=['Team'])
# Full contacts list format for android/google
for i, sport in enumerate(sportlist):
Thissport=df[df['Sport']==sport]
teamlist= Thissport.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# drop if team is not yet assigned
teamlist=[s for s in teamlist if str(s) != 'nan']
# drop if team is 'drop'
teamlist=[s for s in teamlist if str(s) != 'drop']
# Drop all non-Cabrini transferred teams (which must contain #)
teamlist=[s for s in teamlist if '#' not in s]
# Combine track subteams to single team
teamlist=[s[0:5] if 'Track' in s else s for s in teamlist]
teamlist=set(teamlist)
teamlist=list(teamlist)
# now create google contacts list for each Cabrini team and save
for j, team in enumerate(teamlist):
thisteam=Thissport[Thissport['Team'].str.contains(team)]
# Drop duplicate from same family
thisteam=thisteam.drop_duplicates('Phone1')
thisteam.loc[:,'Name']=thisteam['First']+' '+thisteam['Last']
thisteam.loc[:,'Group']=sport+str(year)
mycols=['Name','Pfirst1','Last','Phone1','Phone2','Email1','Email2','Group']
newcols=['Name','Additional Name','Family Name','Phone 1 - Value','Phone 2 - Value',
'E-mail 1 - Value','E-mail 2 - Value','Group Membership']
thisteam=thisteam[mycols]
thisteam.columns=newcols
thisteam=thisteam.replace(np.nan,'')
fname=cnf._OUTPUT_DIR+'\\google'+team+'.csv'
thisteam.to_csv(fname, index=False)
return
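# Illustrative sketch (not part of the original code): the column mapping makegoogcont
# applies to turn our contact columns into Google-contacts import headers. Assumes pandas
# is imported as pd at the top of this module; the sample row is made up.
def _example_google_contact_columns():
    mycols=['Name','Pfirst1','Last','Phone1','Phone2','Email1','Email2','Group']
    newcols=['Name','Additional Name','Family Name','Phone 1 - Value','Phone 2 - Value',
             'E-mail 1 - Value','E-mail 2 - Value','Group Membership']
    sample=pd.DataFrame([['Jane D','Ann','Doe','555-1234','','[email protected]','','Soccer2018']],
                        columns=mycols)
    sample.columns=newcols # same rename makegoogcont applies before saving the csv
    return sample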
def createsignups(df, Mastersignups, season, year):
''' pass signups and add signups to master list, also returns list of current
player keys by sport; typically use writesignupstoExcel instead
args:
df - signup (dataframe)
Mastersignups - existing all signups db-like file
season - ['Fall','Winter','Spring']
year- 4 digit year as int
returns:
Mastersignups - same with new unique entries
'''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
# Use comma sep on multiple sport entries??
now=datetime.now()
thisdate=date.strftime(now,'%m/%d/%Y') # for signup date
df.loc[:,'SUdate']=thisdate # can do this globally although might also add to signups
startlen=len(Mastersignups) # starting number of signups
intcols=['SUkey','Year']
for i, col in enumerate(intcols):
if col not in df:
df.loc[df.index, col]=np.nan
mycols=Mastersignups.columns.tolist() # desired column order
for i, col in enumerate(mycols):
if col not in df:
df.loc[df.index,col]=np.nan
# TODO one option here would be to clone comma-separated sport entries (i.e. track and softball)
for i, sport in enumerate(sportlist):
# Use caution here due to Tball in Softball string problem (currently set to T-ball)
thissport=df.loc[df['Sport'].str.contains(sport, na=False, case=False)] # also handles multi-sports
# Prepare necessary columns
for index, row in thissport.iterrows():
thissport.loc[index,'Sport'] = sport # set individually to formal sport name
thissport.loc[index,'Year'] = int(year)
thissport.loc[index,'SUkey'] = 0 # assigned actual key below
# Now organize signups and add year
Mastersignups=pd.concat([thissport,Mastersignups], ignore_index=True)
Mastersignups=Mastersignups[mycols] # put back in original order
# drop duplicates and save master signups file (keep older signup if present... already assigned SUkey)
Mastersignups=Mastersignups.sort_values(['Plakey', 'Sport','Year','SUkey'], ascending=False) # keeps oldest signup
Mastersignups=Mastersignups.drop_duplicates(subset=['Plakey', 'Sport','Year']) # drop duplicates (for rerun with updated signups)
newsignups=len(Mastersignups)-startlen # number of new signups added this pass
print('Added ', str(newsignups),' new ', season, ' signups to master list.')
# add unique SUkey (if not already assigned)
    neededkeys = Mastersignups[(Mastersignups['SUkey']==0)] # rows still lacking a unique SUkey
availSUkeys=findavailablekeys(Mastersignups, 'SUkey', len(neededkeys)) # get necessary # of unique SU keys
keycounter=0
for index, row in neededkeys.iterrows():
Mastersignups.loc[index,'SUkey'] = availSUkeys[keycounter] # reassign SU key in source master list
keycounter+=1 # move to next available key
Mastersignups.loc[:,'Grade']=Mastersignups.Grade.replace('K',0)
Mastersignups=Mastersignups.sort_values(['Year', 'Sport', 'Gender','Grade'], ascending=False)
Mastersignups.loc[:,'Grade']=Mastersignups.Grade.replace(0,'K')
# autocsvbackup(Mastersignups,'master_signups', newback=True)
Mastersignups.to_csv(cnf._INPUT_DIR + '\\master_signups.csv', index=False, date_format='mm/dd/yy') # automatically saved
return Mastersignups
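# Illustrative sketch (not part of the original code): the sort-then-drop_duplicates step
# used above, on a tiny made-up frame. New signups carry SUkey 0 until a real key is
# assigned, so sorting SUkey descending keeps the already-keyed (older) row.
def _example_keep_oldest_signup():
    signups=pd.DataFrame({'Plakey':[1,1,2],'Sport':['Soccer','Soccer','Soccer'],
                          'Year':[2018,2018,2018],'SUkey':[25,0,0]})
    signups=signups.sort_values(['Plakey','Sport','Year','SUkey'], ascending=False)
    signups=signups.drop_duplicates(subset=['Plakey','Sport','Year'])
    return signups # player 1 keeps SUkey 25; the re-run duplicate with SUkey 0 is dropped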
def replaceacro(df, acronyms):
''' Pass df column and return with acronyms replaced with full translations (parishes and schools
currently used only for CYC rosters '''
for index, row in acronyms.iterrows():
acro=acronyms.loc[index]['acronym']
transl=acronyms.loc[index]['translation']
# TODO only for parish columns
df.loc[:,'Parish of Registration']=df['Parish of Registration'].replace(acro, transl)
df.loc[:,'Parish of Residence']=df['Parish of Residence'].replace(acro, transl)
df.loc[:,'School']=df['School'].replace(acro, transl)
return df
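# Illustrative sketch (not part of the original code): minimal inputs for replaceacro.
# Assumes pandas is imported as pd; the acronym and parish values are invented.
def _example_replaceacro():
    roster=pd.DataFrame({'Parish of Registration':['SFC'],'Parish of Residence':['SFC'],
                         'School':['SFC']})
    acronyms=pd.DataFrame({'acronym':['SFC'],'translation':['St. Frances Cabrini']})
    return replaceacro(roster, acronyms)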
def createrosters(df, season, year, players, teams, coaches, famcontact, acronyms):
''' From Mastersignups of this season creates Cabrini CYC roster and transfers (for separate sports)
and all junior sports (calculates ages for Judge Dowd); pulls info merged from famcontact, players, teams, and coaches
teams should already be assigned using teams xls and assigntoteams function
returns: None ... direct save to OUTPUT_DIR
'''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
specials=['Chess','Track']
sports=sportsdict.get(season)
sportlist=[sport for sport in sports if sport not in specials]
speciallist=[sport for sport in sports if sport in specials] # for track, chess, other oddballs
Specials=df[(df['Year']==year) & (df['Sport'].isin(speciallist))] # deal with these at bottom
# Proceed with all normal South Central sports
df = df[(df['Year']==year) & (df['Sport'].isin(sportlist))] # filter by year
# make duplicate entry row for double-rostered players (multiple team assignments)
thismask = df['Team'].str.contains(',', na=False) # multiple teams are comma separated
doubles=df.loc[thismask]
for index, rows in doubles.iterrows():
team=doubles.loc[index,'Team']
team=team.split(',')[1] # grab 2nd of duplicate teams
doubles.loc[index, 'Team'] = team
df=pd.concat([df,doubles], ignore_index=True) # adds duplicate entry for double-rostered players with 2nd team
thismask = df['Team'].str.contains(',', na=False) # multiple teams are comma separated
for index, val in thismask.iteritems():
if val:
team=df.loc[index]['Team']
team=team.split(',')[0] # grab 1st of duplicate teams
df.loc[index, 'Team'] = team # removes 2nd team from first entry
# now grab all extra info needed for CYC rosters
# Street, City, State, Zip, Phone, email, Parishreg, parishres from fam-contact
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# Get division from Teams xls
df=pd.merge(df, teams, how='left', on=['Team'], suffixes=('','_r2')) # effectively adds other team info for roster toall players
# DOB, School from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
df.loc[:,'Role']='Player' # add column for role
# df['Open/Closed']=np.nan
df.loc[:,'Coach ID']=''
def formatDOB(val):
# Pat moore date format is 4/4/19.. reformat as string for csv output
try:
return datetime.strftime(val, "%m/%d/%y")
except:
# print('Problem converting %s of type %s to date string format' %(val, type(val)) )
return ''
# Find Cabrini CYC names (containing hyphen)
thismask = df['Team'].str.contains('-', case=False, na=False)
CabriniCYC=df.loc[thismask] # all players on Cabrini CYC teams all sports this season
# Finds info for CYC coaches (all sports) and generate roster entries
coachroster=addcoachestoroster(teams, coaches) # coaches roster already in correct format + sport column
if len(CabriniCYC)>1: # skip if all transfers or junior (i.e. in spring)
# Split by sport
for i, sport in enumerate(sportlist):
Sportroster=CabriniCYC[CabriniCYC['Sport']==sport]
# reformat this mess as single CYC roster
Sportroster=organizeroster(Sportroster)
# Add coaches from this sport to roster
Rostercoaches=coachroster[coachroster['Sport']==sport]
Rostercoaches=organizeroster(Rostercoaches)
Sportroster=pd.concat([Sportroster,Rostercoaches], ignore_index=True) # adds coaches and players together
Sportroster=Sportroster.sort_values(['Team','Role','Grade','Lname'])
fname=cnf._OUTPUT_DIR+'\\Cabrini_'+sport+'roster'+str(year)+'.csv'
Sportroster=replaceacro(Sportroster, acronyms) # replace abbreviations
Sportroster.loc[:,'Birthdate']=Sportroster['Birthdate'].apply(lambda x: formatDOB(x))
Sportroster.to_csv(fname, index=False)
# done with Cabrini CYC rosters
# Break out all other types of teams (transfers, junior teams, Chess/Track)
thismask = df['Team'].str.contains('-', case=False, na=False)
Others=df.loc[~thismask] # no hyphen for all non Cabrini CYC level (Cabrini junior and transfers)
# Cabrini transferred players to CYC teams with # (i.e. Ambrose#8B, OLS#3G)
# Non-CYC cabrini junior teams start with number
thismask = Others['Team'].str.contains('#', na=True) # flag nans and set to true (usually jr teams w/o assignment)
# Transferred teams contain # such as OLS#3G
Transfers=Others.loc[thismask] # transferred teams have # but no hyphen
for i, sport in enumerate(sportlist): # output roster for all transfers (all grades in case of CYC)
Transferroster=Transfers[Transfers['Sport']==sport]
Transferroster=organizeroster(Transferroster)
Transferroster=Transferroster.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
fname=cnf._OUTPUT_DIR+'\\CYC'+sport+'transfers.csv'
Transferroster=replaceacro(Transferroster,acronyms)
Transferroster.loc[:,'Birthdate']=Transferroster['Birthdate'].apply(lambda x: formatDOB(x))
Transferroster.to_csv(fname, index=False)
    # Now deal with junior Cabrini teams (should be the only thing left after Cabrini CYC,
    # transfers and special sports have been handled)
Juniorteams=Others.loc[~thismask] # remove transfers
Juniorteams=Juniorteams[Juniorteams['Team']!='drop'] # remove dropped players
# now output all junior teams in same format (sometimes needed by <NAME>)
# also calculate current age
if len(Juniorteams)>0:
Juniorteams=organizeroster(Juniorteams) # put in standard South Central roster format
# Calculate current age from DOBs (renamed to Birthdate for roster only)
Juniorteams.loc[:,'Age']=calcage(Juniorteams['Birthdate'])
fname=cnf._OUTPUT_DIR+'\\Cabrini_junior_teams_'+str(year)+'.csv'
Juniorteams=replaceacro(Juniorteams, acronyms)
Juniorteams.loc[:,'Birthdate']=Juniorteams['Birthdate'].apply(lambda x: formatDOB(x))
Juniorteams.to_csv(fname, index=False)
# Deal with special cases -Track and Chess
# Get DOB/school from players.. anything else needed by <NAME>?
Specials=pd.merge(Specials, players, how='left', on='Plakey', suffixes=('','_r'))
# needs address
Specials=pd.merge(Specials, famcontact, how='left', on='Famkey', suffixes=('','_r2'))
    for i, sport in enumerate(speciallist): # output one roster per special sport (e.g. Track)
        thisspecial=Specials[Specials['Sport']==sport] # loop-local frame so Specials is not clobbered between sports
        thisspecial=thisspecial.rename(columns={'DOB':'Birthdate'})
        mycols=['First', 'Last','Gender','Team','Grade','Birthdate','School','Address','Zip']
        thisspecial=thisspecial[mycols]
        thisspecial=thisspecial.sort_values(['Gender', 'Birthdate', 'Grade'], ascending=True)
        thisspecial.loc[:,'Birthdate']=thisspecial['Birthdate'].apply(lambda x: formatDOB(x))
        fname= cnf._OUTPUT_DIR+'\\'+ sport+'_'+str(year)+'_rosters.csv'
        thisspecial.to_csv(fname, index=False)
return
def makemultiteam(df):
'''Small utility called by assigntoteams to make temp teams df that has separate entry for each grade if team is mixed grade
then merge to assign teams is straightforward
twoteams- '''
# TODO annoying problem with combining teams due to K1 (string but not int)
mycols=df.dtypes.index
# Deal with K1, K2 and such teams
kteams=[str(s) for s in np.ndarray.tolist(df.Graderange.unique()) if 'K' in str(s)]
kteams=[s for s in kteams if len(s)>1] # combo teams only
kteams=df[df['Graderange'].isin(kteams)]
xtrateams=pd.DataFrame(index=np.arange(0,0),columns=mycols) # empty df
# clones rows to match lower grades in range
for index, row in kteams.iterrows():
tempstr= kteams.loc[index]['Graderange']
gr1=0 # 0 for grade K
gr2=int(tempstr[1])
for gr in range(gr1,gr2):
newrow=kteams.loc[index] # grabs row as series
newrow.loc['Grade'] = gr # set to correct grade
xtrateams=xtrateams.append(newrow) # add single row to temp df
df.loc[:,'Grade']=df.Grade.replace('K','0', regex=True)
# get rid of K string problem
df.loc[:,'Graderange']=df.Graderange.replace('K','0', regex=True)
df.loc[:,'Graderange']=df.Graderange.astype('int')
# now handle numbered multiteams (e.g. 45 78 two digit ints)
multiteams=df.loc[df['Graderange']>9] # subset of teams comprised of multiple grades
for index, row in multiteams.iterrows(): # check for 3 or more grades
# TODO make sure it's not 3 grades (i.e. K-2)
tempstr= str(multiteams.loc[index]['Graderange'])
gr1=int(tempstr[0])
gr2=int(tempstr[1])
for gr in range(gr1,gr2):
newrow=multiteams.loc[index] # grabs row as series
newrow.loc['Grade'] = gr # set to correct grade
xtrateams=xtrateams.append(newrow) # add single row to temp df
# Detect gender-grade-sport w/ two teams
# now combine with original df
df=pd.concat([df,xtrateams], ignore_index=True) # complete team set
df=df[mycols] # back in original order
df=df.sort_values(['Gender','Grade'], ascending=True)
# After cloning by grade, look for two teams per grade options
twoteams=df[df.duplicated(['Sport','Gender','Grade'])]
return df, twoteams
def detectrosterchange(PMroster, myroster):
'''Compare submitted and returned rosters to look for unique rows (altered by <NAME>)
first row is <NAME> version (presumably correct to match CYC database) and second row is my
submitted version... make any corrections to appropriate source data files
datetime format conversions can be problematic '''
# all columns by default, false drops both duplicates leaving unique rows
bothrosters=pd.concat([PMroster,myroster])
mycols=bothrosters.columns
nanrows=bothrosters[pd.isnull(bothrosters['Birthdate'])]
nanrows=nanrows.drop_duplicates(keep=False)
# ensure player rows are both in correct format
myroster=myroster[pd.notnull(myroster['Birthdate'])]
PMroster=PMroster[pd.notnull(PMroster['Birthdate'])]
def removeLeadZero(val):
if val.startswith('0'):
return val[1:]
else:
return val
myroster.loc[:,'Birthdate']=myroster['Birthdate'].apply(lambda x:pd.to_datetime(x).strftime('%m/%d/%Y'))
PMroster.loc[:,'Birthdate']=PMroster['Birthdate'].apply(lambda x:pd.to_datetime(x).strftime('%m/%d/%Y'))
myroster.loc[:,'Birthdate']=myroster['Birthdate'].apply(lambda x:removeLeadZero(x))
PMroster.loc[:,'Birthdate']=PMroster['Birthdate'].apply(lambda x:removeLeadZero(x))
bothrosters=pd.concat([PMroster,myroster])
bothrosters=bothrosters.sort_values(['Fname','Lname'])
# Fix date string differences
alteredrows=bothrosters.drop_duplicates(keep=False)
alteredrows=alteredrows.append(nanrows)
alteredrows=alteredrows[mycols]
alteredrows=alteredrows.sort_values(['Lname','Fname'])
return alteredrows
def saveteams(teams):
'''Save teams tab into teams_coaches.xlsx after changes have been made '''
book=load_workbook('Teams_coaches.xlsx')
writer=pd.ExcelWriter('Teams_coaches.xlsx', engine='openpyxl')
writer.book=book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
teams.to_excel(writer,sheet_name='Teams',index=False) # this overwrites existing file
writer.save() # saves xls file with all modified data
def assigntoteams(df, season, year, teams, overwrite=False):
'''From mastersignups finds CYC team name based on year, grade, gender and sport from teams tab
(which only contains names from this season/year to avoid screwing up old custom team assignments'''
# teamsmult has multi grade range teams with duplicates for merge matching
# twoteams is multiple teams for same grade
Teamsmult, Twoteams =makemultiteam(teams) # makes duplicates team entries to match both grades
# compare grades as ints with K=0
df.loc[:,'Grade']=df.Grade.replace('K','0', regex=True) # convert Ks to zeros
df.loc[:,'Grade']=df['Grade'].astype('int')
Teamsmult.loc[:,'Grade']=Teamsmult['Grade'].astype('int') # ensure these are ints
    # left merge keeps all master_signups entries
df=pd.merge(df, Teamsmult, how='left', on=['Year','Grade','Gender','Sport'], suffixes=('','_r'))
# need to drop SUkey duplicates (keeping first)... occurs if >1 team per grade
df=df.drop_duplicates(subset=['SUkey']) # drops any duplicates by unique SUkey
# Consider all sports except Track (team assignment done separately by DOB)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
# this is post-merge so no chance of getting indices screwed up
# select current sports & year and subset with new team assignment
CurrentSU=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year) & (pd.notnull(df['Team_r']))]
if overwrite==False: # if no overwrite, keep only those with nan for team
CurrentSU=CurrentSU.loc[pd.isnull(CurrentSU['Team'])]
# Never overwrite team assignment for known drops
CurrentSU=CurrentSU[CurrentSU['Team']!='drop']
counter=0
for index, row in CurrentSU.iterrows():
# all remaining can be overwritted (those w/ existing team dropped above)
match=df[df['SUkey']==CurrentSU.loc[index]['SUkey']]
if len(match)==1:
thisind=match.index[0]
# add new team assignment to correct index in original master signups
df.loc[thisind, 'Team'] = CurrentSU.loc[index]['Team_r']
counter+=1
print(str(counter),' player(s) newly assigned to teams')
# now drop extra columns and sort
mycols=['SUkey','First', 'Last', 'Grade', 'Gender', 'Sport', 'Year', 'Team', 'Plakey','Famkey', 'Family',
'SUdate', 'Issue date', 'Uniform#','UniReturnDate']
df.loc[:,'Grade']=df.Grade.replace('K',0)
df=df.sort_values(['Year','Sport', 'Gender', 'Grade'], ascending=True)
    df.loc[:,'Grade']=df.Grade.replace(0,'K') # put K back in place of the numeric zero before saving
df=df[mycols]
autocsvbackup(df,'master_signups', newback=True) # autobackup of master signups
df.to_csv(cnf._INPUT_DIR + '\\master_signups.csv', index=False) # save/overwrite existing csv
return df
def assigntrackgroup(df, year, players):
'''Assign to different track team based on age on May 31 of this year (school year+1)
'''
Track=df[(df['Sport']=='Track') & (df['Year']==year)]
Track=pd.merge(Track,players, how='left', on=['Plakey'], suffixes=('','2'))
numunassigned=len(Track[pd.isnull(Track['Team'])])
for index, row in Track.iterrows():
DOB=Track.loc[index]['DOB'] # merged from players.csv
if isinstance(DOB,str):
DOB=datetime.strptime(DOB,"%m/%d/%Y").date() # convert string to datetime
        elif isinstance(DOB, pd.Timestamp): # pd.Timestamp works on both old and current pandas
DOB=DOB.date() # convert timestamp to datetime
trackage=date(year+1,5,31)-DOB # age on prior year's May 31st (same as school year in current convention)
trackage=(trackage.days + trackage.seconds/86400)/365.2425 # as decimal
trackage=math.floor(trackage)
if trackage <=7:
team='Track7'
elif 8 <= trackage <=9:
team='Track89'
elif 10 <= trackage <=11:
team='Track1011'
elif 12 <= trackage <=13:
team='Track1213'
elif 14 <= trackage <=15:
team='Track1415'
else: # probably some entry error
mystr=Track.loc[index]['First']+' '+Track.loc[index]['Last']+' Grade:'+Track.loc[index]['Grade']
print('Suspected DOB error for',mystr, 'DOB:', datetime.strftime(DOB, "%m/%d/%y") )
team=''
# Now write back altered subset to mastersignups (index is lost so use SUkey)
SUkey=int(Track.loc[index]['SUkey'])
match=df[df['SUkey']==SUkey] # This gives correct index
df.loc[match.index[0], 'Team'] = team # alter/assign team for this signup
newlyassigned=numunassigned-len(Track[pd.isnull(Track['Team'])])
print(newlyassigned,' players assigned to track age group.')
return df
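# Illustrative sketch (not part of the original code): the May-31 age cut used above, for a
# single made-up date of birth. Relies on the date/math imports this module already uses.
def _example_track_age(year=2018):
    DOB=date(2008,9,15) # hypothetical player
    trackage=date(year+1,5,31)-DOB
    trackage=(trackage.days + trackage.seconds/86400)/365.2425
    return math.floor(trackage) # 10, so this player would land in Track1011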
def readbackevents(trackevents):
'''
Reads back choices of track events from summary sheet and prep for
copy to Pat Moore spreadsheet
in 4x100, 4x200, 4x400 col enter start order 1,2,3,4,1A,2A
'''
regcols=['Last', 'First', 'Middle', 'Gender',
'DOB', 'Team Code','Event#1', 'Event#2', 'Event#3', 'Event#4']
# Manually enter order of runners and alternates for relays
events=['50M', '100M', '200M', '400M', '800M', '1600M', 'SoftThrow',
'ShotPut','StdLongJump', 'RunLongJump']
regfile=pd.DataFrame(columns=regcols)
    regfile.loc[:,'Team Code']='SFC'
for index, row in trackevents.iterrows():
# get events for which player is signed up
playerevents=[]
for i, event in enumerate(events):
if str(row[event])!='nan':
playerevents.append(event)
print(event,' for ',row.First, row.Last)
# Check for relay type separately
if row['Relay'] in ['4x100', '4x200','4x400']:
playerevents.append(row['Relay'])
print(row['Relay'],' for ',row.First, row.Last)
if len(playerevents)>4:
print('Too many events for ', row.First, row.Last)
# Now construct player's entry in regfile
thisentry=row
thisentry['Middle']=''
thisentry['Team Code']='SFC'
# Gender is upper case M or F
thisentry['Gender']=thisentry['Gender'].upper()
for i, event in enumerate(playerevents):
colname='Event#'+str(i+1)
thisentry[colname]=event
regfile=regfile.append(thisentry, ignore_index=True)
regfile=regfile[regcols]
return regfile
def maketracksummary(df, year, players):
'''Assign to different track team based on age on May 31 of this year (school year+1)
'''
Track=df[(df['Sport']=='Track') & (df['Year']==year)]
Track=pd.merge(Track,players, how='left', on=['Plakey'], suffixes=('','2'))
Track.loc[Track.index,'Trackage']=np.nan
for index, row in Track.iterrows():
DOB=Track.loc[index]['DOB'] # merged from players.csv
if isinstance(DOB,str):
DOB=datetime.strptime(DOB,"%m/%d/%Y").date() # convert string to datetime
        elif isinstance(DOB, pd.Timestamp): # pd.Timestamp works on both old and current pandas
DOB=DOB.date() # convert timestamp to datetime
trackage=date(year+1,5,31)-DOB # age on prior year's May 31st (same as school year in current convention)
trackage=(trackage.days + trackage.seconds/86400)/365.2425 # as decimal
Track.loc[index,'Trackage'] = trackage
trackage=math.floor(trackage)
if trackage <=7:
team='Track7'
elif 8 <= trackage <=9:
team='Track89'
elif 10 <= trackage <=11:
team='Track1011'
elif 12 <= trackage <=13:
team='Track1213'
elif 14 <= trackage <=15:
team='Track1415'
else: # probably some entry error
mystr=Track.loc[index]['First']+' '+Track.loc[index]['Last']+' Grade:'+Track.loc[index]['Grade']
            print('Suspected DOB error for',mystr, 'DOB:', datetime.strftime(DOB, "%m/%d/%y") )
team=''
Track.loc[index,'Team'] = team
Track=Track.sort_values(['Trackage'])
mycols=['First', 'Last', 'Grade', 'Gender','DOB','Team','Trackage']
Track=Track[mycols]
return Track
def findrecruits(df, players, famcontact, season, year, signupfile):
'''Read list of signed-up player keys from xls file; compare with last year's set of
players from master Signups log
7/2018 mod... grab DOB to allow easier manual additions to signups '''
mycols=df.columns.tolist() # Same columns as mastersignups
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
Recruits=pd.DataFrame(columns=mycols) # empty frame for recruits
for i, sport in enumerate(sportlist):
thissport=df[df['Sport']==sport]
thissport=thissport.sort_values(['Year'], ascending=False) # most current signups at top
plakeylist=thissport.Plakey.unique() # ndarray with list of unique soccer players
keylist=plakeylist.tolist()
for i, key in enumerate(keylist):
match=thissport[thissport['Plakey']==key]
# recruits ... played in year -1 but not in year
if year-1 in match.Year.unique() and year not in match.Year.unique():
match=match[0:1] # take only last season's signup
Recruits=pd.concat([Recruits,match], ignore_index=True)
# plakey, famkey, first, last, grade, gender,
Recruits.loc[:,'Grade']=Recruits.Grade.replace('K',0) # replace K with zero to allow sorting
Recruits.loc[:,'Grade']=Recruits.Grade.astype(int)
Recruits.loc[:,'Grade']=Recruits.Grade+1 # adjust to correct grade for this year
# Drop if graduated
Recruits=Recruits[Recruits['Grade']<=8]
# adjust grade such that players current grade is in list
# join with famcontact on famkey to get contact info (emails, phones, etc.)
# Inner join on famkey adds the necessary info
Recruits= | pd.merge(Recruits, famcontact,how='inner', on='Famkey', suffixes=('','_r')) | pandas.merge |
import pandas as pd
import numpy as np
import talib
class Indicators(object):
"""
Input: Price DataFrame, Moving average/lookback period and standard deviation multiplier
This function returns a dataframe with 5 columns
Output: Prices, Moving Average, Upper BB, Lower BB and BB Val
"""
def bb(self, l_sym, df_price, time_period, st_dev_u, st_dev_l):
df_bb_u = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_m = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_l = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_bb_u[sym], df_bb_m[sym], df_bb_l[sym] = talib.BBANDS(np.asarray(df_price[sym]), timeperiod=time_period, nbdevup=st_dev_u, nbdevdn=st_dev_l)
except:
pass
return df_bb_u, df_bb_m, df_bb_l
def ema(self, l_sym, df_price, time_period):
df_ema = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ema[sym] = talib.EMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ema
def ma(self, l_sym, df_price, time_period):
df_ma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ma[sym] = talib.MA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ma
def sma(self, l_sym, df_price, time_period):
df_sma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_sma[sym] = talib.SMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_sma
def adx(self, l_sym, df_high, df_low, df_close, time_period):
df_adx = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_adx[sym] = talib.ADX(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=np.asarray(df_close[sym]), timeperiod = time_period)
except:
pass
return df_adx
def mom(self, l_sym, df_price, time_period):
df_mom = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_mom[sym] = talib.MOM(np.asarray(df_price[sym]), timeperiod = time_period)
except:
pass
return df_mom
def atr(self, l_sym, df_high, df_low, df_close, time_period):
df_atr = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_atr[sym] = talib.ATR(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=np.asarray(df_close[sym]), timeperiod=time_period)
except:
pass
return df_atr
def macd(self, l_sym, df_price, fast_period, slow_period, signal_period):
df_macd = pd.DataFrame(columns=l_sym, index=df_price.index)
df_macdsignal = pd.DataFrame(columns=l_sym, index=df_price.index)
df_macdhist = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_macd[sym], df_macdsignal[sym], df_macdhist[sym] = talib.MACD(np.asarray(df_price[sym]), fastperiod=fast_period, slowperiod=slow_period, signalperiod=signal_period)
except:
pass
return df_macd, df_macdsignal, df_macdhist
def wavec(self, l_sym, df_three, df_four, df_five):
df_ca = pd.DataFrame(columns=l_sym, index=df_three.index)
df_cb = pd.DataFrame(columns=l_sym, index=df_three.index)
for sym in l_sym:
df_ca[sym] = df_four[sym] - df_five[sym]
df_cb[sym] = df_three[sym] - df_four[sym]
return df_ca, df_cb
def waveb(self, l_sym, df_two, df_three, df_four):
df_ba = pd.DataFrame(columns=l_sym, index=df_two.index)
df_bb = pd.DataFrame(columns=l_sym, index=df_two.index)
for sym in l_sym:
df_ba[sym] = df_three[sym] - df_four[sym]
df_bb[sym] = df_two[sym] - df_three[sym]
return df_ba, df_bb
def wavea(self, l_sym, df_one, df_two, df_three):
df_aa = pd.DataFrame(columns=l_sym, index=df_one.index)
df_ab = pd.DataFrame(columns=l_sym, index=df_one.index)
for sym in l_sym:
df_aa[sym] = df_two[sym] - df_three[sym]
df_ab[sym] = df_one[sym] - df_two[sym]
return df_aa, df_ab
def keltner(self, l_sym, df_high, df_low, df_close, ema_period, atr_period, multiplier):
df_kch_u = pd.DataFrame(columns=l_sym, index=df_high.index)
df_kch_l = pd.DataFrame(columns=l_sym, index=df_high.index)
df_kch_m = self.ema(l_sym, df_close, time_period=ema_period)
df_atr = self.atr(l_sym, df_high, df_low, df_close, time_period=atr_period)
for sym in l_sym:
df_kch_u[sym] = df_kch_m[sym] + (multiplier * df_atr[sym])
df_kch_l[sym] = df_kch_m[sym] - (multiplier * df_atr[sym])
return df_kch_u, df_kch_m, df_kch_l
def ichimoku(self, l_sym, df_high, df_low):
df_ichimoku_tenkan_u = | pd.DataFrame(columns=l_sym, index=df_high.index) | pandas.DataFrame |
# Databricks notebook source
import pandas as pd
import numpy as np
import networkx as nx
from nodevectors import Node2Vec as NVVV
from sklearn.decomposition import PCA
import os
import itertools
import pickle
spark.conf.set("spark.databricks.delta.properties.defaults.autoOptimize.optimizeWrite", "true")
spark.conf.set("spark.databricks.delta.properties.defaults.autoOptimize.autoCompact", "true")
# COMMAND ----------
def load_data(save_path, file_name):
df = (spark.read.format("delta")
.option("header", "true")
.option("inferSchema", "true")
.load(os.path.join(save_path, file_name))
)
return df.toPandas()
def filter_non_esg(df):
return df[(df['E']==True) | (df['S'] == True) | (df['G'] == True)]
# COMMAND ----------
class graph_creator:
def __init__(self, df):
self.df = df
def create_graph(self):
# Find Edges
df_edge = pd.DataFrame(self.df.groupby("URL").Organization.apply(list)
).reset_index()
get_tpls = lambda r: (list(itertools.combinations(r, 2)) if
len(r) > 1 else None)
df_edge["SourceDest"] = df_edge.Organization.apply(get_tpls)
df_edge = df_edge.explode("SourceDest").dropna(subset=["SourceDest"])
# Get Weights
source_dest = pd.DataFrame(df_edge.SourceDest.tolist(),
columns=["Source", "Dest"])
sd_mapping = source_dest.groupby(["Source", "Dest"]).size()
get_weight = lambda r: sd_mapping[r.Source, r.Dest]
source_dest["weight"] = source_dest.apply(get_weight, axis=1)
# Get
self.organizations = set(source_dest.Source.unique()).union(
set(source_dest.Dest.unique()))
self.G = nx.from_pandas_edgelist(source_dest, source="Source",
target="Dest", edge_attr="weight", create_using=nx.Graph)
return self.G
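# COMMAND ----------

# Illustrative usage sketch (not part of the original notebook): builds the co-mention
# graph from a tiny hand-made frame. Only the URL/Organization columns used by
# create_graph are needed; all values are invented.
def _example_graph_creator():
    toy = pd.DataFrame({
        "URL": ["u1", "u1", "u1", "u2", "u2", "u3"],
        "Organization": ["OrgA", "OrgB", "OrgC", "OrgA", "OrgB", "OrgD"],
    })
    gc = graph_creator(toy)
    G = gc.create_graph()
    return list(G.edges(data=True))  # the OrgA-OrgB edge carries weight 2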
# COMMAND ----------
def get_embeddings(G, organizations):
# Fit graph
g2v = NVVV()
g2v.fit(G)
# Embeddings
print("NVVVstart")
embeddings = g2v.model.wv.vectors
print("NVVVend")
pca = PCA(n_components=3)
principalComponents = pca.fit_transform(embeddings)
d_e = | pd.DataFrame(principalComponents) | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data=pd.read_table("GSE5583.txt",header=0,index_col=0)
print("Previous 5:\n",data.head())
number_of_genes=len(data.index)
print("Gene Number:",number_of_genes)
# normalization
data2=np.log2(data+0.0001)
print("Previous 5:\n",data2.head())
# diff P
from scipy import stats
pvalue=[]
for i in range(0,number_of_genes):
ttest=stats.ttest_ind(data2.iloc[i,0:3],data2.iloc[i,3:6])
pvalue.append(ttest[1])
# group means, log2 fold change (KO - WT), and a histogram of the -log p-values
wt=data2.loc[:,'WT.GSM130365':'WT.GSM130367'].mean(axis=1)
ko=data2.loc[:,'KO.GSM130368':'KO.GSM130370'].mean(axis=1)
fold=ko-wt
plt.hist(-np.log(pvalue))
plt.title("Histogram of P-value")
plt.show()
# assemble fold change and p-values for the differential-expression (volcano) plot
gene_array=np.asarray(pvalue)
result= | pd.DataFrame({'pvalue':gene_array,'FoldChange':fold}) | pandas.DataFrame |
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import re
from math import ceil
import pandas as pd
from sklearn.metrics import classification_report
from scipy.stats import shapiro, boxcox, yeojohnson
from scipy.stats import probplot
from sklearn.preprocessing import LabelEncoder, PowerTransformer
from category_encoders.target_encoder import TargetEncoder
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.linear_model import LinearRegression, LogisticRegression
# from .charts.classification_visualization import classification_visualization
# from .charts.charts import Plot, ScatterChart
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.utils.multiclass import unique_labels
from sklearn.manifold import TSNE
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
import json
from pyod.models.hbos import HBOS
from statsmodels.api import ProbPlot
# from .charts.charts_extras import (
# feature_importances_plot,
# regression_viz,
# classification_viz,
# )
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
RandomForestRegressor,
GradientBoostingRegressor,
)
from sklearn.svm import LinearSVC
import warnings
warnings.filterwarnings("ignore")
sns.set_palette("colorblind")
class CrawtoDS:
def __init__(
self,
data,
target,
test_data=None,
time_dependent=False,
features="infer",
problem="infer",
):
self.input_data = data
self.target = target
self.features = features
self.problem = problem
self.test_data = test_data
self.timedependent = time_dependent
if self.problem == "binary classification":
self.train_data, self.valid_data = train_test_split(
self.input_data, shuffle=True, stratify=self.input_data[self.target],
)
elif self.problem == "regression":
self.train_data, self.valid_data = train_test_split(
self.input_data, shuffle=True,
)
    @property
    def nan_features(self):
        """Feature columns whose fraction of NaN values exceeds 0.25.
        map computes the NaN fraction and keeps the feature name when it is above
        the threshold; filter then drops the False placeholders. Exposed as a
        property so the attribute-style access used later (self.nan_features) resolves."""
        f = self.input_data.columns.values
        len_df = len(self.input_data)
        nan_features = list(
            filter(
                lambda x: x is not False,
                map(
                    lambda x: x
                    if self.input_data[x].isna().sum() / len_df > 0.25
                    else False,
                    f,
                ),
            )
        )
        return nan_features
    @property
    def problematic_features(self):
f = self.input_data.columns.values
problematic_features = []
for i in f:
if "Id" in i:
problematic_features.append(i)
elif "ID" in i:
problematic_features.append(i)
return problematic_features
    @property
    def undefined_features(self):
if self.features == "infer":
undefined_features = list(self.input_data.columns)
undefined_features.remove(self.target)
for i in self.nan_features:
undefined_features.remove(i)
for i in self.problematic_features:
undefined_features.remove(i)
return undefined_features
def numeric_features(self):
numeric_features = []
l = self.undefined_features
for i in l:
if self.input_data[i].dtype in ["float64", "float", "int", "int64"]:
if len(self.input_data[i].value_counts()) / len(self.input_data) < 0.1:
pass
else:
numeric_features.append(i)
return numeric_features
def categorical_features(self, threshold=10):
self.undefined_features
categorical_features = []
to_remove = []
l = self.undefined_features
for i in l:
if len(self.input_data[i].value_counts()) / len(self.input_data[i]) < 0.10:
categorical_features.append(i)
return categorical_features
    @property
    def indicator(self):
indicator = MissingIndicator(features="all")
indicator.fit(self.train_data[self.undefined_features])
return indicator
def train_missing_indicator_df(self):
x = self.indicator.transform(self.train_data[self.undefined_features])
x_labels = ["missing_" + i for i in self.undefined_features]
missing_indicator_df = | pd.DataFrame(x, columns=x_labels) | pandas.DataFrame |
import pandas as pd
def subm_to_df(subm):
data = []
for key in sorted(subm.keys()):
top_imgs = subm[key]
for img_id, score in top_imgs.items():
row = [key, img_id, score]
data.append(row)
return | pd.DataFrame(columns=["topic_id", "image_id", "confidence_score"], data=data) | pandas.DataFrame |
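# Illustrative usage sketch (not part of the original file): the nested-dict layout
# subm_to_df expects, with made-up topic/image ids and scores.
def _example_subm_to_df():
    subm = {"topic_1": {"img_a": 0.9, "img_b": 0.4},
            "topic_2": {"img_c": 0.7}}
    return subm_to_df(subm)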
"""
2 - Jan - 2018 / <NAME> / <EMAIL>
datred.py is a module created as part of the FUSS package to help with the data reduction of spectropolarimetric
data (at the present time only used with FORS2 data)
Pre-requisites
--------------
os, astropy.io, numpy, math, matplotlib.pyplot, pysynphot, scipy.special, pandas
Variable
--------
zero_angles : string
Path to the text file containing the chromatic zero angles for FORS2 (needs updating for you own system).
Can be found at: http://www.eso.org/sci/facilities/paranal/instruments/fors/inst/pola.html
Functions
---------
sort_red
rebin
pol_deg
pol_ang
F_from_oeray
Classes
-------
Meta
SpecPol
LinearSpecPol
CircularSpecPol
"""
from __future__ import division
from __future__ import print_function
import os
from astropy.io import fits
import numpy as np
import math as m
import matplotlib.pyplot as plt
import pysynphot as S
from scipy import special as special
from astropy.utils.data import get_pkg_data_filename
import re
import pandas as pd
import sys
if sys.version_info.major < 3:
range = xrange
input = raw_input
# ###### LOCATION OF FILE CONTAINING CHROMATIC ZERO ANGLES ######### #
zero_angles = get_pkg_data_filename('data/theta_fors2.txt')
# ################################################################## #
def sort_red():
"""
Creates back-up of the compressed files, uncompresses them, sorts them and re-names them.
Notes
-----
For the .cl files to work properly, the naming convention used by sort_red is essential.
"""
# Creating backups of the original uncompressed files
os.system('mkdir backup')
os.system('cp * backup')
os.system('mkdir txt')
os.system('mv *.txt txt')
os.system('mkdir FITS')
os.system('mv *.fits* FITS')
os.chdir('FITS')
os.system('uncompress *.Z')
os.system('rm -f *.Z')
filename_list = sorted([filename for filename in os.listdir(".")])
os.system('mkdir Data_reduction')
os.system('mkdir Other')
bias = 0
arc = 0
flat = 0
sky = 0
sci = 0
slit = 0
os.system('mkdir CHIP2')
# We don't use CHIP 2 with FORS2 so I put all those in a separate folder
for filename in filename_list:
if "fits" in filename:
try:
hdulist = fits.open(filename)
chip = hdulist[0].header['EXTNAME']
if chip == 'CHIP2':
os.system('mv ' + filename + ' CHIP2')
hdulist.close()
except:
print("Not CHIP2 " + filename)
for filename in filename_list:
# Here we are renaming the files. The naming convention used here is assumed
# throughout the rest of the datred sub-module.
if "fits" in filename:
try:
hdulist = fits.open(filename)
head = hdulist[0].header['HIERARCH ESO DPR TYPE']
if head == 'BIAS':
bias = bias + 1
new_name = 'BIAS_' + str(bias).zfill(3) + ".fits"
os.rename(filename, new_name)
os.system('mv ' + new_name + ' Data_reduction')
if head == 'FLAT,LAMP':
flat = flat + 1
new_name = 'FLAT_' + str(flat).zfill(3) + ".fits"
os.rename(filename, new_name)
os.system('mv ' + new_name + ' Data_reduction')
if head == 'WAVE,LAMP':
arc = arc + 1
new_name = 'ARC_' + str(arc).zfill(3) + ".fits"
os.rename(filename, new_name)
os.system('mv ' + new_name + ' Data_reduction')
if head == 'OBJECT':
sci = sci + 1
new_name = 'SCIENCE_' + str(sci).zfill(3) + ".fits"
os.rename(filename, new_name)
os.system('mv ' + new_name + ' Data_reduction')
if head == 'STD':
sci = sci + 1
new_name = 'SCIENCE_' + str(sci).zfill(3) + ".fits"
os.rename(filename, new_name)
os.system('mv ' + new_name + ' Data_reduction')
if head == 'SKY':
sky = sky + 1
new_name = 'SKY_' + str(sky).zfill(3) + ".fits"
os.rename(filename, new_name)
os.system('mv ' + new_name + ' Other')
if head == 'SLIT':
slit = slit + 1
new_name = 'SLIT_' + str(slit).zfill(3) + ".fits"
os.rename(filename, new_name)
os.system('mv ' + new_name + ' Other')
hdulist.close()
except:
print("Could not sort this file (type?) " + filename)
os.chdir('Data_reduction')
# If the images have 4096 pixels in x direction they're defo not our science images and if they are calibration
# images they can't calibrate our science because it's the wrong size.
os.system('mkdir wrong_size')
for filename in os.listdir("."):
if "fits" in filename:
try:
hdulist = fits.open(filename)
size_x = hdulist[0].header['HIERARCH ESO DET OUT1 NX']
if size_x == 4096:
os.system('mv ' + filename + ' wrong_size')
except:
print("Could not sort this file (size?) " + filename)
hdulist.close()
print('\nAll Done! :D \n')
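# Illustrative sketch (not part of the original module): a non-destructive preview of how
# sort_red would classify files, reading the same header keyword without renaming or moving
# anything. Assumes the FITS files sit in the given directory.
def _example_preview_dpr_types(path="."):
    prefixes = {'BIAS': 'BIAS', 'FLAT,LAMP': 'FLAT', 'WAVE,LAMP': 'ARC',
                'OBJECT': 'SCIENCE', 'STD': 'SCIENCE', 'SKY': 'SKY', 'SLIT': 'SLIT'}
    for filename in sorted(os.listdir(path)):
        if 'fits' not in filename:
            continue
        try:
            dpr_type = fits.open(os.path.join(path, filename))[0].header['HIERARCH ESO DPR TYPE']
            print(filename, '->', prefixes.get(dpr_type, 'unsorted'))
        except Exception:
            print(filename, '-> could not read header (type?)')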
def rebin(wl, f, r, bin_siz=15):
"""
To rebin my flux spectra
Parameters
----------
wl : array
1D array containing the wavelengths to be rebinned
f : array
1D array containing the fluxes to be rebinned
r : array
1D array containing the errors on the fluxes to be rebinned
bin_siz : int
Size of the new bins in Angstrom.
Returns
-------
    tuple of 1D arrays: wl, f, err all rebinned to the new bin size
    """
    if bin_siz is None:
        print("No binning")
        return wl, f, r
wl = np.array(wl)
f = np.array(f)
r = np.array(r)
small_bin_sizes = []
bins_f = np.zeros(int((max(wl) - min(wl)) / bin_siz) + 1) # new flux bins, empty for now
bins_w = np.zeros(int((max(wl) - min(wl)) / bin_siz) + 1) # new error bins, empty for now
weights = 1 / (r ** 2)
for i in range(len(wl) - 1):
# n = int((wl[i]-min(wl))/bin_siz) # n is the number of the new bin
small_bin_sizes.append((wl[i + 1] - wl[i])) # filling list of small bin sizes
bin_centers = [(min(wl) + bin_siz / 2) + bin_siz * n for n in range(len(bins_f))] # finding the new bin centers
bin_edges = [bin_centers[0] - bin_siz / 2] + [bin1 + bin_siz / 2 for bin1 in
bin_centers] # finding the new bin edges
ind_edge = [] # in this list I'll put the index of the array wl corresponding to the wavelength values
# that are close to the bin edges.
for edge in bin_edges:
i_wl_at_edge = min(range(len(wl[:-1])), key=lambda i: abs(edge - wl[i]))
# this is to find the small bin that is closest to the edge of the new bin
# print wl[i_wl_at_edge], small_bin_sizes[i_wl_at_edge]
ind_edge.append(i_wl_at_edge)
for i in range(len(wl)):
n = int((wl[i] - min(wl)) / bin_siz)
if i in ind_edge:
j = ind_edge.index(i) # finding index j of the wavelength index i I am interested in
edge = bin_edges[j] # the edge to compare to wl[i] will then be at bin_edges[j]
if wl[i] < edge:
frac_overlap = (wl[i] + small_bin_sizes[i] / 2 - edge) / (small_bin_sizes[i])
try:
bins_f[n] += f[i] * weights[i] * (1 - frac_overlap)
bins_w[n] += weights[i] * (1 - frac_overlap)
bins_f[n + 1] += f[i] * weights[i] * frac_overlap
bins_w[n + 1] += weights[i] * frac_overlap
except IndexError:
print("Index Error at ", wl[i])
pass
elif wl[i] > edge:
frac_overlap = (wl[i] + small_bin_sizes[i] / 2 - edge) / (small_bin_sizes[i])
try:
bins_f[n] += f[i] * weights[i] * frac_overlap
bins_w[n] += weights[i] * frac_overlap
bins_f[n + 1] += f[i] * weights[i] * (1 - frac_overlap)
bins_w[n + 1] += weights[i] * (1 - frac_overlap)
except IndexError:
print("Index Error at ", wl[i])
pass
else:
try:
bins_f[n] += f[i] * weights[i]
bins_w[n] += weights[i]
except IndexError:
print("Index Error at ", wl[i])
pass
for i in range(len(bin_centers)):
if bins_w[i] == 0.0:
            print(bin_centers[i], bins_w[i], "\nCAREFUL! BIN WEIGHT == 0!")
bins_f[:-1] /= bins_w[:-1] # normalise weighted values by sum of weights to get weighted average
bins_err = np.sqrt(1 / bins_w[:-1])
return bin_centers[:-1], bins_f[:-1], bins_err
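# Illustrative sketch (not part of the original module): rebin applied to a flat synthetic
# spectrum, just to show the call signature and the effect of binning on the errors.
def _example_rebin():
    wl = np.arange(4000.0, 7000.0, 5.0)   # 5 A native sampling
    f = np.ones_like(wl)
    r = np.full_like(wl, 0.1)
    wl_b, f_b, r_b = rebin(wl, f, r, bin_siz=15)
    return len(wl_b), f_b[0], r_b[0]      # fewer, wider bins and smaller per-bin errors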
def pol_deg(q, u, q_r=None, u_r=None):
"""
Finds the degree of polarisation p from Stokes parameters q and u. Does debiasing using step function.
Notes
-----
q, u, q_r and u_r must have the same dimension
Parameters
----------
q : float or 1D numpy.array
Stokes q.
u : float or 1D numpy.array
Stokes u.
q_r : float or 1D numpy.array
Errors on Stokes q.
u_r : float or 1D numpy.array
Errors on Stokes u.
Returns
-------
tuple (p, error on p) if errors on q and u are given.
Only p if errors are not given.
"""
p = np.sqrt(q * q + u * u)
if q_r is not None and u_r is not None:
p_r = (1 / p) * np.sqrt((q * q_r) ** 2 + (u * u_r) ** 2)
# Here we are debiasing using a step function. See Wang et al. (1997)
# (Polarimetry of the Type IA Supernova SN 1996X -- their equation 3)
try:
for i in range(len(p)):
if p[i] - p_r[i] > 0:
p[i] -= (p_r[i]**2)/p[i]
except TypeError:
if p - p_r > 0:
p -= (p_r**2)/p
return p, p_r
else:
return p
def pol_ang(q, u, q_r=None, u_r=None):
"""
Calculates the polarisation angle
Parameters
----------
q : float or 1D numpy.array
Stokes q.
u : float or 1D numpy.array
Stokes u.
q_r : float or 1D numpy.array
Errors on Stokes q.
u_r : float or 1D numpy.array
Errors on Stokes u.
Returns
-------
tuple (theta, error on theta) if errors on q and u are given.
Only theta if errors are not given.
4
"""
if isinstance(q, float):
theta = 0.5 * m.atan2(u, q)
theta = (theta * 180.0) / m.pi
if theta < 0:
theta += 180 # Making sure P.A is within limit 0<theta<180 deg
if q_r is not None and u_r is not None:
theta_r = 0.5 * np.sqrt(((u_r / u) ** 2 + (q_r / q) ** 2) * (1 / (1 + (u / q) ** 2)) ** 2)
theta_r = (theta_r * 180.0) / m.pi
if theta_r > 180:
theta_r = 180
return theta, theta_r
else:
return theta
else:
theta = np.array([])
theta_r = np.array([])
for t in range(len(q)):
theta_t = 0.5 * m.atan2(u[t], q[t])
theta_t = (theta_t * 180.0) / m.pi
if theta_t < 0:
theta_t += 180 # Making sure P.A is within limit 0<theta<180 deg
theta = np.append(theta, theta_t)
if q_r is not None and u_r is not None:
theta_tr = 0.5 * np.sqrt(
((u_r[t] / u[t]) ** 2 + (q_r[t] / q[t]) ** 2) * (1 / (1 + (u[t] / q[t]) ** 2)) ** 2)
theta_tr = (theta_tr * 180.0) / m.pi
if theta_tr > 180:
theta_tr = 180
theta_r = np.append(theta_r, theta_tr)
if q_r is not None and u_r is not None:
return theta, theta_r
else:
return theta
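# Illustrative sketch (not part of the original module): degree and angle of polarisation
# from made-up Stokes parameters, using the two helpers above.
def _example_qu_to_p_theta():
    q = np.array([0.010, -0.006])
    u = np.array([0.010, 0.008])
    q_r = np.array([0.001, 0.001])
    u_r = np.array([0.001, 0.001])
    p, p_r = pol_deg(q, u, q_r, u_r)          # debiased p with its error
    theta, theta_r = pol_ang(q, u, q_r, u_r)  # angles in degrees, 0 < theta < 180
    return p, p_r, theta, theta_r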
def F_from_oeray(fo, fe, fo_r, fe_r):
"""
Normalised flux (F) from ordinary and extra-ordinary rays
Notes
-----
All the arrays should be the same length.
Parameters
----------
fo : 1D numpy.array
flux of the ordinary ray
fe : 1D numpy.array
flux of the extra-ordinary ray
fo_r : 1D numpy.array
error on the flux of the ordinary ray
fe_r : 1D numpy.array
error on the flux of the extra-ordinary ray
Returns
-------
Normalised flux : np.array
"""
F = (fo - fe) / (fo + fe)
F_r = abs(F) * np.sqrt( ((fo_r ** 2) + (fe_r ** 2)) * ( (1 / (fo - fe) ** 2) + (1 / (fo + fe) ** 2)) )
return F, F_r
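# Illustrative sketch (not part of the original module): normalised flux difference for a
# single made-up pair of ordinary/extra-ordinary fluxes.
def _example_F_from_oeray():
    fo = np.array([1050.0])
    fe = np.array([950.0])
    fo_r = np.array([10.0])
    fe_r = np.array([10.0])
    return F_from_oeray(fo, fe, fo_r, fe_r)  # F = 0.05 with its propagated error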
class Meta(object):
"""
To find the meta data of the images we are working on, create a data frame and save it to a tab separated file.
Examples
--------
>> import FUSS.datred as r
>> path = "/home/heloise/Data/11hs/88.D-0761/2011-12-23/FITS/Data_reduction/bad_binning/"
    >> metadataframe = r.Meta(path=path).data
    The Meta Data file metadata already exists. Would you like to replace it? (Y/n)N
File not replaced
>> metadataframe.iloc[-5:-1]
Flag Filename ESO label Angle Pol. Type Exp. Time Airmass \
24 polstd SCIENCE_84.fits PMOS_NGC2024 0 lin 19.9985 1.095
25 polstd SCIENCE_85.fits PMOS_NGC2024 22.5 lin 19.9922 1.0945
26 polstd SCIENCE_86.fits PMOS_NGC2024 45 lin 19.9986 1.094
27 polstd SCIENCE_87.fits PMOS_NGC2024 67.5 lin 20.0026 1.093
1/gain RON Grism Bin Date
24 0.7 2.9 GRIS_300V 2x2 2011-12-23T03:44:59.590
25 0.7 2.9 GRIS_300V 2x2 2011-12-23T03:46:04.915
26 0.7 2.9 GRIS_300V 2x2 2011-12-23T03:47:09.870
27 0.7 2.9 GRIS_300V 2x2 2011-12-23T03:48:15.905
>> metadataframe.info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 29 entries, 0 to 28
Data columns (total 12 columns):
Flag 8 non-null object
Filename 29 non-null object
ESO label 29 non-null object
Angle 29 non-null object
Pol. Type 23 non-null object
Exp. Time 29 non-null object
Airmass 29 non-null object
1/gain 29 non-null object
RON 29 non-null object
Grism 29 non-null object
Bin 29 non-null object
Date 29 non-null object
dtypes: object(12)
memory usage: 2.9+ KB
Notes
-----
The files whose headers we will look at must have ".fits" in the name, cannot be "dSCIENCE" or "cal_" files.
(This is to do with the notation used throughout the FUSS package both in the IRAF and python scripts)
Attributes
----------
clobber_flag : bool or None
Whether or not to clobber the output_file if it exists. Initiated as "None".
output : str
Name of the output file
path : str
Path to the location of the images the metadata should be read from. Default: "./".
target_flag : str
String present in ESO labels that uniquely identifies the target.
zeropol_flag : str
String present in ESO labels that uniquely identifies the zero polarisation standard.
polstd_flag : str
String present in ESO labels that uniquely identifies the polarised standard.
sorted_files : list of str
Sorted list containing the names of the files whose headers we want to look at.
data : pandas.core.frame.DataFrame
Data frame containing the meta data.
Columns: ['Flag', 'Filename', 'ESO label', 'Angle', 'Pol. Type', 'Exp. Time', 'Airmass', '1/gain', 'RON', 'Grism', 'Bin', 'Date']
Methods
-------
_write_out()
Writes to file
_clobber_flag()
        Sets the clobber flag to True or False if it has not been set on initiation.
_flag()
        Writes the corresponding flag to each row. Either 'tar', 'zpol' or 'polstd'
_read_headers()
Reads the fits headers and fills the info attribute data frame.
"""
def __init__(self, path = "./", output_file = 'metadata',
target_flag='CCSN', zpol_flag='Zero_', polstd_flag='NGC2024',
clobber = None, make_file = True):
"""
Initiates MetaData object
Parameters
----------
path : str, optional
path to the location of the images the metadata should be read from. Default: "./".
output_file : str, optional
path to the output file where the data frame containing metadata will be written. Default: 'metadata'.
target_flag : str, optional
String present in ESO labels that uniquely identifies the target.
zpol_flag : str, optional
String present in ESO labels that uniquely identifies the zero polarisation standard.
polstd_flag : str, optional
String present in ESO labels that uniquely identifies the polarised standard.
clobber : bool, optional
If a file "output_file" already exists, whether to replace it or not. Default: False.
Returns
-------
None
"""
self.clobber_flag = clobber
self.output = output_file
self.path = path
self.target_flag = target_flag
self.zeropol_flag = zpol_flag
self.polstd_flag = polstd_flag
# We want to make sure that python reads the images in the right order, we also only need to look at the headers
# of the original images with name format SCIENCE_##.fits where ## is a 2 or 3 digit number.
self.sorted_files = sorted([filename for filename in os.listdir(self.path)
if 'fits' in filename and 'ms' not in filename
and 'dSCIENCE' not in filename and 'cal' not in filename])
self.data = pd.DataFrame(columns = ['Flag', 'Filename', 'ESO label', 'Angle', 'Pol. Type',
'Exp. Time', 'Airmass', '1/gain',
'RON', 'Grism', 'Bin', 'Date'])
self._read_headers() # read headers and fills the dataframce
self._flag() # to flag targets and pol standards
if make_file is False:
return
self._write_out() # writes it out to a file
return
def _write_out(self):
"""
Writes to file
"""
if not os.path.isfile(self.output):
self.data.to_csv(self.output, sep='\t', index=False)
print("File Created")
return
else:
self._clobber_flag()
if self.clobber_flag is True:
os.remove(self.output)
self.data.to_csv(self.output, sep='\t', index=False)
print("File replaced")
if self.clobber_flag is False:
print("File not replaced")
def _clobber_flag(self):
"""
        Sets the clobber flag to True or False if it has not been set on initiation.
"""
while self.clobber_flag is None:
clobber = input("The Meta Data file "+self.output+" already exists. Would you like to replace it? (Y/n)")
if clobber in ['n', 'N', 'no', 'no']:
self.clobber_flag = False
elif clobber in ['', ' ', 'Y', 'y', 'yes', 'Yes', 'ye', 'Ye']:
self.clobber_flag = True
def _flag(self):
"""
        Writes the corresponding flag to each row. Either 'tar', 'zpol' or 'polstd'
"""
for i in range(len(self.data['ESO label'])):
if self.target_flag in self.data.loc[i,'ESO label']:
self.data.loc[i, 'Flag'] = 'tar'
elif self.zeropol_flag in self.data.loc[i,'ESO label']:
self.data.loc[i, 'Flag'] = 'zpol'
elif self.polstd_flag in self.data.loc[i,'ESO label']:
self.data.loc[i, 'Flag'] = 'polstd'
def _read_headers(self):
"""
Reads the fits headers and fills the info attribute data frame.
"""
for i in range(len(self.sorted_files)):
filename = self.sorted_files[i]
self.data.loc[i, 'Filename'] = filename
# Here we go through the headers and take out the information that we need to create the fits files.
# I think the header names and variable names are explicit enough
hdulist = fits.open(self.path+filename)
exptime = hdulist[0].header['EXPTIME']
self.data.loc[i,'Exp. Time'] = exptime
binx = hdulist[0].header['HIERARCH ESO DET WIN1 BINX']
biny = hdulist[0].header['HIERARCH ESO DET WIN1 BINY']
binning = str(binx) + "x" + str(biny)
self.data.loc[i,'Bin'] = binning
#size_x = hdulist[0].header['HIERARCH ESO DET OUT1 NX']
one_over_gain = hdulist[0].header['HIERARCH ESO DET OUT1 CONAD']
self.data.loc[i,'1/gain'] = one_over_gain
ron = hdulist[0].header['HIERARCH ESO DET OUT1 RON'] # Read Out Noise
self.data.loc[i,'RON'] = ron
try:
grism = hdulist[0].header['HIERARCH ESO INS GRIS1 NAME']
except KeyError:
grism = 'None'
self.data.loc[i,'Grism'] = grism
poltype = None
try:
angle = hdulist[0].header['HIERARCH ESO INS RETA2 ROT']
poltype = 'lin'
except KeyError:
try:
angle = hdulist[0].header['HIERARCH ESO INS RETA2 POSANG']
poltype = 'lin'
except KeyError:
try:
angle = str(hdulist[0].header['HIERARCH ESO INS RETA4 ROT'])
poltype = 'circ'
except KeyError:
angle = 'None'
self.data.loc[i,'Angle'] = angle
self.data.loc[i,'Pol. Type'] = poltype
date = hdulist[0].header['DATE-OBS']
self.data.loc[i,'Date'] = date
try:
esoname = hdulist[0].header['HIERARCH ESO OBS NAME']
except KeyError:
esoname = 'None'
self.data.loc[i,'ESO label'] = esoname
if "fits" and "SCIENCE" in filename:
head = hdulist[0].header['HIERARCH ESO DPR TYPE']
if head == 'OBJECT' or head == 'STD':
airm_i = float(hdulist[0].header['HIERARCH ESO TEL AIRM START'])
airm_f = float(hdulist[0].header['HIERARCH ESO TEL AIRM END'])
airm = (airm_i + airm_f) / 2
else:
airm = 'None'
else:
airm = 'None'
self.data.loc[i,'Airmass'] = airm
# ################# LINEAR SPECPOL ####################### #
class SpecPol(object):
"""
Base class for LinearSpecPol and CircularSpecPol.
Notes
-----
    SpecPol is not particularly useful on its own; it is simply a basic framework of methods and attributes we
need for both linear and circular polarimetry calculations.
Attributes
---------
metadata : pandas.core.frame.DataFrame
Data frame containing the metadata as built by MetaData. Can be initiated from tab separated file containing
the data frame or from the data frame directly.
oray : str
        Which aperture flag corresponds to the ordinary ray files. In our naming convention the flag can be either
'ap1' or 'ap2'.
eray : str
Which aperture flag that corresponds to the extra-ordinary ray files. In our naming convention the flag can be
either 'ap1' or 'ap2'.
bin_size : int or None
Size of the bins in Angstrom if rebinning. Otherwise None.
snrplot : bool
        Whether or not to plot the signal to noise ratio plots (expected and calculated -- to check how good a job the
        binning did). Default is False.
pol_file : Initiated as None
Name of the output polarisation file. Will be defined through user input.
flag : Initiated as None
Flag to focus on in the metadata: 'tar', 'zpol' or 'polstd'. Defined in the calculate() method of LinearSpecPol
and CircularSpecPol
"""
def __init__(self, oray='ap2', metadata = 'metadata', bin_size=None,
snrplot=False):
if oray == 'ap2':
self.oray, self.eray = 'ap2', 'ap1'
elif oray == 'ap1':
self.oray, self.eray = 'ap1', 'ap2'
if isinstance(metadata, str):
assert os.path.isfile(metadata), metadata+" is not a valid file path."
self.metadata = pd.read_csv(metadata, sep='\t')
elif isinstance(metadata, pd.core.frame.DataFrame):
self.metadata = metadata
self.bin_size=bin_size
self.snrplot = snrplot
self.pol_file = None
self.flag = None
def _flux_diff_from_file(self, files, check_bin=False):
"""
Calculates the normalised flux differences from files.
Parameters
----------
files : list of str
list of files
check_bin : bool
Whether to check the binning with snr plots.
"""
# Extracting polarised fluxes of o and e ray and their errors according to filenames
for filename in files:
if self.oray in filename:
if 'err' not in filename:
self.wl, fo = np.loadtxt(filename, unpack=True, usecols=(0, 1))
else:
self.wl, fo_r = np.loadtxt(filename, unpack=True, usecols=(0, 1))
if self.eray in filename:
if 'err' not in filename:
self.wl, fe = np.loadtxt(filename, unpack=True, usecols=(0, 1))
else:
self.wl, fe_r = np.loadtxt(filename, unpack=True, usecols=(0, 1))
# BINNING
if self.bin_size is None:
self.wl_bin, fo_bin, fo_bin_r, fe_bin, fe_bin_r = self.wl, fo, fo_r, fe, fe_r
else:
print("Binning to ", self.bin_size, "Angstrom")
self.wl_bin, fo_bin, fo_bin_r = rebin(self.wl, fo, fo_r, bin_siz=self.bin_size)
self.wl_bin, fe_bin, fe_bin_r = rebin(self.wl, fe, fe_r, bin_siz=self.bin_size)
# To perform a few checks on the binning (in particular the SNR yielded by binning)
if check_bin is True:
self._check_binning(fo, fe, fo_r, fe_r, fo_bin, fe_bin, fo_bin_r, fe_bin_r)
# Finding flux difference
F, F_r = F_from_oeray(fo_bin, fe_bin, fo_bin_r, fe_bin_r)
return self.wl_bin, F, F_r
def _list_files(self, angle, linpol = True, fileloc='.'):
"""
Creates list of files of interest.
Parameters
----------
angle : float
Which HWRP angle we are interested in.
linpol : bool, optional
Whether we are doing linear (True) or circular (False) polarisation. Default is True.
fileloc : str
Path to location of files. Usually will be '.' so this is the default.
"""
if linpol is True:
poltype = 'lin'
else:
poltype = 'circ'
angle = float(angle)
root_list = [str(self.metadata.loc[i,"Filename"])[:-5]\
for i in range(len(self.metadata["Filename"])) \
if self.metadata.loc[i, 'Flag'] == self.flag \
if float(self.metadata.loc[i, 'Angle']) == angle \
if self.metadata.loc[i, 'Pol. Type'] == poltype]
mylist = []
sorted_files = sorted([filename for filename in os.listdir(fileloc) if '.txt' in filename and '1D' in filename])
for filename in sorted_files:
for root in root_list:
if root in filename:
mylist.append(filename)
return mylist
def _check_binning(self, fo, fe, fo_r, fe_r, bin_fo, bin_fe, bin_fo_r, bin_fe_r):
"""
This performs a few checks on the binning: Calculates expected values of SNR and compares to the values of
SNR after binning: does the median and the values at central wavelength. Also produces a plot of whole spectrum
"""
snr_not_binned = np.array((fo + fe) / np.sqrt(fo_r ** 2 + fe_r ** 2))
snr_expected = snr_not_binned * np.sqrt(self.bin_size / (self.wl[1] - self.wl[0]))
ind_not_binned_central_wl = int(np.argwhere(self.wl == min(self.wl, key=lambda x: abs(x - 6204)))[0])
snr_not_binned_central_wl = snr_not_binned[ind_not_binned_central_wl]
snr_central_expected = snr_not_binned_central_wl * np.sqrt(self.bin_size / (self.wl[1] - self.wl[0]))
snr_binned = np.array((bin_fo + bin_fe) / np.sqrt(bin_fo_r ** 2 + bin_fe_r ** 2))
ind_central_wl = int(np.argwhere(self.wl_bin == min(self.wl_bin, key=lambda x: abs(x - 6204)))[0])
snr_central_wl = (bin_fo[ind_central_wl] + bin_fe[ind_central_wl]) / \
np.sqrt(bin_fo_r[ind_central_wl] ** 2 + bin_fe_r[ind_central_wl] ** 2)
print("\n======== BEFORE BINNING ======")
print("MEDIAN SNR ")
print(np.median(snr_not_binned))
print("CENTRAL SNR at (", self.wl[ind_not_binned_central_wl], " A)")
print(snr_not_binned_central_wl)
print("======== AFTER BINNING ======")
print("MEDIAN SNR / EXPECTED ")
print(np.median(snr_binned), np.median(snr_expected))
print("CENTRAL SNR / EXPECTED (at ", self.wl[ind_not_binned_central_wl], " A)")
print(snr_central_wl, snr_central_expected)
print("\n")
if self.snrplot is True:
plt.plot(self.wl, snr_expected, marker='o', label='Expected')
plt.plot(self.wl_bin, snr_binned, marker='x', label='Calculated after binning')
plt.legend()
plt.title("SNR")
plt.show()
return
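# Added note (not in the original module): _check_binning relies on the usual photon-statistics
# scaling SNR_binned ~ SNR_native * sqrt(bin_size / native_dispersion). With hypothetical numbers,
# rebinning 3.2 A native pixels into 15 A bins should raise the SNR by ~sqrt(15 / 3.2) ~ 2.2,
# which is what the "expected" curve in the snrplot diagnostic shows.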
class LinearSpecPol(SpecPol):
"""
    Class for linear spectropolarimetry reduction. Inherits from SpecPol.
Examples
--------
>> import FUSS.datred as r
>> import pandas as pd
>> metadataframe = pd.read_csv('metadata', sep='\t') # or can just feed a freshly created Meta.data data frame
>> pol = r.LinearSpecPol(metadata = metadataframe, bin_size = 15)
>> poldataframe = pol.calculate() # One full set of specpol data (4 images and 4 files to rebin per image)
Binning to 15 Angstrom
    Index Error at 9316.2 # This can happen on the last bin and isn't a problem. If it is in the middle of the spectrum
Index Error at 9326.1 # then you should look into it.
Index Error at 9316.2
Index Error at 9326.1
======== BEFORE BINNING ======
MEDIAN SNR
86.6505323611
CENTRAL SNR at ( 6204.3 A)
117.809391701
======== AFTER BINNING ======
MEDIAN SNR / EXPECTED
185.294052885 184.73955572
CENTRAL SNR / EXPECTED (at 6204.3 A)
250.397252588 251.17046704
Binning to 15 Angstrom
Index Error at 9316.2
Index Error at 9326.1
Index Error at 9316.2
Index Error at 9326.1
Binning to 15 Angstrom
Index Error at 9316.2
Index Error at 9326.1
Index Error at 9316.2
Index Error at 9326.1
Binning to 15 Angstrom
Index Error at 9316.2
Index Error at 9326.1
Index Error at 9316.2
Index Error at 9326.1
What do you want to name the polarisation file? [filename] # the file created will be filename.pol
>> poldataframe.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 421 entries, 0 to 420
Data columns (total 10 columns):
wl 421 non-null float64
p 421 non-null float64
p_r 421 non-null float64
q 421 non-null float64
q_r 421 non-null float64
u 421 non-null float64
u_r 421 non-null float64
theta 421 non-null float64
theta_r 421 non-null float64
delta_eps0 421 non-null float64
dtypes: float64(10)
memory usage: 33.0 KB
Attributes
----------
wl : 1D np.array
        Initiated as None. Will contain the original wavelength bins.
wl_bin : 1D np.array
Initiated as None. Will contain the binned wavelength. If bin_size is None then wl_bin will == wl.
poldata : pandas.core.frame.DataFrame
Initiated with columns: 'wl', 'p', 'p_r', 'q', 'q_r', 'u', 'u_r', 'theta', 'theta_r' and dtype='float64'
delta_epsilon# (# being a number) will be added for delta epsilon spectra corresponding to each set of 4
Half Wave Retarder Plate Angles.
flag : str
Flag corresponding to object of interest in the metadata data frame column "Flag".
Initiated as None and defined in the method calculate() to specify which object to do the specpol on
(the target, the zero polarisation standard or the polarisation standard -- usually 'tar', 'zpol', 'polstd',
respectively)
Methods
-------
calculate()
Calculates the Stokes parameters, degree and angle polarisation from ordinary and extra-ordinary ray fluxes.
Creates an output file containing the poldata data frame (filled by this method)
plot()
Plots the Stokes parameters, degree and angle of polarisation calculated.
_get_data()
_linspecpol()
+ inherited from SpecPol
"""
def __init__(self, oray='ap2', metadata = 'metadata',
bin_size=None, snrplot=False):
SpecPol.__init__(self, oray, metadata,bin_size, snrplot)
self.wl= None
self.wl_bin = None
self.poldata = pd.DataFrame(columns = ['wl', 'p', 'p_r', 'q', 'q_r',
'u', 'u_r', 'theta', 'theta_r'], dtype='float64')
self.flag = None
def calculate(self, flag='tar'):
"""
Calculates the Stokes parameters, degree and angle polarisation, their errors and delta_epsilon for each set
from the ordinary and extra-ordinary ray fluxes.
Notes
-----
Creates an output file containing the poldata data frame (filled by this method)
Parameters
----------
flag : str
Which flag to look for in the "Flag" column of metadata. Default is 'tar' which identifies target data.
'zpol' can be used for zero pol. standard and 'polstd' for the polarisation standard.
Custom flags can be input if you have written your own in the "Flag" column of the metadata Data Frame
Returns
-------
poldata : pandas.core.frame.DataFrame
Contains the polarisation data frame.
columns = ['wl', 'p', 'p_r', 'q', 'q_r', 'u', 'u_r', 'theta', 'theta_r'], dtype='float64'
"""
self.flag = flag
# Now getting the data from the files in lists that will be used by the specpol() function.
ls_F0, ls_F0_r, ls_F1, ls_F1_r, ls_F2, ls_F2_r, ls_F3, ls_F3_r = self._get_data()
self.poldata["wl"] = self.wl_bin # even if have not rebinned since in that case we do self.wl_bin = self.wl
q_ls = []
q_r_ls = []
u_ls = []
u_r_ls = []
for i in range(len(ls_F0)):
p, pr, q, q_r, u, u_r, theta, theta_r, delta_e= self._linspecpol(ls_F0[i], ls_F0_r[i], ls_F1[i],
ls_F1_r[i], ls_F2[i], ls_F2_r[i],
ls_F3[i], ls_F3_r[i])
q_ls.append(q)
q_r_ls.append(q_r)
u_ls.append(u)
u_r_ls.append(u_r)
self.poldata["delta_eps"+str(i)] = delta_e
for num in range(len(q_ls[0])):
# num indexes the bins each list of Stokes parameters values
q_to_avg = []
u_to_avg = []
q_r_to_sum = np.array([])
u_r_to_sum = np.array([])
for s in range(len(q_ls)):
# s indexes the data set from which we are taking a particular Stoke parameter
                # We want to average the values of all data sets at each wavelength bin. For example, with
                # 3 data sets, at 5000 A we take the 3 values of q (one per data set) at 5000 A and
                # average them. The same is done across the whole spectrum for each Stokes parameter to get the final results.
q_to_avg.append(q_ls[s][num])
u_to_avg.append(u_ls[s][num])
# For the next 4 lines I am calculating the error on the mean and putting it in final list of errors on
# Stokes parameters
q_r_to_sum = np.append(q_r_to_sum, 1 / ((q_r_ls[s][num]) ** 2))
u_r_to_sum = np.append(u_r_to_sum, 1 / ((u_r_ls[s][num]) ** 2))
self.poldata.loc[num, 'q'] = np.average(q_to_avg, weights=q_r_to_sum)
self.poldata.loc[num, 'u'] = np.average(u_to_avg, weights=u_r_to_sum)
self.poldata.loc[num, 'q_r'] = np.sqrt(1 / np.sum(q_r_to_sum))
self.poldata.loc[num, 'u_r'] = np.sqrt(1 / np.sum(u_r_to_sum))
# Once I have my final Stokes parameters I can calculate the final debiases degree of polarisation (and error).
self.poldata['p'], self.poldata['p_r'] = pol_deg(self.poldata['q'].values, self.poldata['u'].values,
self.poldata['q_r'].values, self.poldata['u_r'].values)
# And finally the P.A !
self.poldata['theta'], self.poldata['theta_r'] = pol_ang(self.poldata['q'].values, self.poldata['u'].values,
self.poldata['q_r'].values, self.poldata['u_r'].values)
# ###### CREATING THE TEXT FILE ###### #
self.pol_file = input('What do you want to name the polarisation file? ')
try:
os.remove(self.pol_file + ".pol")
except OSError:
pass
self.poldata.to_csv(self.pol_file+".pol", index=False, sep="\t")
return self.poldata
def plot(self):
        if self.poldata.empty:
            return "You should run the `calculate` method before trying to make plots"
wavelength = self.poldata['wl'].values
p = self.poldata['p'].values
p_r = self.poldata['p_r'].values
q = self.poldata['q'].values
q_r = self.poldata['q_r'].values
u = self.poldata['u'].values
u_r = self.poldata['u_r'].values
theta = self.poldata['theta'].values
theta_r = self.poldata['theta_r'].values
f, axarr = plt.subplots(5, 1, figsize=(8, 8), sharex=True)
plt.subplots_adjust(hspace=0)
# First axis is p
axarr[0].errorbar(wavelength, p,yerr=p_r, c='#D92F2F')
axarr[0].axhline(0, 0, ls='--', c='k')
pmax = -1000
for i in range(len(wavelength)):
if wavelength[i] > 4500 and p[i] > pmax:
pmax = p[i]
axarr[0].set_ylim([-0.1, pmax + 0.4])
axarr[0].set_ylabel('p(%)', fontsize=14)
# Then q
axarr[1].errorbar(wavelength, q, yerr=q_r, c='#D92F2F')
axarr[1].axhline(0, 0, ls='--', c='k')
qmax = -1000
qmin = 1000
for i in range(len(wavelength)):
if wavelength[i] > 4500 and q[i] > qmax:
qmax = q[i]
if wavelength[i] > 4500 and q[i] < qmin:
qmin = q[i]
axarr[1].set_ylim([qmin - 0.3, qmax + 0.3])
axarr[1].set_ylabel('q(%)', fontsize=14)
# And u
axarr[2].errorbar(wavelength, u, yerr=u_r, c='#D92F2F')
axarr[2].axhline(0, 0, ls='--', c='k')
umax = -1000
umin = 1000
for i in range(len(wavelength)):
if wavelength[i] > 4500 and u[i] > umax:
umax = u[i]
if wavelength[i] > 4500 and u[i] < umin:
umin = u[i]
axarr[2].set_ylim([umin - 0.3, umax + 0.3])
axarr[2].set_ylabel('u(%)', fontsize=14)
# P.A
axarr[3].errorbar(wavelength, theta, yerr=theta_r, c='#D92F2F')
axarr[3].axhline(0, 0, ls='--', c='k')
axarr[3].set_ylim([-0, 180])
axarr[3].set_ylabel('theta', fontsize=14)
# And finally the Delta epsilons of each data set.
delta_cols = [col for col in self.poldata.columns if 'delta' in col]
for column in delta_cols:
axarr[4].plot(wavelength, self.poldata[column], alpha=0.8)
print("MEAN Delta epsilon =", self.poldata[column].values.mean(),
"STDV =", self.poldata[column].values.std() )
axarr[4].set_ylabel(r"$\Delta \epsilon$", fontsize=16)
axarr[4].set_ylim([-4.0, 4.0])
plt.xlim([3500, 10000])
save_cond = input("do you want to save the plot?(Y/n): ")
if save_cond == "y" or save_cond == "Y" or save_cond == "":
plt.savefig(self.pol_file + ".png")
print("Plot saved")
else:
print("Plot not saved")
plt.show()
def _get_data(self):
"""
This takes the flux data from the text files given by IRAF and sorts them in lists for later use.
Returns
-------
tuple of 8 lists
Each list corresponds to the normalised flux difference for a given HWRP angle or its error.
            Each list contains N 1D numpy arrays holding the normalised flux difference spectra (or their errors), one
            for each set of spectropolarimetric data.
"""
# Need to do this because python doesn't read files in alphabetical order but in order they
# are written on the disc
#sorted_files = sorted([filename for filename in os.listdir(".")
# if 'dSCIENCE' in filename and 'fits' not in filename and 'c_' not in filename])
files_0_deg = SpecPol._list_files(self, 0.0)
files_22_deg = SpecPol._list_files(self, 22.5)
files_45_deg = SpecPol._list_files(self, 45.0)
files_67_deg = SpecPol._list_files(self, 67.5)
errormessage = "It seems you don't have the same number of images for each retarder plate angle. " \
"This may not be code breaking but only complete sets of retarder plate angles can be used."
assert len(files_0_deg) == len(files_22_deg) == len(files_45_deg) == len(files_67_deg), errormessage
ls_F0 = []
ls_F0_r = []
ls_F1 = []
ls_F1_r = []
ls_F2 = []
ls_F2_r = []
ls_F3 = []
ls_F3_r = []
for file_list in [files_0_deg, files_22_deg, files_45_deg, files_67_deg]:
nbre_sets = len(file_list) / 4
nbre_sets_remainder = len(file_list) % 4
assert nbre_sets_remainder == 0, "There should be 4 data files for each image (2 apertures x (flux + err))"
print("4 Files per images... All good here")
for i in range(int(nbre_sets)):
step = i * 4
files_0_deg_subset = files_0_deg[0 + step:4 + step]
files_22_deg_subset = files_22_deg[0 + step:4 + step]
files_45_deg_subset = files_45_deg[0 + step:4 + step]
files_67_deg_subset = files_67_deg[0 + step:4 + step]
            check = (i == 0)  # only run the binning diagnostics on the first set
wl0, F0, F0_r = SpecPol._flux_diff_from_file(self, files_0_deg_subset, check_bin=check)
ls_F0.append(F0)
ls_F0_r.append(F0_r)
wl1, F1, F1_r = SpecPol._flux_diff_from_file(self, files_22_deg_subset)
ls_F1.append(F1)
ls_F1_r.append(F1_r)
wl2, F2, F2_r = SpecPol._flux_diff_from_file(self, files_45_deg_subset)
ls_F2.append(F2)
ls_F2_r.append(F2_r)
wl3, F3, F3_r = SpecPol._flux_diff_from_file(self, files_67_deg_subset)
ls_F3.append(F3)
ls_F3_r.append(F3_r)
assert len(wl0) == len(wl1) == len(wl2) == len(wl3), "Wavelength bins not homogeneous. This will be an issue."
return ls_F0, ls_F0_r, ls_F1, ls_F1_r, ls_F2, ls_F2_r, ls_F3, ls_F3_r
def _linspecpol(self, F0, F0_r, F1, F1_r, F2, F2_r, F3, F3_r):
"""
Calculates the spectropolarimetric data from the normalised flux differences.
Parameters
----------
Eight 1D numpy arrays
The normalised flux differences (and errors) for each HWRP angle.
Returns
-------
arrays
p, q, and u in percent, with associated errors, as well as theta in degrees ( 0 < theta < 180) and its
errors, and delta epsilon (see Maund 2008)
"""
# Now Stokes parameters and degree of pol.
q = 0.5 * (F0 - F2)
u = 0.5 * (F1 - F3)
q_r = 0.5 * np.sqrt(F0_r ** 2 + F2_r ** 2)
u_r = 0.5 * np.sqrt(F1_r ** 2 + F3_r ** 2)
p, p_r = pol_deg(q, u, q_r, u_r)
# Arrays where we're going to store the values of p and Stokes parameters and P.A
# after we've applied corrections.
pf = np.array([])
qf = np.array([])
uf = np.array([])
theta = np.array([])
        # We take our chromatic zero-angles and interpolate them to match the wavelength bins of our data.
wl2, thetaz = np.loadtxt(zero_angles, unpack=True, usecols=(0, 1))
theta0 = np.interp(self.wl_bin, wl2, thetaz)
# Now we apply corrections to the P.A
for t in range(len(q)):
theta_t = 0.5 * m.atan2(u[t], q[t])
theta_r = 0.5 * np.sqrt(((u_r[t] / u[t]) ** 2 + (q_r[t] / q[t]) ** 2) * (1 / (1 + (u[t] / q[t]) ** 2)) ** 2)
theta_t = (theta_t * 180.0) / m.pi
theta_r = (theta_r * 180.0) / m.pi
if theta_t < 0:
theta_t += 180 # Making sure P.A is within limit 0<theta<180 deg
theta_cor = theta_t - theta0[t]
theta_cor_rad = (theta_cor / 180.0) * m.pi
theta = np.append(theta, theta_cor)
q_t = p[t] * m.cos(2 * theta_cor_rad) # Re-calculating Stokes parameters
u_t = p[t] * m.sin(2 * theta_cor_rad)
qf = np.append(qf, q_t * 100) # Filling our arrays of final Stokes parameters and p.
uf = np.append(uf, u_t * 100)
pf = np.append(pf, np.sqrt(q_t ** 2 + u_t ** 2) * 100)
# Now calculating epsilon q and epsilon u and Delta epsilon.
eq = (0.5 * F0 + 0.5 * F2) * 100 # in percent
eu = (0.5 * F1 + 0.5 * F3) * 100
delta_e = eq - eu
return pf, p_r * 100, qf, q_r * 100, uf, u_r * 100, theta, theta_r, delta_e
class CircularSpecPol(SpecPol):
"""
    Class for circular spectropolarimetry reduction. Inherits from SpecPol.
Examples
--------
Similar to LinearSpecpol examples.
Attributes
----------
wl : 1D np.array
        Initiated as None. Will contain the original wavelength bins.
wl_bin : 1D np.array
Initiated as None. Will contain the binned wavelength. If bin_size is None then wl_bin will == wl.
poldata : pandas.core.frame.DataFrame
Initiated with columns: 'wl', 'p', 'p_r', 'q', 'q_r', 'u', 'u_r', 'theta', 'theta_r' and dtype='float64'
delta_epsilon# (# being a number) will be added for delta epsilon spectra corresponding to each set of 4
Half Wave Retarder Plate Angles.
flag : str
Flag corresponding to object of interest in the metadata data frame column "Flag".
Initiated as None and defined in the method calculate() to specify which object to do the specpol on
(the target, the zero polarisation standard or the polarisation standard -- usually 'tar', 'zpol', 'polstd',
respectively)
Methods
-------
calculate()
Calculates the Stokes parameters, degree and angle polarisation from ordinary and extra-ordinary ray fluxes.
Creates an output file containing the poldata data frame (filled by this method)
plot()
Plots the Stokes parameters, degree and angle of polarisation calculated.
_get_data()
_circspecpol()
+ inherited from SpecPol
"""
def __init__(self, oray='ap2', metadata = 'metadata',
bin_size=None, snrplot=False):
SpecPol.__init__(self, oray, metadata, bin_size, snrplot)
self.wl= None
self.wl_bin = None
self.poldata = pd.DataFrame(columns = ['wl', 'v', 'v_r'], dtype='float64')
def calculate(self, flag='tar'):
"""
Calculates the Stokes v, its error and epsilon from the ordinary and extra-ordinary ray fluxes.
Notes
-----
Creates an output file containing the poldata data frame (filled by this method)
Parameters
----------
flag : str
Which flag to look for in the "Flag" column of metadata. Default is 'tar' which identifies target data.
'zpol' can be used for zero pol. standard and 'polstd' for the polarisation standard.
Custom flags can be input if you have written your own in the "Flag" column of the metadata Data Frame
Returns
-------
poldata : pandas.core.frame.DataFrame
Contains the polarisation data frame. columns = ['wl', 'v', 'v_r'], dtype='float64'
"""
self.flag = flag
# Now getting the data from the files in lists that will be used by the specpol() function.
ls_F0, ls_F0_r, ls_F1, ls_F1_r = self._get_data()
self.poldata["wl"] = self.wl_bin # even if have not rebinned since in that case we do self.wl_bin = self.wl
v_ls = []
v_r_ls = []
for i in range(len(ls_F0)):
v, v_r, epsilon = self._circspecpol(ls_F0[i], ls_F0_r[i], ls_F1[i], ls_F1_r[i])
v_ls.append(v)
v_r_ls.append(v_r)
self.poldata["epsilon"+str(i)] = epsilon
for num in range(len(v_ls[0])):
# num indexes the bins each list of Stokes parameters values
v_to_avg = []
v_r_to_sum = np.array([])
for s in range(len(v_ls)):
v_to_avg.append(v_ls[s][num])
v_r_to_sum = np.append(v_r_to_sum, (1 / ((v_r_ls[s][num]) ** 2)))
self.poldata.loc[num, 'v'] = np.average(v_to_avg, weights = v_r_to_sum)
self.poldata.loc[num, 'v_r'] = np.sqrt(1 / np.sum(v_r_to_sum))
# ###### CREATING THE TEXT FILE ###### #
self.pol_file = input('What do you want to name the polarisation file? ')
try:
os.remove(self.pol_file + ".pol")
except OSError:
pass
self.poldata.to_csv(self.pol_file+".pol", index=False, sep="\t")
return self.poldata
def _get_data(self):
"""
This takes the flux data from the text files given by IRAF and sorts them in lists for later use.
Returns
-------
tuple of 4 lists
Each list corresponds to the normalised flux difference for a given HWRP angle or its error.
            Each list contains N 1D numpy arrays holding the normalised flux difference spectra (or their errors), one
            for each set of spectropolarimetric data.
"""
files_45_deg = SpecPol._list_files(self, 45.0, linpol=False)
files_315_deg = SpecPol._list_files(self, 315, linpol=False)
errormessage = "It seems you don't have the same number of images for each retarder plate angle. " \
"This may not be code breaking but only complete sets of retarder plate angles can be used."
assert len(files_45_deg) == len(files_315_deg), errormessage
ls_F0 = []
ls_F0_r = []
ls_F1 = []
ls_F1_r = []
for file_list in [files_45_deg, files_315_deg]:
nbre_sets = len(file_list) / 4
nbre_sets_remainder = len(file_list) % 4
assert nbre_sets_remainder == 0, "There should be 4 data files for each image (2 apertures x (flux + err))"
print("4 Files per images... All good here")
for i in range(int(nbre_sets)):
step = i * 4
files_45_deg_subset = files_45_deg[0 + step:4 + step]
files_315_deg_subset = files_315_deg[0 + step:4 + step]
            check = (i == 0)  # only run the binning diagnostics on the first set
wl0, F0, F0_r = SpecPol._flux_diff_from_file(self, files_45_deg_subset, check_bin=check)
ls_F0.append(F0)
ls_F0_r.append(F0_r)
wl1, F1, F1_r = SpecPol._flux_diff_from_file(self, files_315_deg_subset)
ls_F1.append(F1)
ls_F1_r.append(F1_r)
assert len(wl0) == len(wl1), "Wavelength bins not homogeneous. This will be an issue."
return ls_F0, ls_F0_r, ls_F1, ls_F1_r
def _circspecpol(self, F0, F0_r, F1, F1_r):
"""
        Calculates the circular polarisation from the normalised flux differences.
Parameters
----------
Four 1D numpy arrays
The normalised flux differences (and errors) for each HWRP angle.
Returns
-------
v, v_r and epsilon
"""
# Now Stokes parameters and degree of pol.
v = 0.5 * (F0 - F1)
v_r = 0.5*np.sqrt(F0_r**2 + F1_r**2)
# Now calculating epsilon q and epsilon u and Delta epsilon.
epsilon = 0.5 * (F0 + F1)
return v*100, v_r * 100, epsilon*100
def plot(self):
f, axarr = plt.subplots(2, 1, figsize=(10, 10), sharex=True)
plt.subplots_adjust(hspace=0)
# First axis is v
wl = self.poldata["wl"]
v = self.poldata["v"]
v_r = self.poldata['v_r']
axarr[0].errorbar(wl, v, yerr=v_r, c='#D92F2F')
axarr[0].axhline(0, 0, ls='--', c='k')
vmax = -1000
vmin = 10000
for i in range(len(wl)):
if wl[i] > 4500 and v[i] > vmax:
vmax = v[i]
if wl[i] > 4500 and v[i] < vmin:
vmin = v[i]
axarr[0].set_ylim([vmin - 0.4, vmax + 0.4])
axarr[0].set_ylabel('v(%)', fontsize=14)
# And then the Delta epsilons of each data set.
delta_cols = [col for col in self.poldata.columns if 'epsilon' in col]
for column in delta_cols:
axarr[1].plot(wl, self.poldata[column], alpha=0.8)
print("MEAN Delta epsilon =", self.poldata[column].values.mean(),
"STDV =", self.poldata[column].values.std() )
axarr[1].set_ylabel(r"$\Delta \epsilon$", fontsize=16)
axarr[1].set_ylim([-6, 6])
plt.xlim([3500, 10000])
save_cond = input("do you want to save the plot?(Y/n): ")
if save_cond == "y" or save_cond == "Y" or save_cond == "":
plt.savefig(self.pol_file+ ".png")
print("Plot saved")
else:
print("Plot not saved")
plt.show()
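# Added usage sketch (not part of the original module; the metadata file name and bin size are
# assumptions). The circular workflow mirrors the LinearSpecPol example above, provided a
# tab-separated 'metadata' table created by MetaData exists in the working directory:
#
#     cpol = CircularSpecPol(metadata='metadata', bin_size=15)
#     vdata = cpol.calculate(flag='tar')   # prompts for an output name and writes <name>.pol
#     cpol.plot()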
def mk_flx_spctr(metadata = 'metadata', fileloc='.', flag = 'tar', output = None, header = True, front="1D_c_d"):
"""
Combines all the flux calibrated apertures to create the flux spectrum.
Notes
-----
    Creates a text file with three columns: wavelength, flux, errors.
"""
if isinstance(metadata, str):
metadataframe = pd.read_csv(metadata, sep='\t')
assert "Filename" in list(metadataframe), "Are you sure "+metadata+" is a (or location to a) Meta Data frame? "
fluxdata = | pd.DataFrame(columns=['wl', 'f', 'f_r'], dtype='float64') | pandas.DataFrame |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import datetime
import yfinance as yf
from pandas_datareader import data as pdr
from flask import current_app
from stk_predictor.extensions import db
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
def get_ticker_from_yahoo(ticker, start_date, end_date):
yf.pdr_override()
try:
new_trading_df = pdr.get_data_yahoo(
ticker, start_date, end_date, interval='1d')
new_trading_df = new_trading_df.drop(
['Open', 'High', 'Low', 'Adj Close'], axis=1)
new_trading_df = new_trading_df.dropna('index')
new_trading_df = new_trading_df.reset_index()
new_trading_df.columns = ['trading_date',
'intraday_close', 'intraday_volumes']
his_trading_df = pd.read_sql('aapl', db.engine, index_col='id')
df = pd.concat([his_trading_df, new_trading_df]
).drop_duplicates('trading_date')
df = df.sort_values(by='trading_date')
df = df.reset_index(drop=True)
if len(df) > 0:
df.to_sql("aapl", db.engine, if_exists='replace', index_label='id')
return df
else:
# t = pd.read_sql('aapl', db.engine, index_col='id')
return None
except Exception as ex:
raise RuntimeError(
"Catch Excetion when retrieve data from Yahoo...", ex)
return None
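# Added usage sketch (assumption, not in the original source): the helper above expects to run
# inside the Flask application context so that db.engine is available; the ticker and date range
# below are only illustrative.
#
#     df = get_ticker_from_yahoo('AAPL',
#                                datetime.datetime(2020, 1, 1),
#                                datetime.datetime.today())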
def get_news_from_finviz(ticker):
"""Request news headline from finviz, according to
company ticker's name
Parameters
-----------
ticker: str
the stock ticker name
Return
----------
df : pd.DataFrame
        return the latest 2 days of news headlines.
"""
current_app.logger.info("Job >> Enter Finviz news scrape step...")
base_url = 'https://finviz.com/quote.ashx?t={}'.format(ticker)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36'
}
parsed_news = []
try:
res = requests.get(base_url, headers=headers)
if res.status_code == 200:
texts = res.text
            soup = BeautifulSoup(texts, "html.parser")
news_tables = soup.find(id="news-table")
for x in news_tables.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
parsed_news.append([date, time, text])
# filter the recent day news
df = pd.DataFrame(parsed_news, columns=['date', 'time', 'texts'])
df['date'] = pd.to_datetime(df.date).dt.date
one_day_period = (datetime.datetime.today() -
datetime.timedelta(days=1)).date()
df_sub = df[df.date >= one_day_period]
return df_sub
else:
raise RuntimeError("HTTP response Error {}".format(
res.status_code)) from None
except Exception as ex:
current_app.logger.info("Exception in scrape Finviz.", ex)
raise RuntimeError("Exception in scrape Finviz.") from ex
def prepare_trading_dataset(df):
"""Prepare the trading data set.
    Time series analysis incorporates previous data for future prediction,
We need to retrieve historical data to generate features.
Parameters
-----------
df: DataFrame
the stock ticker trading data, including trading-date, close-price, volumes
window: int, default = 400
feature engineer windows size. Using at most 400 trading days to construct
features.
Return
----------
array_lstm : np.array
return the array with 3 dimensions shape -> [samples, 1, features]
"""
if len(df) == 0:
raise RuntimeError(
"Encounter Error in >>make_dataset.prepare_trading_dataset<<... \
Did not catch any news.") from None
else:
df['log_ret_1d'] = np.log(df['intraday_close'] / df['intraday_close'].shift(1))
df['log_ret_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).sum()
df['log_ret_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).sum()
df['log_ret_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).sum()
df['log_ret_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).sum()
df['log_ret_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).sum()
df['log_ret_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).sum()
df['log_ret_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).sum()
df['log_ret_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).sum()
df['log_ret_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).sum()
df['log_ret_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).sum()
df['log_ret_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).sum()
df['log_ret_36w'] = pd.Series(df['log_ret_1d']).rolling(window=180).sum()
df['log_ret_40w'] = pd.Series(df['log_ret_1d']).rolling(window=200).sum()
df['log_ret_44w'] = pd.Series(df['log_ret_1d']).rolling(window=220).sum()
df['log_ret_48w'] = pd.Series(df['log_ret_1d']).rolling(window=240).sum()
df['log_ret_52w'] = pd.Series(df['log_ret_1d']).rolling(window=260).sum()
df['log_ret_56w'] = pd.Series(df['log_ret_1d']).rolling(window=280).sum()
df['log_ret_60w'] = pd.Series(df['log_ret_1d']).rolling(window=300).sum()
df['log_ret_64w'] = pd.Series(df['log_ret_1d']).rolling(window=320).sum()
df['log_ret_68w'] = pd.Series(df['log_ret_1d']).rolling(window=340).sum()
df['log_ret_72w'] = pd.Series(df['log_ret_1d']).rolling(window=360).sum()
df['log_ret_76w'] = pd.Series(df['log_ret_1d']).rolling(window=380).sum()
df['log_ret_80w'] = pd.Series(df['log_ret_1d']).rolling(window=400).sum()
df['vol_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).std()*np.sqrt(5)
df['vol_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).std()*np.sqrt(10)
df['vol_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).std()*np.sqrt(15)
df['vol_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).std()*np.sqrt(20)
df['vol_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).std()*np.sqrt(40)
df['vol_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).std()*np.sqrt(60)
df['vol_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).std()*np.sqrt(80)
df['vol_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).std()*np.sqrt(100)
df['vol_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).std()*np.sqrt(120)
df['vol_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).std()*np.sqrt(140)
df['vol_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).std()*np.sqrt(160)
df['vol_36w'] = pd.Series(df['log_ret_1d']).rolling(window=180).std()*np.sqrt(180)
df['vol_40w'] = pd.Series(df['log_ret_1d']).rolling(window=200).std()*np.sqrt(200)
df['vol_44w'] = pd.Series(df['log_ret_1d']).rolling(window=220).std()*np.sqrt(220)
df['vol_48w'] = pd.Series(df['log_ret_1d']).rolling(window=240).std()*np.sqrt(240)
df['vol_52w'] = pd.Series(df['log_ret_1d']).rolling(window=260).std()*np.sqrt(260)
df['vol_56w'] = pd.Series(df['log_ret_1d']).rolling(window=280).std()*np.sqrt(280)
df['vol_60w'] = pd.Series(df['log_ret_1d']).rolling(window=300).std()*np.sqrt(300)
df['vol_64w'] = pd.Series(df['log_ret_1d']).rolling(window=320).std()*np.sqrt(320)
df['vol_68w'] = pd.Series(df['log_ret_1d']).rolling(window=340).std()*np.sqrt(340)
df['vol_72w'] = pd.Series(df['log_ret_1d']).rolling(window=360).std()*np.sqrt(360)
df['vol_76w'] = pd.Series(df['log_ret_1d']).rolling(window=380).std()*np.sqrt(380)
df['vol_80w'] = pd.Series(df['log_ret_1d']).rolling(window=400).std()*np.sqrt(400)
df['volume_1w'] = pd.Series(df['intraday_volumes']).rolling(window=5).mean()
df['volume_2w'] = pd.Series(df['intraday_volumes']).rolling(window=10).mean()
df['volume_3w'] = pd.Series(df['intraday_volumes']).rolling(window=15).mean()
df['volume_4w'] = pd.Series(df['intraday_volumes']).rolling(window=20).mean()
df['volume_8w'] = pd.Series(df['intraday_volumes']).rolling(window=40).mean()
df['volume_12w'] = pd.Series(df['intraday_volumes']).rolling(window=60).mean()
df['volume_16w'] = pd.Series(df['intraday_volumes']).rolling(window=80).mean()
df['volume_20w'] = pd.Series(df['intraday_volumes']).rolling(window=100).mean()
df['volume_24w'] = | pd.Series(df['intraday_volumes']) | pandas.Series |
import json
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from scipy.optimize import basinhopping
N_EPISODES = 10
TRAINING_SIZE = 0.5
VALIDATION_SIZE = 0.1
LEARNING_RATE = 0.01
#TD_LAMBDA = 0.9
DISCOUNT = 0.95
class Environment:
def __init__(self, data, initial_btc = 1.0):
self.data = data
self.btc = initial_btc
self.eth = 0
self.time_step = 0
def done(self):
return self.time_step == len(self.data) - 1
def agent_state(self):
state = self.data.iloc[[self.time_step]].squeeze()
state['btc'] = self.btc
state['eth'] = self.eth
return state
def score(self):
state = self.agent_state()
return state['btc'] + state['eth'] * state['close']
def step(self, action):
self.time_step += 1
state = self.agent_state()
reward = 0
next_btc = self.btc
next_eth = self.eth
#print(state)
#print(action)
if action['btc_amt'] > 0 and action['btc_amt'] <= state['btc'] and action['btc_price'] >= state['low']:
amt = action['btc_amt']
price = action['btc_price']
next_btc -= amt
next_eth += amt / price
reward -= amt
if action['eth_amt'] > 0 and action['eth_amt'] <= state['eth'] and action['eth_price'] <= state['high'] and action['eth_price'] >= 0:
amt = action['eth_amt']
price = action['eth_price']
next_btc += amt * price
next_eth -= amt
reward += amt * price
self.btc = next_btc
self.eth = next_eth
next_state = self.agent_state()
#print(next_state)
#print(reward)
return next_state, reward
state_vector = ['btc', 'eth', 'high', 'low', 'open', 'close', 'volume', 'weightedAverage']
action_vector = ['btc_amt', 'btc_price', 'eth_amt', 'eth_price']
class History:
def __init__(self, max_size):
self.max_size = max_size
self.idx = 0
self.queue = np.zeros((max_size, 2), dtype = object)
def size(self):
return min(self.idx, self.max_size)
def sample(self, size):
size = min(size, self.size())
        # draw a batch of `size` samples (with replacement) from the replay history
        sample_indices = np.random.choice(self.size(), size, replace=True)
        x = np.stack([np.asarray(s, dtype=float) for s in self.queue[sample_indices, 0]])
        y = np.array([float(np.asarray(t, dtype=float).ravel()[0]) for t in self.queue[sample_indices, 1]])
return x, y
def append(self, sample, target):
idx = self.idx % self.max_size
self.queue[idx, 0] = sample
self.queue[idx, 1] = target
self.idx += 1
class QEstimator:
def __init__(self, n_hidden_layers = 2, batch_size = 32):
self.train_step = 0
self.batch_size = batch_size
self.n_hidden_layers = n_hidden_layers
self.history = History(self.batch_size * self.batch_size)
self.model = self._build_model()
self.frozen_model = self._build_model()
self._update_model()
def _update_model(self):
for i in range(len(self.model.layers)):
weights = self.model.layers[i].get_weights()
self.frozen_model.layers[i].set_weights(weights)
def _build_model(self):
model = Sequential()
dim = len(state_vector) + len(action_vector)
model.add(Dense(dim, input_dim = len(state_vector) + len(action_vector), init = 'he_normal'))
model.add(BatchNormalization(axis = 1))
model.add(Activation('relu'))
for i in range(1, self.n_hidden_layers):
model.add(Dense(model.output_shape[1], init = 'he_normal'))
if i == self.n_hidden_layers - 1:
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(1, init = 'he_normal'))
model.add(Activation('linear'))
model.compile(loss = 'mean_squared_error', optimizer = SGD(lr = LEARNING_RATE, momentum = 0.9, nesterov = True))
return model
def _get_input_vector(self, state, action):
x = []
for s in state_vector:
x.append(state[s])
for a in action_vector:
x.append(action[a])
return np.array(x)
def train(self, state, action, target):
sample = self._get_input_vector(state, action)
self.history.append(sample, target)
x, y = self.history.sample(self.batch_size)
self.model.train_on_batch(x, y)
self.train_step += 1
if self.train_step % (self.batch_size * self.batch_size) == 0:
self._update_model()
def predict(self, state, action):
x = self._get_input_vector(state, action)
return self.frozen_model.predict(np.atleast_2d(x), batch_size = 1)
def best_action(self, state):
def objective(action_values):
action = {}
for i, a in enumerate(action_vector):
action[a] = action_values[i]
value = np.asscalar(self.predict(state, action))
            return -value  # minimise the negative Q-value so basinhopping finds the best action
start_action_values = np.random.rand(len(action_vector))
res = basinhopping(objective, start_action_values, niter = 25, niter_success = 5)
action = {}
for i, a in enumerate(action_vector):
action[a] = res.x[i]
value = -res.fun
return action, value
def random_action(self, state):
lo_price = max(0, 2 * state['low'] - state['weightedAverage'])
hi_price = 2 * state['high'] - state['weightedAverage']
action = {
'btc_amt': np.random.rand() * state['btc'],
'btc_price': np.random.rand() * (hi_price - lo_price) + lo_price,
'eth_amt': np.random.rand() * state['eth'],
'eth_price': np.random.rand() * (hi_price - lo_price) + lo_price
}
return action, self.predict(state, action)
class Agent:
def __init__(self, q_estimator, discount = 0.95, epsilon = 0.5):
self.Q = q_estimator
self.discount = discount
self.epsilon = epsilon
def policy(self, state):
return self.Q.best_action(state)
def epsilon_policy(self, state):
if np.random.rand() < self.epsilon:
return self.Q.random_action(state)
return self.policy(state)
def train(self, env):
state = env.agent_state()
action, value = self.epsilon_policy(state)
next_state, reward = env.step(action)
next_action, next_value = self.policy(next_state)
target = reward + self.discount * next_value
self.Q.train(state, action, target)
def act(self, env):
state = env.agent_state()
action, value = self.policy(state)
env.step(action)
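# Added note (sketch, not in the original source): Agent.train performs a one-step Q-learning
# update against the frozen copy of the network, i.e.
#
#     target = reward + discount * max_a' Q_frozen(next_state, a')
#
# where the maximisation over the continuous action space is approximated numerically by
# scipy's basinhopping inside QEstimator.best_action.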
def main():
np.random.seed(0)
data = []
with open('data/btc_eth.json') as data_file:
data = json.load(data_file)
n_samples = len(data)
n_train_samples = int(TRAINING_SIZE * n_samples)
n_validation_samples = int(VALIDATION_SIZE * n_samples)
n_test_samples = n_samples - n_train_samples - n_validation_samples
df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import math
from functools import reduce
from scipy.stats.stats import pearsonr
from matplotlib import pyplot as plt
data_path=r'./SWI closing price.xlsx'
#columns_list=['801040.SWI','801180.SWI','801710.SWI']
data=pd.read_excel(data_path)
columns_list=list(data.head(0))[1:]
data_question1 = data[columns_list].copy()  # copy to avoid SettingWithCopyWarning when adding columns below
for industry in list(data_question1.head(0)):
data_question1[industry+'_Lag1'] = data_question1[industry].shift(periods=-1,axis=0)
data_question1[industry+'_rate'] = data_question1[industry]/data_question1[industry+'_Lag1']
data_question1[industry+'_lograte'] = np.log(data_question1[industry+'_rate'])
data_question1.dropna(inplace=True)
data_question1_rate=data_question1[[x+'_rate' for x in columns_list]]
out=[]
columns_list_rate=[x+'_lograte' for x in list(data.head(0))[1:]]
for pair_one in columns_list_rate:
for pair_two in columns_list_rate:
pair_one_l = list(data_question1[pair_one])
pair_two_l = list(data_question1[pair_two])
start_one = 0
for i in range(10):
start_two =0
for j in range(10):
if start_one < start_two:
sli_one = pair_one_l[start_one: start_one+300]
sli_two=pair_two_l[start_two: start_two+300]
corr = pearsonr(sli_one,sli_two)[0]
if corr >0.1 and corr <1.0:
out.append([pair_one,pair_two,start_one,start_two,corr])
start_two+=30
start_one+=30
autocorr = [item for item in out if item[0]==item[1]]
cross = [item for item in out if item[0]!=item[1]]
data_score = pd.DataFrame()
data_score[columns_list] = data_question1[[x+'_lograte' for x in columns_list]]
for field in columns_list:
data_score[field+'_score'] = 0
data_score.dropna(inplace=True)
for i in range(len(cross)):
field1 = cross[i][0][:-8]
field2 = cross[i][1][:-8]
lag1 = cross[i][2]
lag2 = cross[i][3]
coef = cross[i][4]
for t in range(1,301):
if data_score.loc[t+lag1,field1] > 0:
data_score.loc[t+lag2,field2+'_score'] += coef
elif data_score.loc[t+lag1,field1] < 0:
data_score.loc[t+lag2,field2+'_score'] -= coef
score_list=[x+'_score' for x in columns_list]
data_score_n=data_score[score_list]
def Score_rank(t,score_list,data_score_n,data_score):
total=1
for i in range(len(score_list)):
total+=math.exp(np.array(data_score_n[score_list])[t][i])
weight = [math.exp(np.array(data_score_n[score_list])[t][i])/total for i in range(len(score_list))]
weight = weight+[1/total]
value_rate = np.dot(np.array(data_question1_rate)[t],np.array(weight)[:-1])+np.array(weight)[-1]
return weight,value_rate
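# Added note (sketch, not in the original script): Score_rank maps the industry scores to
# long-only portfolio weights with a softmax that reserves one extra slot for cash,
#
#     w_i    = exp(score_i) / (1 + sum_j exp(score_j))
#     w_cash = 1 / (1 + sum_j exp(score_j))
#
# so the weights are positive and sum to one before being applied to the daily return vector.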
value_total=[1]
weight_total=[[0]*27+[1]]
for t in range(len(np.array(data_score))):
weight_total.append(Score_rank(t,score_list,data_score_n,data_score)[0])
value_total.append(Score_rank(t,score_list,data_score_n,data_score)[1])
value_day=[1]
for i in range(1,len(value_total)):
value_day.append(reduce(lambda x,y:x*y, value_total[:i]))
plt.plot(value_day[:500])
Annual_rate=(value_day[500])**(250/500)-1
def max_withdrawal(data):
mw=(( | pd.DataFrame(data) | pandas.DataFrame |
import logging
import os
from pathlib import Path
import click
import pandas as pd
from scipy import stats
from tqdm import tqdm
logging.basicConfig(level=logging.INFO)
CORRECT_NER_ENTAILS = "Entails"
CORRECT_NER_NOT_ENTAILS = "Not Entails/Error"
CORRECT_NER_VALS = [CORRECT_NER_ENTAILS, CORRECT_NER_NOT_ENTAILS]
AGG_BY_SAFE_FIRST = [
"pmid",
"input",
"premise",
"gold_label",
"hypothesis",
"proba_entails",
"hypothesis_food",
"proba_ambiguity",
"proba_not_entails",
"hypothesis_chemical",
"foodb_food_name",
"foodb_food_id",
"foodb_chemical_name",
"foodb_chemical_id",
]
def calculate_annotator_agreement_matrix(df: pd.DataFrame):
pass
# TODO
def first(x):
# return x.values[0]
return None
def safe_first(x):
if not (len(x.unique()) == 1 or x.isna().all()):
print(x)
raise ValueError
return x.values[0]
def match_food_by_name(hypothesis_food: str, foods: pd.DataFrame):
food_match = foods[
foods.name.str.lower() == hypothesis_food.lower()
] # try exact match by lowercase name
if len(food_match) > 0:
food_match = food_match.iloc[0, :]
return food_match["name"], food_match.id
return None, None
def match_food_by_taxid(hypothesis_food_id: str, foods: pd.DataFrame):
if hypothesis_food_id is not None and str(hypothesis_food_id) != "nan":
food_match = foods[
foods.ncbi_taxonomy_id.astype(str) == str(int(hypothesis_food_id))
]
if len(food_match) > 0:
food_match = food_match.iloc[0, :]
return food_match["name"], food_match.id
return None, None
def match_food(row, foods):
hypothesis_food = row["hypothesis_food"]
hypothesis_food_id = row["hypothesis_food_id"]
# we prefer id-based matches first
food_name, food_id = match_food_by_taxid(hypothesis_food_id, foods)
if food_name is not None:
return food_name, food_id, "id"
else: # if we can't match by id, match by name
food_name, food_id = match_food_by_name(hypothesis_food, foods)
if food_name is not None:
return food_name, food_id, "name"
return None, None, None
def find_compound_synonym(hypothesis_chemical, compound_synonyms):
synonym_match = compound_synonyms[
compound_synonyms.synonym.str.lower() == hypothesis_chemical.lower()
]
if len(synonym_match) > 0:
synonym_match = synonym_match.iloc[0, :]
return (
synonym_match.synonym,
synonym_match.source_id,
synonym_match.source_type,
)
else:
return None, None, None
def match_chemical(
row, compounds, nutrients, compound_synonyms, synonym_matching: bool
):
hypothesis_chemical = row["hypothesis_chemical"]
def _match_chemical_inner(hypothesis_chemical):
chemical_match = compounds[
compounds.name.str.lower() == hypothesis_chemical.lower()
] # exact match compound name
if len(chemical_match) > 0:
chemical_match = chemical_match.iloc[0, :]
return (
chemical_match["name"],
chemical_match.id,
"name",
"compound",
)
return None, None, None, None
def match_nutrient(hypothesis_chemical):
chemical_match = nutrients[
nutrients.name.str.lower() == hypothesis_chemical.lower()
] # exact match compound name
if len(chemical_match) > 0:
chemical_match = chemical_match.iloc[0, :]
return (
chemical_match["name"],
chemical_match.id,
"name",
"nutrient",
)
return None, None, None, None
res = _match_chemical_inner(hypothesis_chemical)
if res[0] is not None:
return res
else: # if that doesn't work, exact match nutrients
res = match_nutrient(hypothesis_chemical)
if res[0] is not None:
return res
elif synonym_matching: # if that doesn't work, try matching by synonym
syn_name, syn_source_id, syn_source_type = find_compound_synonym(
hypothesis_chemical, compound_synonyms
)
chemical_match = []
if syn_source_type == "Compound":
chemical_match = compounds[compounds.id == syn_source_id]
elif syn_source_type == "Nutrient":
chemical_match = nutrients[nutrients.id == syn_source_id]
if len(chemical_match) > 0:
chemical_match = chemical_match.iloc[0, :]
return (
chemical_match["name"],
chemical_match.id,
"synonym",
syn_source_type.lower(),
)
return None, None, None, None
def match_row(
row, foods, compounds, nutrients, compound_synonyms, synonym_matching: bool
):
food_match_name, food_match_id, food_match_type = match_food(row, foods)
(
chemical_match_name,
chemical_match_id,
chemical_match_type,
chemical_foodb_compound_status,
) = match_chemical(
row, compounds, nutrients, compound_synonyms, synonym_matching
)
return (
food_match_name,
food_match_id,
food_match_type,
chemical_match_name,
chemical_match_id,
chemical_match_type,
chemical_foodb_compound_status,
)
def get_citation_info(content_row):
if len(content_row) > 0:
if len(content_row) > 1:
return (
content_row.citation.unique(),
content_row.citation_type.unique(),
)
else:
return content_row.citation, content_row.citation_type
return None, None
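# Added summary (not in the original source): match_row resolves a (food, chemical) pair by trying,
# in order, NCBI taxonomy id then exact food name for the food, and exact compound name, exact
# nutrient name, then compound synonym (only when synonym_matching is enabled) for the chemical;
# the first successful match wins.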
def match_df(
data,
foods,
compounds,
nutrients,
compound_synonyms,
contents,
synonym_matching: bool,
):
newrows = []
for idx, row in tqdm(data.iterrows(), total=len(data)):
(
food_match_name,
food_match_id,
food_match_type,
chemical_match_name,
chemical_match_id,
chemical_match_type,
chemical_foodb_compound_status,
) = match_row(
row,
foods,
compounds,
nutrients,
compound_synonyms,
synonym_matching,
)
newrow = row.copy()
(
newrow["foodb_food_name"],
newrow["foodb_food_id"],
newrow["foodb_food_match_type"],
newrow["foodb_chemical_name"],
newrow["foodb_chemical_id"],
newrow["foodb_chemical_match_type"],
newrow["foodb_compound_status"],
) = (
food_match_name,
food_match_id,
food_match_type,
chemical_match_name,
chemical_match_id,
chemical_match_type,
chemical_foodb_compound_status,
)
if food_match_id is not None and chemical_match_id is not None:
content_row = contents[
(contents.food_id == food_match_id)
& (contents.source_id == chemical_match_id)
]
(
newrow["foodb_citation"],
newrow["foodb_citation_type"],
) = get_citation_info(content_row)
newrows.append(newrow)
results = | pd.DataFrame(newrows) | pandas.DataFrame |
import numpy as np
import pandas as pd
# 1. load dataset
ratings = pd.read_csv('chapter02/data/movie_rating.csv')
movie_ratings = pd.pivot_table(
ratings,
values='rating',
index='title',
columns='critic'
)
# 2. calculate similarity
def calculate_norm(u):
norm_u = 0.0
for ui in u:
if np.isnan(ui):
continue
norm_u += (ui ** 2)
return np.sqrt(norm_u)
def calculate_cosine_similarity(u, v):
    norm_u = calculate_norm(u)
    norm_v = calculate_norm(v)
denominator = norm_u * norm_v
numerator = 0.0
for ui, vi in zip(u, v):
if np.isnan(ui) or np.isnan(vi):
continue
numerator += (ui * vi)
similarity = numerator / denominator
return similarity
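# Added worked example (hypothetical numbers, not in the original script): NaN entries are skipped
# pairwise in the numerator while each norm still uses all of its own non-NaN values, e.g. for
# u = [4, nan, 3] and v = [5, 2, nan]:
#     numerator = 4*5 = 20,  ||u|| = 5,  ||v|| = sqrt(29),  similarity ~ 0.74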
titles = movie_ratings.index
sim_items = pd.DataFrame(0, columns=titles, index=titles, dtype=float)
for src in titles:
for dst in titles:
src_vec = movie_ratings.loc[src, :].values
dst_vec = movie_ratings.loc[dst, :].values
similarity = calculate_cosine_similarity(src_vec, dst_vec)
sim_items.loc[src, dst] = similarity
print(sim_items)
# 3. Make Prediction & Recommendation
user_id = 5
ratings_critic = movie_ratings.loc[:, [movie_ratings.columns[user_id]]]
ratings_critic.columns = ['rating']
titles_na_critic = ratings_critic[ | pd.isna(ratings_critic.rating) | pandas.isna |
import os
import warnings
import itertools
import pandas
import time
class SlurmJobArray():
""" Selects a single condition from an array of parameters using the SLURM_ARRAY_TASK_ID environment variable.
The parameters need to be supplied as a dictionary. if the task is not in a slurm environment,
the test parameters will supersede the parameters, and the job_id would be taken as 0. Example:
parameters={"epsilon":[100],
"aligned":[True,False],
"actinLen":[20,40,60,80,100,120,140,160,180,200,220,240,260,280,300],
"repetition":range(5),
"temperature":[300],
"system2D":[False],
"simulation_platform":["OpenCL"]}
test_parameters={"simulation_platform":"CPU"}
sjob=SlurmJobArray("ActinSimv6", parameters, test_parameters)
:var test_run: Boolean: This simulation is a test
:var job_id: SLURM_ARRAY_TASK_ID
:var all_parameters: Parameters used to initialize the job
:var parameters: Parameters for this particular job
:var name: The name (and relative path) of the output
"""
def __init__(self, name, parameters, test_parameters={}, test_id=0):
self.all_parameters = parameters
self.test_parameters = test_parameters
# Parse the slurm variables
self.slurm_variables = {}
for key in os.environ:
if len(key.split("_")) > 1 and key.split("_")[0] == 'SLURM':
self.slurm_variables.update({key: os.environ[key]})
# Check if there is a job id
self.test_run = False
try:
self.job_id = int(self.slurm_variables["SLURM_ARRAY_TASK_ID"])
except KeyError:
self.test_run = True
warnings.warn("Test Run: SLURM_ARRAY_TASK_ID not in environment variables")
self.job_id = test_id
keys = parameters.keys()
self.all_conditions = list(itertools.product(*[parameters[k] for k in keys]))
self.parameter = dict(zip(keys, self.all_conditions[self.job_id]))
# The name only includes enough information to differentiate the simulations.
self.name = f"{name}_{self.job_id:03d}_" + '_'.join(
[f"{a[0]}_{self[a]}" for a in self.parameter if len(self.all_parameters[a]) > 1])
def __getitem__(self, name):
if self.test_run:
try:
return self.test_parameters[name]
except KeyError:
return self.parameter[name]
else:
return self.parameter[name]
def __getattr__(self, name: str):
""" The keys of the parameters can be called as attributes
"""
if name in self.__dict__:
return object.__getattribute__(self, name)
elif name in self.parameter:
return self[name]
else:
return object.__getattribute__(self, name)
def __repr__(self):
return str(self.parameter)
def keys(self):
        return str(self.parameter.keys())
def print_parameters(self):
print(f"Number of conditions: {len(self.all_conditions)}")
print("Running Conditions")
for k in self.parameter.keys():
print(f"{k} :", f"{self[k]}")
print()
def print_slurm_variables(self):
print("Slurm Variables")
for key in self.slurm_variables:
print(key, ":", self.slurm_variables[key])
print()
def write_csv(self, out=""):
s = pandas.concat([pandas.Series(self.parameter), | pandas.Series(self.slurm_variables) | pandas.Series |
### import used modules first
from TPM.localization import select_folder
from glob import glob
import random
import string
import numpy as np
import os
import datetime
import pandas as pd
import scipy.linalg as la
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from numpy import unique
from numpy import where
from sklearn.datasets import make_classification
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score
### get analyzed sheet names
## path_dat:list of path; sheet_names:list of string, axis=0(add vertically)
def get_df_dict(path_data, sheet_names, axis):
df_dict = dict()
for i, path in enumerate(path_data):
for sheet_name in sheet_names:
if i==0: ## initiate df_dict
df = pd.read_excel(path, sheet_name=sheet_name)
df_dict[f'{sheet_name}'] = df
else: ## append df_dict
df = | pd.read_excel(path, sheet_name=sheet_name) | pandas.read_excel |
# -*- coding: utf-8 -*-
import numpy as np
from numpy.linalg import cholesky
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import tensorflow as tf
from random import choice, shuffle
from numpy import array
############ A TensorFlow-based k-means template by <NAME> ###############
def KMeansCluster(vectors, noofclusters):
"""
K-Means Clustering using TensorFlow.
    `vectors` should be an n*k two-dimensional NumPy array, where n is the number of k-dimensional vectors
    'noofclusters' is the number of clusters to form, given as an integer
"""
noofclusters = int(noofclusters)
assert noofclusters < len(vectors)
    # find the dimensionality of each vector
dim = len(vectors[0])
    # helper for randomly picking the initial centroids from the available vectors
vector_indices = list(range(len(vectors)))
shuffle(vector_indices)
    # computation graph
    # We create a dedicated default graph for the whole algorithm, which guarantees that when the function
    # is called several times the default graph is not cluttered with unused ops or Variables left over from earlier calls.
graph = tf.Graph()
with graph.as_default():
        # session for the computation
sess = tf.Session()
        ## build the basic computational elements
        ## First we make sure every centroid gets its own Variable,
        ## initialised from a subset of the existing points used as default centroids.
centroids = [tf.Variable((vectors[vector_indices[i]]))
for i in range(noofclusters)]
        ## create a placeholder used to feed updated values to each centroid
centroid_value = tf.placeholder("float64", [dim])
cent_assigns = []
for centroid in centroids:
cent_assigns.append(tf.assign(centroid, centroid_value))
        ## the cluster assignment of every individual vector defaults to 0
assignments = [tf.Variable(0) for i in range(len(vectors))]
        ## these nodes will be assigned their proper values in later operations
assignment_value = tf.placeholder("int32")
cluster_assigns = []
for assignment in assignments:
cluster_assigns.append(tf.assign(assignment,
assignment_value))
        ## next, build the op used to compute means
        # placeholder for the input
mean_input = tf.placeholder("float", [None, dim])
        # the op takes the input and computes the mean along dimension 0, e.g. over a list of input vectors
mean_op = tf.reduce_mean(mean_input, 0)
        ## nodes used to compute the Euclidean distance
v1 = tf.placeholder("float", [dim])
v2 = tf.placeholder("float", [dim])
euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(
v1, v2), 2)))
        ## this op decides which centroid a vector should be assigned to,
        ## based on the Euclidean distance from the vector to each centroid
#Placeholder for input
centroid_distances = tf.placeholder("float", [noofclusters])
cluster_assignment = tf.argmin(centroid_distances, 0)
        ## initialise all the state
        ## This initialises every Variable defined in the graph. The variable initialiser should be
        ## defined after all the Variables have been constructed, so that each of them is covered.
init_op = tf.global_variables_initializer()
        # initialise all the variables
sess.run(init_op)
        ## clustering iterations
        # The k-means iterations below follow an expectation-maximisation scheme. For simplicity it is
        # run for a fixed number of iterations rather than until a convergence criterion is met.
noofiterations = 20
for iteration_n in range(noofiterations):
            ## expectation step
            ## based on the centroid positions computed in the previous iteration, work out
            ## the _expected_ centroid assignments.
            # first iterate over all the vectors
for vector_n in range(len(vectors)):
vect = vectors[vector_n]
                # compute the Euclidean distance between the given vector and each centroid
distances = [sess.run(euclid_dist, feed_dict={
v1: vect, v2: sess.run(centroid)})
for centroid in centroids]
                # now use the cluster-assignment op, feeding it the distances computed above
assignment = sess.run(cluster_assignment, feed_dict = {
centroid_distances: distances})
                # then store the chosen cluster for this vector
sess.run(cluster_assigns[vector_n], feed_dict={
assignment_value: assignment})
            ## maximisation step
            # given the expectation step above, recompute each centroid so that the within-cluster sum of squares is minimised
for cluster_n in range(noofclusters):
                # collect all the vectors assigned to this cluster
assigned_vects = [vectors[i] for i in range(len(vectors))
if sess.run(assignments[i]) == cluster_n]
                # compute the new cluster centroid
new_location = sess.run(mean_op, feed_dict={
mean_input: array(assigned_vects)})
#为每个向量分配合适的中心点
sess.run(cent_assigns[cluster_n], feed_dict={
centroid_value: new_location})
        # return the centroids and assignments
centroids = sess.run(centroids)
assignments = sess.run(assignments)
return centroids, assignments
############ generate test data ###############
sampleNo = 100  # number of samples
mu = 3
# 2-D normal distribution
mu = np.array([[1, 5]])
Sigma = np.array([[1, 0.5], [1.5, 3]])
R = cholesky(Sigma)
srcdata= np.dot(np.random.randn(sampleNo, 2), R) + mu
plt.plot(srcdata[:,0],srcdata[:,1],'bo')
############ run the k-means algorithm ###############
k=4
center,result=KMeansCluster(srcdata,k)
print(center)
############ plot the result with seaborn ###############
res={"x":[],"y":[],"kmeans_res":[]}
for i in range(len(result)):
res["x"].append(srcdata[i][0])
res["y"].append(srcdata[i][1])
res["kmeans_res"].append(result[i])
pd_res = pd.DataFrame(res)
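# Assumed continuation of the seaborn plotting step flagged above (a sketch,
# not the original code): colour each point by its k-means cluster id.
sns.lmplot(x="x", y="y", data=pd_res, fit_reg=False, hue="kmeans_res")
plt.show()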
"""
Copyright (c) 2020 <NAME>
This software is released under the MIT License.
https://opensource.org/licenses/MIT
"""
import pandas as pd
import json
import os
class Fhirndjson(object):
def __init__(self):
        self._df = pd.DataFrame(columns=[])
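    # Illustrative sketch only -- this method is an assumption added for readability
    # and is NOT the fhiry package's actual API: read one FHIR ndjson file and
    # flatten each JSON line into a row of the internal dataframe.
    def read_ndjson_sketch(self, file_path):
        rows = []
        with open(file_path, 'r') as f:
            for line in f:
                line = line.strip()
                if line:
                    rows.append(json.loads(line))
        self._df = pd.json_normalize(rows)
        return self._df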
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
        result = concat([s1, df, s2], axis=1)
        tm.assert_frame_equal(result, expected)
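
# Illustrative sketch (not part of the pandas test-suite above): a minimal,
# standalone look at the keys/names behaviour these tests exercise; the leading
# underscore keeps pytest from collecting it.
def _demo_concat_keys_sketch():
    df = DataFrame({"a": [1, 2]})
    out = concat([df, df], keys=["one", "two"], names=["src", None])
    # the outer index level carries the keys supplied to concat
    assert list(out.index.get_level_values("src")) == ["one", "one", "two", "two"]
    return out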
import dotenv
import os
from pyairtable import Table
import pandas as pd
def load_airtable(key, base_id, table_name):
at = Table(key, base_id, table_name)
return at
def get_info(airtable_tab):
yt_links, emails = [], []
    records = airtable_tab.all()  # fetch once instead of re-querying the API for every record
    for talk in records:
        yt_link = talk.get('fields', {}).get('youtube_url')
        email = talk.get('fields', {}).get('email')
yt_links.append(yt_link)
emails.append(email)
return yt_links, emails
def save_to_df(yt_links, emails):
df = pd.DataFrame({'emails': emails,
'youtube_url': yt_links})
# people can submit multiple times and with different videos - keep the
# last one only
df.drop_duplicates(keep='last', inplace=True)
df.to_csv('videos/data.csv', index=False)
def update_df(yt_links, emails):
file_path = 'videos/data.csv'
df = pd.read_csv(file_path)
df_new = pd.DataFrame({'emails': emails,
'youtube_url': yt_links})
df_new.drop_duplicates(keep='last', inplace=True)
    df = pd.merge(df, df_new, how='right')
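    # --- assumed continuation (not in the original snippet): mirror save_to_df by
    # de-duplicating the merged frame and writing it back to disk ---
    df.drop_duplicates(keep='last', inplace=True)
    df.to_csv(file_path, index=False)


# Hypothetical usage sketch -- the environment-variable names and the table name
# are assumptions, not taken from the original script:
if __name__ == '__main__':
    dotenv.load_dotenv()
    key = os.environ.get('AIRTABLE_API_KEY')      # assumed variable name
    base_id = os.environ.get('AIRTABLE_BASE_ID')  # assumed variable name
    table = load_airtable(key, base_id, 'talks')  # 'talks' is a placeholder
    links, emails = get_info(table)
    save_to_df(links, emails)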
"""
A simple library of functions that provide scikit-learn-esque
feature engineering and pre-processing tools.
MIT License
<NAME>, https://www.linkedin.com/in/tjpell
Target encoding inspired by the following Kaggle kernel:
https://www.kaggle.com/tnarik/likelihood-encoding-of-categorical-features
"""
import numpy as np
import pandas as pd
from collections import defaultdict
from sklearn.model_selection import KFold
__all__ = ["TargetEncoder"]
class TargetEncoder:
"""
Encode validation data with a preset "unknown value" for observations that were not
in the test set.
Attributes
----------
method : which statistical method to apply to target variable
    data : the target values seen during fit
    values_dict : dict mapping each category to the list of target values observed for it
k : what order of cross validation to use for regularization
fill_na : whether or not to fill NA's with global method (such as mean)
"""
def __init__(self):
        self.method = np.mean  # store the callable itself, not its result
self.data = None
self.values_dict = None
self.k = None
self.fill_na = True
self.fill_val = None
def fit(self, x, y):
"""
Fit target encoder. Builds dictionary mapping {category: list(target values)}
Parameters
----------
:param x: array-like of shape (n_samples,)
Categorical predictor values.
:param y: array-like of shape (n_samples,)
Target values.
        Note: `method`, `k` and `fill_na` are not parameters of `fit`; they are
            supplied later through `transform` or `fit_transform`.
Returns
-------
self : returns an instance of self.
"""
if len(x) != len(y):
            raise ValueError("Size mismatch: x and y must have the same length")
# put all values associated with each variable in a list
values_dict = defaultdict(list)
        for key, val in zip(x, y):
            values_dict[key].append(val)
self.values_dict = values_dict
self.data = y
return self
    def transform(self, x, method=np.mean, k=None, fill_na=True):
"""
Replace categorical data with target encoded version
Parameters
----------
:param x: array-like of shape (n_samples,)
Categorical predictor values.
:param method: which statistical method to apply to input data.
Recommended uses include np.mean, max, variance
:param k: which order of regularization to apply.
:param fill_na: whether or not to apply global method (such as mean)
when filling in NA values
Returns
-------
        :return: list of encoded values (one per element of x)
"""
self.method = method
self.k = k
self.fill_na = fill_na
if fill_na:
self.fill_val = self.method(self.data)
return self.regularized_encode(x) if k else self.encode(x)
def encode(self, x):
"""
Apply standard target encoding, using the stored method.
Best used on new data. For Training data, use fit_transform.
:param x: Data that we are going to encode
:return: Transformed data
"""
        # map each seen category to the statistic of its collected target values
        apply_dict = {key: self.method(vals) for key, vals in self.values_dict.items()}
        return [apply_dict[val] if val in apply_dict else self.fill_val for val in x]
def regularized_encode(self, x):
"""
Not yet implemented
:param x: Data to be transformed
:return: Message
"""
# Todo: prepare the regularized encoding functionality, or verify that it doesn't make sense
print("Not yet implemented, for now, please use fit_transform")
    def fit_transform(self, x, y, method=np.mean, k=5, fill_na=True):
"""
For a given pair of categorical and response data, create a column of target encoded data. Default behavior
expects that we are going to fit transform training data, with k fold regularization. For test data, it is
recommended that you fit on training data and transform the new data.
:param x: Categorical variable to apply the encoding for
:param y: Target variable, either label encoded categories or continuous values
:param method: Numpy method that we wish to apply to target data
:param k: Order of k-fold cross validation that we wish to apply
:param fill_na: Whether or not to fill Nones in x with the result of applying method to all target data
:return:
"""
if k > 1:
            df = pd.DataFrame({"x": x, "y": y})
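            # --- assumed continuation (a sketch, not the original code): build the
            # encoding out-of-fold with KFold so each row is encoded by statistics
            # computed without that row, then fall back to plain fit + transform ---
            kf = KFold(n_splits=k, shuffle=True)
            encoded = pd.Series(index=df.index, dtype=float)
            for fit_idx, enc_idx in kf.split(df):
                self.fit(df["x"].iloc[fit_idx], df["y"].iloc[fit_idx])
                encoded.iloc[enc_idx] = self.transform(
                    df["x"].iloc[enc_idx], method=method, fill_na=fill_na)
            return encoded.tolist()
        self.fit(x, y)
        return self.transform(x, method=method, fill_na=fill_na)


# Hypothetical usage sketch (toy data, not from the source module); kept as
# comments so importing this library module stays side-effect free:
# enc = TargetEncoder()
# cats = ["a", "b", "a", "b", "c"]
# target = [1.0, 0.0, 3.0, 2.0, 5.0]
# encoded = enc.fit_transform(cats, target, method=np.mean, k=2)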