prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars)
---|---|---|
"""
This script converts raw MADIS-sourced CSV data into daily minimum and maximum temperatures.
Version 1.0
"""
import sys
import pytz
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from os.path import exists
#DEFINE CONSTANTS--------------------------------------------------------------
SOURCE = 'madis'
SRC_VARNAME = 'temperature'
SRC_KEY = 'stationId'
SRC_TIME = 'time'
SRC_VARKEY = 'varname'
MASTER_KEY = 'NWS.id'
INT_EXCEPT = {'E3941':144.,'F4600':96.}
MASTER_LINK = r'https://raw.githubusercontent.com/ikewai/hawaii_wx_station_mgmt_container/main/Hawaii_Master_Station_Meta.csv'
MASTER_DIR = r'/home/hawaii_climate_products_container/preliminary/'
SOURCE_DIR = MASTER_DIR + r'data_aqs/data_outputs/madis/parse/'
PROC_OUTPUT_DIR = MASTER_DIR + r'air_temp/working_data/processed_data/' + SOURCE + r'/'
TRACK_DIR = MASTER_DIR + r'air_temp/data_outputs/tables/air_temp_station_tracking/'
#END CONSTANTS-----------------------------------------------------------------
#DEFINE FUNCTIONS--------------------------------------------------------------
def get_max_counts(temp_df,uni_stns):
max_counts = {}
for stn in uni_stns:
if stn in INT_EXCEPT.keys():
max_count = INT_EXCEPT[stn]
else:
stn_df = temp_df[temp_df[SRC_KEY]==stn].drop_duplicates(subset=['time']).sort_values(by=SRC_TIME)
stn_times = pd.to_datetime(stn_df[SRC_TIME])
stn_ints = (stn_times.round('min').diff().dropna().dt.total_seconds()/60).values
if len(stn_ints) < 1:
continue
vals, counts = np.unique(stn_ints,return_counts=True)
mode_id = np.argmax(counts)
mode_val = vals[mode_id]
max_count = (24*60)/mode_val
max_counts[stn] = max_count
return max_counts
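# Example (hypothetical station): a sensor reporting every 15 minutes has a modal
# interval of 15, so its expected daily count is (24*60)/15 = 96 observations.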
def get_tmin_tmax(temp_df):
uni_stns = temp_df[SRC_KEY].unique()
max_counts = get_max_counts(temp_df,uni_stns)
temp_data = []
for stn in uni_stns:
stn_df = temp_df[temp_df[SRC_KEY]==stn].sort_values(by=SRC_TIME)
st_date = stn_df[SRC_TIME].values[0]
date_str = pd.to_datetime(st_date).strftime('X%Y.%m.%d')
if stn in max_counts.keys():
stn_max = max_counts[stn]
stn_counts = stn_df[~stn_df['value'].isna()].drop_duplicates(subset=[SRC_TIME]).shape[0]
valid_pct = stn_counts/stn_max
tmin = stn_df[~stn_df['value'].isna()]['value'].min()
tmax = stn_df[~stn_df['value'].isna()]['value'].max()
if tmin<tmax:
temp_data.append([stn,'Tmin',date_str,tmin,valid_pct])
temp_data.append([stn,'Tmax',date_str,tmax,valid_pct])
min_max_df = pd.DataFrame(temp_data,columns=[MASTER_KEY,'var','date','value','percent_valid'])
return min_max_df
def convert_dataframe(long_df,varname):
var_df = long_df[long_df['var']==varname]
valid_df = var_df[var_df['percent_valid']>=0.95]
wide_df = pd.DataFrame(index=valid_df[MASTER_KEY].values)
for stn in wide_df.index.values:
stn_temp = valid_df[valid_df[MASTER_KEY]==stn].set_index('date')[['value']]
wide_df.loc[stn,stn_temp.index.values] = stn_temp['value']
wide_df.index.name = MASTER_KEY
wide_df = wide_df.reset_index()
return wide_df
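# Example (hypothetical values): a long-format row ['KHNL', 'Tmin', 'X2024.01.01', 18.2, 0.97]
# passes the 0.95 percent_valid filter and lands in the wide frame as column
# 'X2024.01.01' = 18.2 on the row whose NWS.id is 'KHNL'.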
""" def update_csv(csv_name,new_data_df):
master_df = pd.read_csv(MASTER_LINK)
merged_new_df = master_df.merge(new_data_df,on=MASTER_KEY,how='inner')
merged_new_df = merged_new_df.set_index('SKN')
meta_cols = list(master_df.columns)
if exists(csv_name):
old_df = pd.read_csv(csv_name)
old_df = old_df.set_index('SKN')
updated_df = old_df.merge(merged_new_df,on=meta_cols,how='outer')
updated_df = updated_df.fillna('NA')
updated_df = updated_df.reset_index()
updated_df.to_csv(csv_name,index=False)
else:
merged_new_df = merged_new_df.fillna('NA')
merged_new_df = merged_new_df.reset_index()
merged_new_df.to_csv(csv_name,index=False) """
def update_csv(csv_name,new_data_df):
master_df = pd.read_csv(MASTER_LINK)
prev_ids = new_data_df[MASTER_KEY].values
merged_new_df = master_df.merge(new_data_df,on=MASTER_KEY,how='inner')
merged_new_df = merged_new_df.set_index('SKN')
merged_ids = merged_new_df[MASTER_KEY].values
unkn_ids = np.setdiff1d(prev_ids,merged_ids)
master_df = master_df.set_index('SKN')
meta_cols = list(master_df.columns)
if exists(csv_name):
old_df = pd.read_csv(csv_name)
old_df = old_df.set_index('SKN')
old_cols = list(old_df.columns)
old_inds = old_df.index.values
upd_inds = np.union1d(old_inds,merged_new_df.index.values)
updated_df = pd.DataFrame(index=upd_inds)
updated_df.index.name = 'SKN'
updated_df.loc[old_inds,old_cols] = old_df
updated_df.loc[merged_new_df.index.values,merged_new_df.columns] = merged_new_df
updated_df = sort_dates(updated_df,meta_cols)
updated_df = updated_df.fillna('NA')
updated_df = updated_df.reset_index()
updated_df.to_csv(csv_name,index=False)
else:
merged_new_df = merged_new_df.fillna('NA')
merged_new_df = merged_new_df.reset_index()
merged_new_df.to_csv(csv_name,index=False)
return unkn_ids
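# Example behavior (hypothetical dates): if the existing CSV covers dates through 'X2024.01.01'
# and new_data_df adds 'X2024.01.02', the SKN indices of both frames are unioned, old and new
# values are written into the combined frame, and sort_dates() re-orders the date columns
# before the result is saved back to csv_name.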
def sort_dates(df,meta_cols):
non_meta_cols = [col for col in list(df.columns) if col not in meta_cols]
date_keys_sorted = sorted(pd.to_datetime([dt.split('X')[1] for dt in non_meta_cols]))
date_cols_sorted = [dt.strftime('X%Y.%m.%d') for dt in date_keys_sorted]
sorted_cols = meta_cols + date_cols_sorted
sorted_df = df[sorted_cols]
return sorted_df
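# Example (hypothetical dates): non-meta columns ['X2024.01.02', 'X2024.01.01'] are parsed from
# their 'X%Y.%m.%d' names and re-ordered to meta_cols + ['X2024.01.01', 'X2024.01.02'].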
def update_unknown(unknown_file,unknown_ids,date_str):
if exists(unknown_file):
prev_df = pd.read_csv(unknown_file)
preex_ids = np.intersect1d(unknown_ids,list(prev_df['sourceID'].values))
new_ids = np.setdiff1d(unknown_ids,list(prev_df['sourceID'].values))
prev_df = prev_df.set_index('sourceID')
prev_df.loc[preex_ids,'lastDate'] = date_str
prev_df = prev_df.reset_index()
data_table = [[new_ids[i],SOURCE,date_str] for i in range(len(new_ids))]
unknown_df = | pd.DataFrame(data_table,columns=['sourceID','datastream','lastDate']) | pandas.DataFrame |
# %% [Algorithm 1c Loop]
# # MUSHROOMS
# %% [markdown]
# ## Binary Classification
# %% [markdown]
# ### Imports
# %%
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
# %% [markdown]
# ### Load Data
dataset = pd.read_csv(r"C:\Users\yxie367\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
#dataset = pd.read_csv(r"C:\Users\xieya\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
# %% [markdown]
# ### View Data and Information
# %%
dataset.head()
# %%
dataset.info()
# %%
edible, poisonous = dataset['class'].value_counts()
# print("Edible:\t ", edible,"\nPoisonous:", poisonous)
# %%
# Categorical to numerical
labels = {'e': 0, 'p': 1}
dataset['class'].replace(labels, inplace=True)
edible, poisonous = dataset['class'].value_counts()
#print("0 - Edible: ", edible,"\n1 - Poisonous:", poisonous)
# %% [markdown]
# # NN1 Stalk Root - Rooted (r)
# %% [markdown]
# ### Split Dataset
# %% [markdown]
# #### Get the Labels
# %%
X, y = dataset.drop('class', axis=1), dataset['class'].copy()
#print("X:",X.shape,"\ny:",y.shape)
# %% [markdown]
# #### Train Set and Test Set
total_error_1 = 0
total_error_2 = 0
total_error_comb = 0
randnum = np.arange(2,44,4)
num_trials = len(randnum)
record = ""
wrong_record = ""
run = 1
# %% Data cleaning
from sklearn.model_selection import train_test_split
X_white = pd.DataFrame()
X_not_white = pd.DataFrame()
y_white = pd.Series(dtype='float64')
y_not_white = pd.Series(dtype='float64')
for i in range(0,len(X)):
if X.loc[i,"stalk-root"] == "r":
X_white = X_white.append(X.iloc[i,:])
y_white = y_white.append(pd.Series(y.iloc[i]))
else:
X_not_white = X_not_white.append(X.iloc[i,:])
y_not_white = y_not_white.append(pd.Series(y.iloc[i]))
# %% Data cleaning pt2
X_green = pd.DataFrame()
X_not_green = pd.DataFrame()
y_green = pd.Series(dtype='float64')
y_not_green = pd.Series(dtype='float64')
for i in range(0,len(X)):
if X.loc[i,"odor"] == "a":
X_green = X_green.append(X.iloc[i,:])
y_green = y_green.append(pd.Series(y.iloc[i]))
else:
X_not_green = X_not_green.append(X.iloc[i,:])
y_not_green = y_not_green.append(pd.Series(y.iloc[i]))
# %%
for j in randnum:
X_train_not_white, X_test_not_white, y_train_not_white, y_test_not_white = train_test_split(X_not_white, y_not_white, test_size=1-(6905/(8124-len(X_white))), random_state=j)
X_train_not_green, X_test_not_green, y_train_not_green, y_test_not_green = train_test_split(X_not_green, y_not_green, test_size=1-(6905/(8124-len(X_green))), random_state=j)
X_train_green = (X_train_not_green)
y_train_green = (y_train_not_green)
X_train_white = (X_train_not_white)
y_train_white = (y_train_not_white)
# %%
from sklearn.utils import shuffle
X_train_full1 = shuffle(X_train_white, random_state=j)
X_test = shuffle(X, random_state=j).iloc[4000:8000]
y_train_full1 = shuffle(y_train_white, random_state=j)
y_test = shuffle(y, random_state=j).iloc[4000:8000]
# %% [markdown]
# #### Validation Set
# %%
X_valid1, X_train1 = X_train_full1[:500], X_train_full1[500:]
y_valid1, y_train1 = y_train_full1[:500], y_train_full1[500:]
# print("X_train:", X_train1.shape[0], "y_train", y_train1.shape[0])
# print("X_valid: ", X_valid1.shape[0], "y_valid ", y_valid1.shape[0])
# print("X_test: ", X_test.shape[0], "y_test ", X_test.shape[0])
# %% [markdown]
# ### Prepare the Data
# %% [markdown]
# #### Data Transformation
# %%
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.compose import ColumnTransformer
cat_attr_pipeline = Pipeline([
('encoder', OrdinalEncoder())
])
cols = list(X)
pipeline = ColumnTransformer([
('cat_attr_pipeline', cat_attr_pipeline, cols)
])
X_train1 = pipeline.fit_transform(X_train1)
# Use the encoder fitted on the training set so category codes stay consistent across splits
X_valid1 = pipeline.transform(X_valid1)
X_test1 = pipeline.transform(X_test)
# %% [markdown]
# ### Neural Network
# %% [markdown]
# #### Model
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, Dense
# %%
# tf.random.set_seed(j)
tf.random.set_random_seed(j)
# %%
model1 = Sequential([
InputLayer(input_shape=(22,)), # input layer
Dense(45, activation='relu'), # hidden layer
Dense(1, activation='sigmoid') # output layer
])
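# The 22-unit input layer matches the 22 ordinal-encoded mushroom attributes (all columns of X
# after dropping 'class'); the single sigmoid unit outputs the probability of class 1 (poisonous).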
# %%
#model1.summary()
# %% [markdown]
# #### Compile the Model
# %%
model1.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# %% [markdown]
# #### Prepare Callbacks
# %%
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5',
save_best_only=True)
early_stopping_cb = EarlyStopping(patience=3,
restore_best_weights=True)
# %% [markdown]
# ### Training
# %%
train_model1 = model1.fit(X_train1, y_train1,
epochs=100,
validation_data=(X_valid1, y_valid1),
callbacks=[checkpoint_cb, early_stopping_cb])
# %% [markdown]
# ### Evaluate the Best Model on Test Set
# %%
results1 = model1.evaluate(X_test1, y_test)
# print("test loss, test acc:", results1)
# %% [markdown]
# ### Make Some Predictions
# %%
X_new1 = X_test1[:5]
y_prob1 = model1.predict(X_new1)
# print(y_prob.round(3))
# %%
y_pred1 = (model1.predict(X_new1) > 0.5).astype("int32")
# print(y_pred)
y_test_pred = (model1.predict(X_test1) > 0.5).astype("int32")
# %% [markdown]
# ## KL Divergence
# %%
# X_new = X_test[:5]
X_df1 = pd.DataFrame(model1.predict(X_test1))
y_test_pred1 = | pd.DataFrame(y_test_pred) | pandas.DataFrame |
import pandas as pd
import numpy as np
from datetime import datetime
from multiprocessing import Pool
from functools import partial
from pathos import pools as pp
import pickle as pkl
from UserCentricMeasurements import *
from ContentCentricMeasurements import *
from CommunityCentricMeasurements import *
from TEMeasurements import *
from collections import defaultdict
import jpype
import json
import os
basedir = os.path.dirname(__file__)
class BaselineMeasurements(UserCentricMeasurements, ContentCentricMeasurements, TEMeasurements, CommunityCentricMeasurements):
def __init__(self,
dfLoc,
content_node_ids=[],
user_node_ids=[],
metaContentData=False,
metaUserData=False,
contentActorsFile=os.path.join(basedir, './baseline_challenge_data/filtUsers-baseline.pkl'),
contentFile=os.path.join(basedir, './baseline_challenge_data/filtRepos-baseline.pkl'),
topNodes=[],
topEdges=[],
previousActionsFile='',
community_dictionary='',
# community_dictionary=os.path.join(basedir, './baseline_challenge_data/baseline_challenge_community_dict.pkl'),
te_config=os.path.join(basedir, './baseline_challenge_data/te_params_baseline.json'),
platform='github',
use_java=True):
super(BaselineMeasurements, self).__init__()
self.platform = platform
try:
# check if input is a data frame
dfLoc.columns
df = dfLoc
except:
# if not it should be a csv file path
df = pd.read_csv(dfLoc)
self.contribution_events = ['PullRequestEvent',
'PushEvent',
'IssuesEvent',
'IssueCommentEvent',
'PullRequestReviewCommentEvent',
'CommitCommentEvent',
'CreateEvent',
'post',
'tweet']
self.popularity_events = ['WatchEvent',
'ForkEvent',
'comment',
'post',
'retweet',
'quote',
'reply']
print('preprocessing...')
self.main_df = self.preprocess(df)
print('splitting optional columns...')
# store action and merged columns in a separate data frame that is not used for most measurements
if platform == 'github' and len(self.main_df.columns) == 6 and 'action' in self.main_df.columns:
self.main_df_opt = self.main_df.copy()[['action', 'merged']]
self.main_df = self.main_df.drop(['action', 'merged'], axis=1)
else:
self.main_df_opt = None
# For content centric
print('getting selected content IDs...')
if content_node_ids != ['all']:
if self.platform == 'reddit':
self.selectedContent = self.main_df[self.main_df.root.isin(content_node_ids)]
elif self.platform == 'twitter':
self.selectedContent = self.main_df[self.main_df.root.isin(content_node_ids)]
else:
self.selectedContent = self.main_df[self.main_df.content.isin(content_node_ids)]
else:
self.selectedContent = self.main_df
# For userCentric
self.selectedUsers = self.main_df[self.main_df.user.isin(user_node_ids)]
print('processing repo metadata...')
# read in external metadata files
# repoMetaData format - full_name_h,created_at,owner.login_h,language
# userMetaData format - login_h,created_at,location,company
if metaContentData != False:
self.useContentMetaData = True
meta_content_data = | pd.read_csv(metaContentData) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, re, sys
import preprocess.EMGprocess
import preprocess.plot_features
from typing import List, Dict, Sized
from sklearn.metrics import confusion_matrix
import pickle
import pandas as pds
from time import sleep
# ------- ------- ------- ------- General settings ------- ------- ------- ------- ------- ------- ------- -------
cond = 'OFF' # conditions to test for
COI = [4,5,8] # channels of interest
task = 'rst' # selected task
n_splits = 10 # n-fold cross validation
fileobj = preprocess.EMGprocess.EMGfileworks(task=task, subj='all', scaling=False)
svmobj = preprocess.EMGprocess.EMGpredict()
EMGdetails = preprocess.EMGprocess.ExtractEMGfeat(task=task, filename_append='')
details = EMGdetails.extract_details(subj=fileobj.subj, act="return")
# ------- ------- ------- ------- ------- ------- ------- ------- ------- ------- ------- ------- ------- -------
listEMGallOFF, listEMGallON = fileobj.get_EMGfilelist_all(task='tap')
for k in range(8, 9, 1): # loop through all segments in order to read how much recording time is necessary
fileobj.filter_data_and_extract_features(listEMGallOFF, '',
os.path.join(fileobj.datobj.wdir, 'data', 'EMG', 'filtered_data' + str(k)),
os.path.join(fileobj.datobj.wdir, 'data', 'EMG',
'features_split' + str(k)), k)
fileobj.filter_data_and_extract_features(listEMGallON, '',
os.path.join(fileobj.datobj.wdir, 'data', 'EMG', 'filtered_data' + str(k)),
os.path.join(fileobj.datobj.wdir, 'data', 'EMG',
'features_split' + str(k)), k)
listEMGallOFF, listEMGallON = fileobj.get_EMGfilelist_all(task='rst')
for k in range(1, 9, 1): # loop through all segments in order to read how much recording time is necessary
fileobj.filter_data_and_extract_features(listEMGallOFF, '',
os.path.join(fileobj.datobj.wdir, 'data', 'EMG', 'filtered_data' + str(k)),
os.path.join(fileobj.datobj.wdir, 'data', 'EMG',
'features_split' + str(k)), k)
#fileobj.filter_data_and_extract_features(listEMGallON, '',
# os.path.join(fileobj.datobj.wdir, 'data', 'EMG', 'filtered_data' + str(k)),
# os.path.join(fileobj.datobj.wdir, 'data', 'EMG',
# 'features_split' + str(k)), k)
# Obtain the names and metadata for both groups according to the Excel file (see <dataprocess> for more information)
trem_details = details[details.type == 1]
bradkin_details = details[(details.type == 0) | (details.type == 2)]
# Sort data into two categories: a) bradykinetic-rigid and b) tremor-dominant iPS patients
list_type = {}
tremdom = []
brakin = []
for r in listEMGallOFF:
filename = os.path.splitext(r)[0]
if any(bradkin_details["Name"].str.find(filename[0:11]) == 0):
brakin.append(r)
else:
tremdom.append(r)
list_type.update({"brakin": brakin, "tremdom": tremdom})
# Load EMG features to memory, according to file-list <listEMGallOFF> (here only 8secs. are used)
progbar_size = len(listEMGallOFF)
print('Reading extracted features for all subjects ...')
dfs: Dict = {}
for idx, r in enumerate(listEMGallOFF):
j = (idx + 1) / progbar_size
sys.stdout.write('\r')
sys.stdout.write("[%-20s] %d%%" % ('=' * int(20 * j), 100 * j))
sys.stdout.flush()
sleep(0.25)
filename = os.path.splitext(r)[0]
infile = open(os.path.join(fileobj.datobj.wdir, 'data', 'EMG', 'features_split8', filename + '_8secs_features.pkl'),
'rb')
dfs[r] = pickle.load(infile)
infile.close()
if any(bradkin_details["Name"].str.find(filename[0:11]) == 0):
dfs[r].insert(0, "output_0", 0)
else:
dfs[r].insert(0, "output_0", 1)
print()
print("DONE reading features!")
# Plot routine comparisons between both groups:
features = ["IAV", "MAV2", "RMS", "VAR", "ZC", "SSC"] # features of interest
column_regex = re.compile("^((" + ")|(".join(features) + "))_[0-9]+")
feat_br = pds.DataFrame(columns=features)
feat_td = pds.DataFrame(columns=features)
feat_all = {}
listEMGtremOFF = fileobj.get_EMGfilelist_per_subj(cond=cond, task=task, subj=list(trem_details.Name))
for idx, r in enumerate(listEMGtremOFF):
print("Reading features per subject, reading: ", r)
data_temp = pds.DataFrame()
for f in listEMGtremOFF[r]:
filename = os.path.splitext(f)[0]
columns_input = list(filter(column_regex.match, list(dfs[f])))
data_temp = pds.concat([data_temp, dfs[f][columns_input]])
feat_all[idx + 1] = pds.DataFrame.mean(data_temp, skipna=True)
feat_tdAllchan = | pds.DataFrame.from_dict(feat_all, orient='index') | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 12:07:57 2019
@author: johnmount
"""
import numpy
import pandas
import vtreat.util
import vtreat.transform
class VarTransform:
"""build a treatment plan for a numeric outcome (regression)"""
def __init__(self, incoming_column_name, derived_column_names, treatment):
self.incoming_column_name_ = incoming_column_name
self.derived_column_names_ = derived_column_names.copy()
self.treatment_ = treatment
self.need_cross_treatment_ = False
self.refitter_ = None
def transform(self, data_frame):
raise NotImplementedError("base method called")
class MappedCodeTransform(VarTransform):
def __init__(self, incoming_column_name, derived_column_name, treatment, code_book):
VarTransform.__init__(
self, incoming_column_name, [derived_column_name], treatment
)
self.code_book_ = code_book
def transform(self, data_frame):
incoming_column_name = self.incoming_column_name_
derived_column_name = self.derived_column_names_[0]
sf = pandas.DataFrame({incoming_column_name: data_frame[incoming_column_name]})
bad_posns = vtreat.util.is_bad(sf[incoming_column_name])
sf.loc[bad_posns, incoming_column_name] = "_NA_"
res = pandas.merge(
sf, self.code_book_, on=[self.incoming_column_name_], how="left", sort=False
) # ordered by left table rows
res = res[[derived_column_name]].copy()
res.loc[vtreat.util.is_bad(res[derived_column_name]), derived_column_name] = 0
return res
class YAwareMappedCodeTransform(MappedCodeTransform):
def __init__(
self,
incoming_column_name,
derived_column_name,
treatment,
code_book,
refitter,
extra_args,
params,
):
MappedCodeTransform.__init__(
self,
incoming_column_name=incoming_column_name,
derived_column_name=derived_column_name,
treatment=treatment,
code_book=code_book,
)
self.need_cross_treatment_ = True
self.refitter_ = refitter
self.extra_args_ = extra_args
self.params_ = params
class CleanNumericTransform(VarTransform):
def __init__(self, incoming_column_name, replacement_value):
VarTransform.__init__(
self, incoming_column_name, [incoming_column_name], "clean_copy"
)
self.replacement_value_ = replacement_value
def transform(self, data_frame):
col = numpy.asarray(data_frame[self.incoming_column_name_].copy()).astype(float)
bad_posns = vtreat.util.is_bad(col)
col[bad_posns] = self.replacement_value_
res = pandas.DataFrame({self.derived_column_names_[0]: col})
return res
class IndicateMissingTransform(VarTransform):
def __init__(self, incoming_column_name, derived_column_name):
VarTransform.__init__(
self, incoming_column_name, [derived_column_name], "missing_indicator"
)
def transform(self, data_frame):
col = vtreat.util.is_bad(data_frame[self.incoming_column_name_])
res = | pandas.DataFrame({self.derived_column_names_[0]: col}) | pandas.DataFrame |
import pandas as pd
from paso3proyecto1 import eliminarRepetidos
def juntarTablas(Dataframe1,Dataframe2):
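# Merge priority: join on 'id_suite' when both frames have it, otherwise on 'gclid',
# otherwise fall back to 'url_landing' (outer join with _nav/_conv suffixes in every case).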
Dataframe1= eliminarRepetidos(Dataframe1)
Dataframe2= eliminarRepetidos(Dataframe2)
Dataframe3= pd.DataFrame()
if "id_suite" in Dataframe1.columns and "id_suite" in Dataframe2.columns:
Dataframe3= pd.merge(Dataframe1, Dataframe2, on='id_suite', how='outer',suffixes=("_nav","_conv"))
elif "gclid" in Dataframe1.columns and "gclid" in Dataframe2.columns:
Dataframe3= pd.merge(Dataframe1, Dataframe2, on='gclid', how='outer',suffixes=("_nav","_conv"))
else:
Dataframe3= pd.merge(Dataframe1, Dataframe2, on='url_landing', how='outer',suffixes=("_nav","_conv"))
return Dataframe3
if __name__ == "__main__":
conversiones=pd.read_csv("conversiones.csv", sep=";")
navegacion=pd.read_csv("navegacion.csv", sep=";")
nav= | pd.DataFrame(navegacion) | pandas.DataFrame |
'''
preprocessing_example.py
This script is an example of pre-processing message data before applying the
python code. Please refer to Section 3 of the Code and Data Appendix. For the
meaning of the values of each column in the LSE message data, please refer to
"London Stock Exchange MIT201 - Guide to the Trading System Issue 12.3"
(https://web.archive.org/web/20150703141903/https://www.londonstockexchange.com/products-and-services/trading-services/guide-to-new-trading-system.pdf)
Note that this script may not be directly used by researchers using message data
from other exchanges or from LSE but during a different time period, as LSE makes
changes to the trading protocol from time to time. We provide this script as an
example and hope it can help the users of our code pre-process their message data.
'''
import pandas as pd
import numpy as np
import os
import multiprocessing
import datetime
import random
import sys
base = os.path.expanduser('~') + '/Dropbox/Project - HFT Measurement of the Arms Race/FCA Collaboration - confidential/Materials for Data Appendix/PackageTesting'
path = base + '/Testing/'
path_reference_data = path + '/ReferenceData/'
file_symdates = path_reference_data + '/Sample_All_SymDates.csv.gz'
def preprocess_msgs(date, sym, in_dir, out_dir):
infile_msgs = '%s/%s/CleanMsgData_%s_%s.csv.gz' % (in_dir, date, date, sym)
# Note: variable names and data types in user's data may be different from the ones below.
dtypes_msgs = {'Source': 'O', 'SourceID': 'int64', 'StreamID': 'O','ConnectionID': 'O',
'GatewayIPAddress': 'O', 'GatewayPort': 'O', 'ID': 'int64', 'MessageTimestamp': 'O',
'UserID': 'O', 'MessageType': 'O', 'InstrumentID': 'O', 'ClientOrderID': 'O',
'OrderID': 'O', 'OrderStatus': 'O', 'OrderType': 'O', 'PublicOrderID': 'O',
'ExecType': 'O', 'TIF': 'O', 'ClOrdLinkID': 'O', 'ExpireDateTime': 'O',
'RawSide': 'O', 'OrderQty': 'float64', 'DisplayQty': 'float64',
'LimitPrice': 'int64', 'Capacity': 'O', 'OrderSubType': 'O', 'StoppedPrice': 'int64',
'Anonymity': 'O', 'PassiveOnlyOrder': 'O', 'OriginalClientOrderID': 'O',
'BidPrice': 'int64', 'BidSize': 'float64', 'AskPrice': 'int64',
'AskSize': 'float64', 'ExecutionID': 'O', 'OrderRejectCode': 'O',
'ExecutedPrice': 'int64', 'ExecutedQty': 'float64', 'LeavesQty': 'float64',
'Container': 'O', 'TradeMatchID': 'O', 'TransactTime': 'O', 'TypeOfTrade': 'O',
'MinQty': 'float64', 'DisplayMethod': 'O', 'PriceDifferential': 'O',
'CancelRejectReason': 'O', 'Symbol_Type': 'O', 'Segment_ID': 'O', 'Symbol': 'O',
'Date': 'O', 'Timestamp': 'O', 'FirmClass': 'O', 'FirmNum': 'float64',
'UserNum': 'float64', 'OrderNum': 'float64', 'QuoteRelated': 'bool',
'UniqueOrderID': 'O', 'Side': 'O', 'UnifiedMessageType': 'O',
'PrevPriceLvl': 'int64', 'PrevQty': 'float64', 'PriceLvl': 'int64',
'Classified': 'bool', 'EventNum': 'float64', 'Event': 'O', 'MinExecPriceLvl':'int64',
'MaxExecPriceLvl':'int64', 'PrevBidPriceLvl': 'int64', 'PrevBidQty': 'float64',
'BidPriceLvl': 'int64', 'BidClassified': 'bool', 'BidEventNum': 'float64',
'BidEvent': 'O', 'BidMinExecPriceLvl':'int64', 'BidMaxExecPriceLvl':'int64',
'PrevAskPriceLvl': 'int64', 'PrevAskQty': 'float64', 'AskPriceLvl': 'int64',
'AskClassified': 'bool', 'AskEventNum': 'float64', 'AskEvent': 'O',
'MinExecPriceLvl':'int64', 'MaxExecPriceLvl':'int64','PostOpenAuction':'bool','OpenAuction':'bool','AuctionTrade':'bool'}
msgs = pd.read_csv(infile_msgs, dtype = dtypes_msgs, parse_dates=['MessageTimestamp'])
msgs.drop(['Symbol'], axis=1, inplace=True)
msgs = msgs.rename(columns = {'FirmNum':'FirmID','OpenAuction':'OpenAuctionTrade',
'StoppedPrice':'StopPrice','InstrumentID':'Symbol',
'OriginalClientOrderID':'OrigClientOrderID',
'OrderID':'MEOrderID'})
### Get source format (if the exchange has multiple message formats, eg. Fix/native
is_ntv, is_fix = msgs['Source'] == 'Native', msgs['Source'] == 'FIX'
### MessageType
MessageTypes = {'D':'New_Order', 'F':'Cancel_Request','q':'Mass_Cancel_Request','G':'Cancel_Replace_Request',
'S':'New_Quote', '8':'Execution_Report','9':'Cancel_Reject','r':'Mass_Cancel_Report',
'3':'Other_Reject','j':'Other_Reject'}
msgs['MessageType'] = msgs['MessageType'].map(MessageTypes)
### OrderType
OrderTypes = {'1':'Market', '2':'Limit', '3':'Stop', '4':'Stop_Limit', 'P':'Pegged'}
msgs['OrderType'] = msgs['OrderType'].map(OrderTypes)
# Set native messages pegged orders based on OrderSubType
msgs.loc[is_ntv & (msgs['MessageType'] == 'New_Order') & (msgs['OrderType'].isin({'Market','Limit'})) & (msgs['OrderSubType'] == '5'), 'OrderType'] = 'Pegged'
# Set passive-only orders to OrderType Passive_Only
msgs.loc[(msgs['MessageType'] == 'New_Order') & (msgs['OrderType'] == 'Limit') & (msgs['PassiveOnlyOrder'].notnull()), 'OrderType'] = 'Passive_Only'
msgs.loc[msgs['MessageType'] != 'New_Order', 'OrderType'] = np.nan
### TIF - check data type
TIFs_ntv = {'5':'GFA','10':'GFA','12':'GFA','50':'GFA','51':'GFA','52':'GFA',
'3':'IOC','4':'FOK',
'0':'GoodTill','6':'GoodTill','7':'GoodTill'}
TIFs_fix = {'2':'GFA','7':'GFA','9':'GFA','8':'GFA','C':'GFA',
'3':'IOC','4':'FOK',
'0':'GoodTill','6':'GoodTill'}
msgs.loc[is_ntv, 'TIF'] = msgs['TIF'].map(TIFs_ntv)
msgs.loc[is_fix, 'TIF'] = msgs['TIF'].map(TIFs_fix)
### ExecType
ExecTypes = {'0':'Order_Accepted','8':'Order_Rejected','F':'Order_Executed',
'C':'Order_Expired','4':'Order_Cancelled','5':'Order_Replaced',
'D':'Order_Restated','9':'Order_Suspended'}
msgs['ExecType'] = msgs['ExecType'].map(ExecTypes)
### Cancel Reject Reason # others are NA. check whether this is fine
msgs.loc[is_ntv & (msgs['MessageType'] == 'Cancel_Reject') & (msgs['CancelRejectReason'] == '2000'), 'CancelRejectReason'] = 'TLTC'
msgs.loc[is_fix & (msgs['MessageType'] == 'Cancel_Reject') & (msgs['CancelRejectReason'] == '1'), 'CancelRejectReason'] = 'TLTC'
msgs.loc[(msgs['MessageType'] == 'Cancel_Reject') & (msgs['CancelRejectReason'] != 'TLTC'), 'CancelRejectReason'] = 'Other'
### OrderStatus
OrderStatus = {'1':'Partial_Fill','2':'Full_Fill'}
msgs.loc[(msgs['MessageType'] == 'Execution_Report') & (msgs['ExecType'] == 'Order_Executed'), 'OrderStatus'] = msgs['OrderStatus'].map(OrderStatus)
msgs.loc[(~msgs['OrderStatus'].isin(['Partial_Fill','Full_Fill'])), 'OrderStatus'] = np.nan
### TradeInitiator: A P Other
# Set auction trades to Other
msgs.loc[(msgs['MessageType'] == 'Execution_Report') & (msgs['ExecType'] == 'Order_Executed') & (msgs['AuctionTrade'] == True), 'TradeInitiator'] = 'Other'
# Set TradeInitiator for native msgs
msgs.loc[is_ntv & (msgs['MessageType'] == 'Execution_Report') & (msgs['ExecType'] == 'Order_Executed') & (msgs['AuctionTrade'] == False), 'TradeInitiator'] = 'Other'
msgs.loc[is_ntv & (msgs['MessageType'] == 'Execution_Report') & (msgs['ExecType'] == 'Order_Executed') & (msgs['AuctionTrade'] == False) & (msgs['Container'].isin({'1'})), 'TradeInitiator'] = 'Passive'
msgs.loc[is_ntv & (msgs['MessageType'] == 'Execution_Report') & (msgs['ExecType'] == 'Order_Executed') & (msgs['AuctionTrade'] == False) & (msgs['Container'].isin({'0','3'})), 'TradeInitiator'] = 'Aggressive'
# Set TradeInitiator for fix msgs
msgs.loc[is_fix & (msgs['MessageType'] == 'Execution_Report') & (msgs['ExecType'] == 'Order_Executed') & (msgs['AuctionTrade'] == False), 'TradeInitiator'] = 'Other'
msgs.loc[is_fix & (msgs['MessageType'] == 'Execution_Report') & (msgs['ExecType'] == 'Order_Executed') & (msgs['AuctionTrade'] == False) & (msgs['TypeOfTrade'].isin({'0','1'})), 'TradeInitiator'] = 'Passive'
msgs.loc[is_fix & (msgs['MessageType'] == 'Execution_Report') & (msgs['ExecType'] == 'Order_Executed') & (msgs['AuctionTrade'] == False) & (msgs['TypeOfTrade'].isin({'2'})), 'TradeInitiator'] = 'Aggressive'
### Price factor conversion
# Set Int values to dtype Int64 to allow np.nan mixed with int64
# Set missing prices to NA
prices = ['LimitPrice', 'StopPrice', 'ExecutedPrice', 'BidPrice', 'AskPrice']
for col in prices:
msgs[col] = msgs[col]/1e8
msgs.loc[msgs[col] <= 0, col] = np.nan
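# e.g. a raw LimitPrice field of 123450000000 becomes 1234.5 after dividing by 1e8,
# and non-positive raw prices are treated as missing.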
### Make SessionID and RegularHour
# Load symbol-date info
# Note: Depending on the format of the user's reference data,
# it is likely that the way to add session ID info and regular hours info to
# the message data is different from below
info = pd.read_csv(base+'/Testing/ReferenceData/Symbol_Date_Info.csv')
info = info.loc[(info['InstrumentID'].astype('str') == sym) & (info['Date'] == date)]
# Generate reg hour and session ID
reg_hours = pd.Series(False, index=msgs.index)
sess_id = pd.Series(np.nan, index=msgs.index)
i = 1
while i <= info['Sess_Max_N'].iloc[0]:
sess_st = pd.to_datetime(info['Sess_St_%s' % i].iloc[0])
sess_end = pd.to_datetime(info['Sess_End_%s' % i].iloc[0])
# Session starts on first inbound in New Order / New Quote in regular hours
sess_st_id = msgs.index[(msgs['MessageTimestamp'] > sess_st) & (msgs['MessageType'].isin({'New_Order', 'New_Quote'}))][0]
# Session ends on last outbound in regular hours
sess_end_id = msgs.index[(msgs['MessageTimestamp'] < sess_end) & (msgs['MessageType'].isin({'Execution_Report'}))][-1]
sess_msgs = ((msgs.index >= sess_st_id) & (msgs.index <= sess_end_id))
sess_id[sess_msgs] = i
reg_hours = reg_hours | sess_msgs
i += 1
post_open_auction = msgs['PostOpenAuction']
msgs['RegularHour'] = reg_hours & post_open_auction
msgs['SessionID'] = sess_id
### Add unique order id
msgs = add_UniqueOrderID(msgs)
### add quote identifier
msgs['QuoteRelated'] = msgs['UniqueOrderID'].str[-2:] == 'QR'
### Flag for FIX Quote, since we are missing their outbounds.
### Note: this is a specific problem to our LSE dataset and users may not need to
### to do this.
msgs['Flag_FixQuote'] = (msgs['QuoteRelated']) & (msgs['Source'] == 'FIX')
### Replace mass cancel and mass cancel report
msgs.loc[(msgs['MessageType']=='Mass_Cancel_Request'), 'MessageType'] = 'Other_Inbound'
msgs.loc[(msgs['MessageType']=='Mass_Cancel_Report'), 'MessageType'] = 'Other_Outbound'
### Get the columns
msgs['Date'] = date
msgs['Symbol'] = sym
cols = [
'Date','Symbol',
'ClientOrderID', 'UniqueOrderID','TradeMatchID', 'SessionID', 'MEOrderID','OrigClientOrderID',
'UserID', 'FirmID', 'FirmClass',
'MessageTimestamp', 'Side', 'MessageType', 'OrderType', 'ExecType',
'OrderStatus', 'TradeInitiator', 'TIF', 'CancelRejectReason',
'LimitPrice', 'OrderQty', 'DisplayQty', 'ExecutedPrice', 'StopPrice', 'ExecutedQty', 'LeavesQty',
'QuoteRelated', 'BidPrice', 'BidSize', 'AskPrice', 'AskSize',
'OpenAuctionTrade', 'AuctionTrade', 'RegularHour', 'Flag_FixQuote'
]
if not os.path.exists('%s/%s'%(out_dir, date)):
os.makedirs('%s/%s'%(out_dir, date))
msgs[cols].to_csv('%s/%s/Raw_Msg_Data_%s_%s.csv.gz' % (out_dir, date, date, sym), index=False, compression = 'gzip')
return date, sym
def add_UniqueOrderID(msgs):
'''
Add unique order id to the msgs dataframe. This is necessary because the order identifiers in the raw data may not be ideal:
some of them are not populated for all messages in the same order (MEOrderID is only populated for outbounds, missing in inbounds) and
some of them are not fixed during the lifetime of an order (ClientOrderID changes when the user modifies the order). Thus, we need to assign
an order identifier that is populated for all messages in the order and does not change when user modifies the order.
Reference: Section 3.2.1 of the code and data appendix.
Note that this function might not apply to other exchanges. The user needs to rewrite this function if necessary, based on the specific institutional
details of the exchange in the user's data. This function can serve as an example of how to generate UniqueOrderID. The requirement is that the UniqueOrderID
is populated for all messages in the order and does not change when the user modifies the order. The UniqueOrderID column should never be missing.
If the user's data contains the columns we use (listed below) to generate our unique order id, he/she may directly apply this function
for generating UniqueOrderID, but the user must make sure that the variables they use fit the descriptions below and the logic behind the function
can indeed generate a unique order id with the desired property (does not change throughout the lifetime of an order) under the rules of the
exchange in the user's data.
Input: sym-date level message dataset
Used columns are:
'Source' : In many exchanges there are multiple interfaces (different message format).
This field indicates which interface the message is from. Note that messages from all interfaces
for a sym-date should be combined into a single message dataframe and be processed together.
'ClientOrderID': ClientOrderID is provided by users when submitting new orders. They can also change the
ClientOrderID of an existing order when modifying the order. We assume that a user uses
unique ClientOrderID when submitting new orders and modifying orders. This uniqueness
is suggested by LSE and it is required in other exchanges such as NYSE.
'OrigClientOrderID': In cancel and cancel/replace requests users use OrigClientOrderID or MEOrderID
to refer to the order they are trying to cancel/modify. When MEOrderID is populated
in a cancel or cancel/replace request, this field is ignored.
'MEOrderID': MEOrderID is created by the matching engine after users submit new orders. It is populated in
matching engine messages (outbounds) and cancel and cancel/replace messages.
'UserNum': In raw data users have a UserID. We enumerate all users and use index to refer to users
The procedure: We loop over all messages for each User-MessageFormat pair. For new orders and mass cancel requests,
we start a new UniqueOrderID. For cancel requests, cancel/replace requests, execution reports, rejection messages, and mass cancel reports,
in cases where the MEOrderID is populated with a MEOrderID we have seen before, we assign the UniqueOrderID of the earlier message
with the same MEOrderID to the current message. Otherwise, we assign the UniqueOrderID of the earlier message with the same
ClientOrderID/OrigClientOrderID to the current message. (The handling of these message types differs slightly; please refer to the comments
below to see the differences.) We index all orders from the same user by integers (OrderNum). All quote-related messages from
the same user are in the same order (OrderNum='QR').
The format of the unique order id is Source_UserNum_OrderNum.
Return: Message dataframe with UniqueOrderID added.
'''
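# Example (hypothetical values): the third order of Native-interface user number 12 gets
# UniqueOrderID 'Native_000012_00000003', while every quote-related message from that user
# shares 'Native_000012_QR'.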
msgs['OrderNum'] = None
msgs['UniqueOrderID'] = None
originalid_orderid_conflict = 0
no_inbound_outbound_id = 0
# Loop over users:
for _, user_msgs in msgs.groupby('UserID'):
# Initialize counter
counter = 0
# Loop over Native and FIX messages
for source in set(msgs['Source'].unique()):
# Initialize sets of previously observed IDs and message times
prev = {'MEOrderID': {}, 'ClientOrderID': {}}
time = {'MEOrderID': {}, 'ClientOrderID': {}}
# Loop over user messages from source
for i in user_msgs.loc[user_msgs['Source'] == source].index:
order_id = msgs.at[i, 'MEOrderID']
client_order_id = msgs.at[i, 'ClientOrderID']
original_client_order_id = msgs.at[i, 'OrigClientOrderID']
# Gateway New Quote
# For a given user, all quotes will have the same UniqueOrderID
if msgs.at[i, 'MessageType'] == 'New_Quote':
msgs.at[i, 'UniqueOrderID'] = '%s_%06d_QR' % (source, msgs.at[i, 'UserNum'])
continue
# Gateway New Order and Gateway Mass Cancel
# Increment counter then set OrderNum to counter
elif msgs.at[i, 'MessageType'] in {'New_Order', 'Mass_Cancel_Request'}:
counter += 1
msgs.at[i, 'OrderNum'] = counter
# Gateway Cancel/Replace and Gateway Cancel
# C/R and Cancel should only happen if there has been a previous Outbound ID.
# Else conditions are added for cases in which
# there was no previous outbound message due to packet loss
elif msgs.at[i, 'MessageType'] in {'Cancel_Replace_Request', 'Cancel_Request'}:
# Case 1: User references both OrigClientOrderID and the ME MEOrderID
if pd.notnull(order_id) & pd.notnull(original_client_order_id):
# If both have already been seen, use the order number from the order ID.
# Otherwise use MEOrderID or ClientOrderID depending on which has been seen
if (order_id in prev['MEOrderID'].keys()) & (original_client_order_id in prev['ClientOrderID'].keys()):
msgs.at[i, 'OrderNum'] = prev['MEOrderID'][order_id]
# If the clientOrderID is associated with a different ordernum increment the counter and test counter
if prev['MEOrderID'][order_id] != prev['ClientOrderID'][original_client_order_id]:
originalid_orderid_conflict += 1
elif order_id in prev['MEOrderID'].keys():
msgs.at[i, 'OrderNum'] = prev['MEOrderID'][order_id]
elif original_client_order_id in prev['ClientOrderID'].keys():
msgs.at[i, 'OrderNum'] = prev['ClientOrderID'][original_client_order_id]
else:
counter += 1
msgs.at[i, 'OrderNum'] = counter
# Case 2: User references Outbound ID (MEOrderID)
elif pd.notnull(order_id):
if order_id in prev['MEOrderID'].keys():
msgs.at[i, 'OrderNum'] = prev['MEOrderID'][order_id]
else:
counter += 1
msgs.at[i, 'OrderNum'] = counter
# Case 3: User references Inbound ID (OrigClientOrderID)
elif pd.notnull(original_client_order_id):
if original_client_order_id in prev['ClientOrderID'].keys():
msgs.at[i, 'OrderNum'] = prev['ClientOrderID'][original_client_order_id]
else:
counter += 1
msgs.at[i, 'OrderNum'] = counter
else:
# It should never happen that there is neither an Inbound nor an Outbound ID.
no_inbound_outbound_id += 1
counter += 1
msgs.at[i, 'OrderNum'] = counter
# Execution Report
# We should either have seen the MEOrderID or the ClientOrderID previously.
# An else condition is added for cases in which we have not previously
# seen the outbound or inbound due to packet loss.
elif msgs.at[i, 'MessageType'] == 'Execution_Report':
# Case 1: Both order id and client order id are populated
if pd.notnull(order_id) & pd.notnull(client_order_id):
# If both have already been seen, use the order number from the order ID.
# Otherwise use MEOrderID or ClientOrderID depending on which has been seen
if (order_id in prev['MEOrderID'].keys()) & (client_order_id in prev['ClientOrderID'].keys()):
msgs.at[i, 'OrderNum'] = prev['MEOrderID'][order_id]
# If the ClientOrderID is associated with a different ordernum increment the testing counter
if prev['MEOrderID'][order_id] != prev['ClientOrderID'][client_order_id]:
originalid_orderid_conflict += 1
elif order_id in prev['MEOrderID'].keys():
msgs.at[i, 'OrderNum'] = prev['MEOrderID'][order_id]
elif client_order_id in prev['ClientOrderID'].keys():
msgs.at[i, 'OrderNum'] = prev['ClientOrderID'][client_order_id]
else:
counter += 1
msgs.at[i, 'OrderNum'] = counter
# Case 2: only MEOrderID is populated
elif order_id in prev['MEOrderID'].keys():
msgs.at[i, 'OrderNum'] = prev['MEOrderID'][order_id]
# Case 3: only ClientOrderID is populated
elif client_order_id in prev['ClientOrderID'].keys():
msgs.at[i, 'OrderNum'] = prev['ClientOrderID'][client_order_id]
else:
counter += 1
msgs.at[i, 'OrderNum'] = counter
# Order Reject or Order Mass Cancel Report
# Note that order Mass Cancel Report is a response to an Order Mass Cancel Request message.
elif msgs.at[i, 'MessageType'] in {'Cancel_Reject', 'Mass_Cancel_Report', 'Other_Reject'}:
# Case 1: both are populated
if pd.notnull(order_id) & pd.notnull(client_order_id):
# If both have already been seen, use the order number from the order ID. If the ClientOrderID
# is associated with a different ordernum increment the testing counter
if (order_id in prev['MEOrderID'].keys()) & (client_order_id in prev['ClientOrderID'].keys()):
msgs.at[i, 'OrderNum'] = prev['MEOrderID'][order_id]
# If the ClientOrderID is associated with a different ordernum increment the testing counter
if prev['MEOrderID'][order_id] != prev['ClientOrderID'][client_order_id]:
originalid_orderid_conflict += 1
elif order_id in prev['MEOrderID'].keys():
msgs.at[i, 'OrderNum'] = prev['MEOrderID'][order_id]
elif client_order_id in prev['ClientOrderID'].keys():
# We require that the message occurs within 1min of an earlier message
# from the same User-ClientOrderID pair.
# In the LSE data the Symbol field is not populated for
# 'Cancel_Reject', 'Mass_Cancel_Report', 'Other_Reject'
# (including protocol reject and business message reject) messages.
# When we split the messages into symbol-dates, we include those messages
# in a symbol-date if we observe the same User-ClientOrderID pair in that
# symbol-date. To further confirm that those messages are actually in that
# symbol-date and avoid mismatch, we require that there is at least one
# message from the same User-ClientOrderID pair within one minute before the
# reject message/mass cancel message. While this method is not theoretically
# perfect, we believe that it is good enough for the purpose of this project,
# because it is reasonable to assume that users use unique Client Order IDs across symbols.
# (While this not a strict requirement in LSE, it appears in our data tht
# most users follow this rule, and in many other exchanges this is a strict requirement)
if msgs.at[i, 'MessageTimestamp'] < (time['ClientOrderID'][client_order_id] + pd.Timedelta('1 m')):
msgs.at[i, 'OrderNum'] = prev['ClientOrderID'][client_order_id]
else:
counter += 1
msgs.at[i, 'OrderNum'] = counter
else:
counter += 1
msgs.at[i, 'OrderNum'] = counter
# Case 2: Only ClientOrderID is populated
# Note that ClientOrderID must be populated, while MEOrderID may not, according to the LSE document
elif client_order_id in prev['ClientOrderID'].keys():
if msgs.at[i, 'MessageTimestamp'] < (time['ClientOrderID'][client_order_id] + pd.Timedelta('1 m')):
msgs.at[i, 'OrderNum'] = prev['ClientOrderID'][client_order_id]
else:
counter += 1
msgs.at[i, 'OrderNum'] = counter
else:
counter += 1
msgs.at[i, 'OrderNum'] = counter
# Update the set of previously observed MEOrderID values and times
if pd.notnull(order_id):
prev['MEOrderID'][order_id] = msgs.at[i, 'OrderNum']
time['MEOrderID'][order_id] = msgs.at[i, 'MessageTimestamp']
# Update the set of previously observed ClientOrderID values and times
if pd.notnull(client_order_id):
prev['ClientOrderID'][client_order_id] = msgs.at[i, 'OrderNum']
time['ClientOrderID'][client_order_id] = msgs.at[i, 'MessageTimestamp']
# the format of UniqueOrderID is Source_UserNum_OrderNum
msgs.at[i, 'UniqueOrderID'] = '%s_%06d_%08d' % (source, msgs.at[i, 'UserNum'], msgs.at[i, 'OrderNum'])
return msgs
def multi_process_wrapper(args):
date, sym, in_dir, out_dir = args
try:
preprocess_msgs(*args)
except:
print(f'Error: {date}, {sym}')
if __name__ == '__main__':
num_workers = 1
pairs = | pd.read_csv(file_symdates, dtype={'Date':'O','Symbol':'O'}) | pandas.read_csv |
import numpy as np
import pandas as pd
from CEBD1260_preprocessing import ohe
from CEBD1260_preprocessing import master_pipe
from CEBD1260_cleaning import dtype_conver
from scipy.sparse import coo_matrix, hstack
# to display maximum rows and columns
pd.set_option('display.max_rows', None)
| pd.set_option('display.max_columns', None) | pandas.set_option |
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
import empyrical
import vectorbt as vbt
from vectorbt import settings
from tests.utils import isclose
day_dt = np.timedelta64(86400000000000)  # one day expressed in nanoseconds
ts = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [5, 4, 3, 2, 1],
'c': [1, 2, 3, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
ret = ts.pct_change()
settings.returns['year_freq'] = '252 days' # same as empyrical
seed = 42
np.random.seed(seed)
benchmark_rets = pd.DataFrame({
'a': ret['a'] * np.random.uniform(0.8, 1.2, ret.shape[0]),
'b': ret['b'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 2,
'c': ret['c'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 3
})
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert ret.vbt.returns.wrapper.freq == day_dt
assert ret['a'].vbt.returns.wrapper.freq == day_dt
assert ret.vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert ret['a'].vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([1, 2, 3]).vbt.returns.wrapper.freq is None
assert pd.Series([1, 2, 3]).vbt.returns(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([1, 2, 3]).vbt.returns(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
def test_ann_factor(self):
assert ret['a'].vbt.returns(year_freq='365 days').ann_factor == 365
assert ret.vbt.returns(year_freq='365 days').ann_factor == 365
with pytest.raises(Exception) as e_info:
assert pd.Series([1, 2, 3]).vbt.returns(freq=None).ann_factor
def test_from_price(self):
pd.testing.assert_series_equal(pd.Series.vbt.returns.from_price(ts['a']).obj, ts['a'].pct_change())
pd.testing.assert_frame_equal(pd.DataFrame.vbt.returns.from_price(ts).obj, ts.pct_change())
assert pd.Series.vbt.returns.from_price(ts['a'], year_freq='365 days').year_freq == pd.to_timedelta('365 days')
assert pd.DataFrame.vbt.returns.from_price(ts, year_freq='365 days').year_freq == pd.to_timedelta('365 days')
def test_daily(self):
ret_12h = pd.DataFrame({
'a': [0.1, 0.1, 0.1, 0.1, 0.1],
'b': [-0.1, -0.1, -0.1, -0.1, -0.1],
'c': [0.1, -0.1, 0.1, -0.1, 0.1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1, 0),
datetime(2018, 1, 1, 12),
datetime(2018, 1, 2, 0),
datetime(2018, 1, 2, 12),
datetime(2018, 1, 3, 0)
]))
pd.testing.assert_series_equal(
ret_12h['a'].vbt.returns.daily(),
pd.Series(
np.array([0.21, 0.21, 0.1]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
name=ret_12h['a'].name
)
)
pd.testing.assert_frame_equal(
ret_12h.vbt.returns.daily(),
pd.DataFrame(
np.array([
[0.21, -0.19, -0.01],
[0.21, -0.19, -0.01],
[0.1, -0.1, 0.1]
]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
columns=ret_12h.columns
)
)
def test_annual(self):
pd.testing.assert_series_equal(
ret['a'].vbt.returns.annual(),
pd.Series(
np.array([4.]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
name=ret['a'].name
)
)
pd.testing.assert_frame_equal(
ret.vbt.returns.annual(),
pd.DataFrame(
np.array([[4., -0.8, 0.]]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
columns=ret.columns
)
)
def test_cumulative(self):
res_a = empyrical.cum_returns(ret['a']).rename('a')
res_b = empyrical.cum_returns(ret['b']).rename('b')
res_c = empyrical.cum_returns(ret['c']).rename('c')
pd.testing.assert_series_equal(
ret['a'].vbt.returns.cumulative(),
res_a
)
pd.testing.assert_frame_equal(
ret.vbt.returns.cumulative(),
pd.concat([res_a, res_b, res_c], axis=1)
)
def test_total_return(self):
res_a = empyrical.cum_returns_final(ret['a'])
res_b = empyrical.cum_returns_final(ret['b'])
res_c = empyrical.cum_returns_final(ret['c'])
assert isclose(ret['a'].vbt.returns.total(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.total(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('total_return')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_total(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_annualized_return(self):
res_a = empyrical.annual_return(ret['a'])
res_b = empyrical.annual_return(ret['b'])
res_c = empyrical.annual_return(ret['c'])
assert isclose(ret['a'].vbt.returns.annualized(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('annualized_return')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_annualized(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_alpha",
[1., 2., 3.],
)
def test_annualized_volatility(self, test_alpha):
res_a = empyrical.annual_volatility(ret['a'], alpha=test_alpha)
res_b = empyrical.annual_volatility(ret['b'], alpha=test_alpha)
res_c = empyrical.annual_volatility(ret['c'], alpha=test_alpha)
assert isclose(ret['a'].vbt.returns.annualized_volatility(levy_alpha=test_alpha), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized_volatility(levy_alpha=test_alpha),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('annualized_volatility')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_annualized_volatility(ret.shape[0], minp=1, levy_alpha=test_alpha).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_calmar_ratio(self):
res_a = empyrical.calmar_ratio(ret['a'])
res_b = empyrical.calmar_ratio(ret['b'])
res_c = empyrical.calmar_ratio(ret['c'])
assert isclose(ret['a'].vbt.returns.calmar_ratio(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.calmar_ratio(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('calmar_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_calmar_ratio(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free,test_required_return",
[(0.01, 0.1), (0.02, 0.2), (0.03, 0.3)],
)
def test_omega_ratio(self, test_risk_free, test_required_return):
res_a = empyrical.omega_ratio(ret['a'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_a):
res_a = np.inf
res_b = empyrical.omega_ratio(ret['b'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_b):
res_b = np.inf
res_c = empyrical.omega_ratio(ret['c'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_c):
res_c = np.inf
assert isclose(ret['a'].vbt.returns.omega_ratio(
risk_free=test_risk_free, required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.omega_ratio(risk_free=test_risk_free, required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('omega_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_omega_ratio(
ret.shape[0], minp=1, risk_free=test_risk_free, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free",
[0.01, 0.02, 0.03],
)
def test_sharpe_ratio(self, test_risk_free):
res_a = empyrical.sharpe_ratio(ret['a'], risk_free=test_risk_free)
res_b = empyrical.sharpe_ratio(ret['b'], risk_free=test_risk_free)
res_c = empyrical.sharpe_ratio(ret['c'], risk_free=test_risk_free)
assert isclose(ret['a'].vbt.returns.sharpe_ratio(risk_free=test_risk_free), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.sharpe_ratio(risk_free=test_risk_free),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_sharpe_ratio(ret.shape[0], minp=1, risk_free=test_risk_free).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_deflated_sharpe_ratio(self):
pd.testing.assert_series_equal(
ret.vbt.returns.deflated_sharpe_ratio(risk_free=0.01),
pd.Series([np.nan, np.nan, 0.0005355605507117676], index=ret.columns).rename('deflated_sharpe_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.deflated_sharpe_ratio(risk_free=0.03),
pd.Series([np.nan, np.nan, 0.0003423112350834066], index=ret.columns).rename('deflated_sharpe_ratio')
)
@pytest.mark.parametrize(
"test_required_return",
[0.01, 0.02, 0.03],
)
def test_downside_risk(self, test_required_return):
res_a = empyrical.downside_risk(ret['a'], required_return=test_required_return)
res_b = empyrical.downside_risk(ret['b'], required_return=test_required_return)
res_c = empyrical.downside_risk(ret['c'], required_return=test_required_return)
assert isclose(ret['a'].vbt.returns.downside_risk(required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.downside_risk(required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('downside_risk')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_downside_risk(
ret.shape[0], minp=1, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_required_return",
[0.01, 0.02, 0.03],
)
def test_sortino_ratio(self, test_required_return):
res_a = empyrical.sortino_ratio(ret['a'], required_return=test_required_return)
res_b = empyrical.sortino_ratio(ret['b'], required_return=test_required_return)
res_c = empyrical.sortino_ratio(ret['c'], required_return=test_required_return)
assert isclose(ret['a'].vbt.returns.sortino_ratio(required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.sortino_ratio(required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('sortino_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_sortino_ratio(
ret.shape[0], minp=1, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_information_ratio(self):
res_a = empyrical.excess_sharpe(ret['a'], benchmark_rets['a'])
res_b = empyrical.excess_sharpe(ret['b'], benchmark_rets['b'])
res_c = empyrical.excess_sharpe(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.information_ratio(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.information_ratio(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('information_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_information_ratio(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_beta(self):
res_a = empyrical.beta(ret['a'], benchmark_rets['a'])
res_b = empyrical.beta(ret['b'], benchmark_rets['b'])
res_c = empyrical.beta(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.beta(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.beta(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('beta')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_beta(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free",
[0.01, 0.02, 0.03],
)
def test_alpha(self, test_risk_free):
res_a = empyrical.alpha(ret['a'], benchmark_rets['a'], risk_free=test_risk_free)
res_b = empyrical.alpha(ret['b'], benchmark_rets['b'], risk_free=test_risk_free)
res_c = empyrical.alpha(ret['c'], benchmark_rets['c'], risk_free=test_risk_free)
assert isclose(ret['a'].vbt.returns.alpha(benchmark_rets['a'], risk_free=test_risk_free), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.alpha(benchmark_rets, risk_free=test_risk_free),
| pd.Series([res_a, res_b, res_c], index=ret.columns) | pandas.Series |
import glob
import logging
import os
import sys
from enum import Enum
import geopandas as gpd
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s || %(levelname)s || %(name)s || %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger("COVID19")
cwd = os.path.dirname(os.path.abspath(__file__))
class COVIDerror(Exception):
pass
class DataUrls(Enum):
zip_to_city_broken = (
"https://public.opendatasoft.com/explore/dataset/us-zip-code-latitude-and-longitude"
"/download/?format=csv&timezone=America/New_York"
"&lang=en&use_labels_for_header=true&csv_separator=%3B"
)
zip_to_city = (
"https://public.opendatasoft.com/explore/dataset"
"/georef-united-states-of-america-zc-point/download"
"/?format=csv&timezone=America/New_York&lang=en&use_labels_for_header=true&csv_separator=%3B"
)
maryland_zip_population = (
"https://www.maryland-demographics.com/zip_codes_by_population"
)
geoshape = (
"https://www2.census.gov/geo/tiger/TIGER2019/ZCTA5/tl_2019_us_zcta510.zip"
)
zip_covid_data_old = "https://opendata.arcgis.com/datasets/5f459467ee7a4ffda968139011f06c46_0.geojson"
zip_covid_data = (
"https://services.arcgis.com/njFNhDsUCentVYJW/arcgis/rest/services/MDCOVID19_MASTER_ZIP_CODE_CASES"
"/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json"
)
class Data:
def __init__(self, state="MD", datadir="./data"):
# data and URL path
self.state = state
self.data_path = datadir
self.zip_map_url = DataUrls.zip_to_city.value
self.population_url = DataUrls.maryland_zip_population.value
self.geo_shape_url = DataUrls.geoshape.value
self.MD_zip_data_url = DataUrls.zip_covid_data.value
        # data containers, filled in by get()
self.geo = None
self.zip_map = None
self.zip_covid = None
self.zip_population = None
def get(self, use_db=False):
"""
fill in data
"""
if use_db:
self.read_zip_COVID_web()
else:
self.read_zip_COVID()
self.read_map()
self.read_zip_map()
self.zip_codes = self.zip_map.Zip
self.read_population()
def _download(self, zipfile):
logger.info("Downloading: %s to %s" % (self.geo_shape_url, zipfile))
r = requests.get(self.geo_shape_url, stream=True)
total_size = int(r.headers.get("content-length", 0))
block_size = 1024 # 1 Kibibyte
t = tqdm(total=total_size, unit="iB", unit_scale=True)
with open(zipfile, "wb") as zf:
for data in r.iter_content(block_size):
t.update(len(data))
zf.write(data)
t.close()
if total_size == 0 and t.n != total_size:
logger.error("Failed downloading")
sys.exit()
else:
logger.info("Downloaded %s" % zipfile)
def download_zipfile(self, zipfile):
"""
get the zip file for geo information
"""
self._download(zipfile)
command = "unzip %s -d %s" % (zipfile, self.data_path)
logger.info(command)
os.system(command)
logger.info("unzipped %s" % zipfile)
def read_map(self):
"""
read geoshape of zip codes
"""
zipfile = self.data_path + "/tl_2019_us_zcta510.zip"
shapefile = zipfile.replace(".zip", ".shp")
if not os.path.isfile(shapefile):
self.download_zipfile(zipfile)
# out = gpd.read_file('zip://' + zipfile) \
self.geo = (
gpd.read_file(shapefile)
.rename(columns={"ZCTA5CE10": "Zip"})
.assign(Zip=lambda d: d.Zip.astype(int))
)
logger.info("Loaded geo shape")
def read_zip_COVID_web(self):
"""
cases count per zip code per day
"""
self.zip_covid = (
gpd.read_file(self.MD_zip_data_url)
.pipe(lambda d: d[~pd.isnull(d.ZIP_CODE)])
.drop(["OBJECTID", "geometry"], axis=1)
.pipe(pd.DataFrame)
.pipe(
pd.melt,
id_vars=["ZIP_CODE"],
var_name="Date",
value_name="Cases",
)
.assign(Cases=lambda d: d.Cases.fillna(0))
.assign(
Date=lambda d: d.Date.str.extract(
"([0-9]+_[0-9]+_[0-9]+)$", expand=False
)
)
.assign(Date=lambda d: pd.to_datetime(d.Date, format="%m_%d_%Y"))
.assign(ZIP_CODE=lambda d: d.ZIP_CODE.where(d.ZIP_CODE != "21802", "21804"))
.groupby(["ZIP_CODE", "Date"], as_index=False)
.agg({"Cases": "sum"})
.assign(ZIP_CODE=lambda d: d.ZIP_CODE.astype(int))
.rename(columns={"ZIP_CODE": "Zip"})
)
min_date = str(self.zip_covid.Date.min().date())
max_date = str(self.zip_covid.Date.max().date())
logger.info("Loaded %s to %s" % (min_date, max_date))
def read_zip_COVID(self):
"""
cases count per zip code per day
"""
logger.info("Using data from %s" % self.data_path)
covid_data = {}
data_files = glob.glob(self.data_path + "/*.tsv")
if len(data_files) == 0:
raise COVIDerror("No data from %s" % self.data_path)
data_files.sort()
logger.info("Latest file: %s" % data_files[-1])
for i, csv in enumerate(data_files):
date = os.path.basename(csv.replace(".tsv", ""))
covid_data[date] = pd.read_csv(
csv,
names=["Zip", "Cases"],
sep="\t",
dtype={"Zip": "Int64", "Cases": "str"},
).assign(Cases=lambda d: d.Cases.str.replace(" Cases", "").astype(int))
self.zip_covid = pd.concat(
date_data.assign(Date=date) for date, date_data in covid_data.items()
).assign(Date=lambda d: pd.to_datetime(d.Date, format="%Y-%m-%d"))
logger.info("Loaded daily COVID cases (%i days)" % (i + 1))
def read_zip_map(self):
"""
zip city information
"""
self.zip_map = (
            pd.read_csv(self.zip_map_url, sep=";")
        )
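        # Note (hedged): the original file is truncated here; downstream code
        # accesses self.zip_map.Zip, so the raw opendatasoft export is presumably
        # renamed/filtered before use. The exact column names are not shown in
        # this excerpt.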
import os
import random
import pytest
import pathlib
import tempfile
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from cytominer_eval.transform import metric_melt
from cytominer_eval.transform.util import set_pair_ids
from cytominer_eval.operations.util import assign_replicates, calculate_precision_recall
random.seed(123)
tmpdir = tempfile.gettempdir()
example_file = "SQ00015054_normalized_feature_select.csv.gz"
example_file = pathlib.Path(
"{file}/../../example_data/compound/{eg}".format(
file=os.path.dirname(__file__), eg=example_file
)
)
df = pd.read_csv(example_file)
# encoding: utf-8
##################################################
# This script shows how to create animated plots using matplotlib and a basic dataset
# Multiple tutorials inspired the current design but they mostly came from:
# https://towardsdatascience.com/how-to-create-animated-graphs-in-python-bb619cc2dec1
# Note: the project keeps being updated for each course, roughly yearly
##################################################
#
##################################################
# Author: <NAME>
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: <NAME>
# Email: <EMAIL>
# Status: development
##################################################
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
# We need to import numpy and matplotlib library
# importing libraries
import pandas as pd
import seaborn as sns
# Read files and prepare data
data = pd.read_csv('../data/2021_seguiment-covid19-bcn.csv')
#data = pd.read_csv('https://opendata-ajuntament.barcelona.cat/data/dataset/4f3ffbda-d5be-4f2a-a836-26a77be6df1a/resource/f627ac0a-d05f-416d-9773-eeb464a3fc44/download')
data.columns = ['date_indicator', 'frequency_indicator', 'place', 'name_indicator',
'name_variable', 'value', 'unit', 'source']
# We will use two datasets to generate plots
data_daily = data[data['name_indicator'] == 'Casos de COVID-19 a Barcelona (diari)']
data_accumulated = data[data['name_indicator'] == 'Casos de COVID-19 a Barcelona (acumulat)']
# We need the data to be in time format to calculate values in days after day zero
data_daily.loc[:, 'date_indicator'] = pd.to_datetime(data_daily['date_indicator'])
initial_day = data_daily['date_indicator'].min()
data_daily.loc[:, 'day_after_zero'] = data_daily['date_indicator'] - initial_day
data_daily.loc[:, 'day_after_zero'] = data_daily['day_after_zero']/np.timedelta64(1, 'D')
# We need the data to be in time format to calculate values in days after day zero
data_accumulated.loc[:, 'date_indicator'] = pd.to_datetime(data_accumulated['date_indicator'])
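# Hedged sketch (assumed continuation; the original script is truncated here):
# one plausible way to animate the accumulated case curve with the
# matplotlib.animation module imported above. Variable names are assumptions.
data_accumulated = data_accumulated.sort_values('date_indicator')
fig, ax = plt.subplots()
def animate_accumulated(frame):
    # redraw the curve up to the current frame
    ax.clear()
    subset = data_accumulated.iloc[:frame + 1]
    ax.plot(subset['date_indicator'], subset['value'])
    ax.set_title('Accumulated COVID-19 cases in Barcelona')
anim = animation.FuncAnimation(fig, animate_accumulated,
                               frames=len(data_accumulated), interval=50)
plt.show()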
from django.contrib import messages
from django.shortcuts import render
from . forms import ApprovalForm
from . models import Approvals
import pickle
from keras import backend as K
import joblib
import numpy as np
from sklearn import preprocessing
import pandas as pd
from collections import defaultdict, Counter
""" ONE HOT ENCODED FUNCTION"""
def ohevalue(df):
ohe_col = joblib.load("/Users/pro/Documents/python/MyDjangoProjects/Portfolio/loanapp/ohe_column.pkl")
cat_columns=['Gender','Married','Education','Self_Employed','Property_Area']
df_processed = pd.get_dummies(df, columns=cat_columns)
newdict={}
for i in ohe_col:
if i in df_processed.columns:
newdict[i]=df_processed[i].values
else:
newdict[i]=0
    newdf = pd.DataFrame(newdict)
    return newdf
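# Hedged sketch (assumption, not part of the original views.py): a typical view
# would validate ApprovalForm, one-hot encode the cleaned data with ohevalue,
# and feed it to a persisted classifier. The model path below is hypothetical.
def example_predict(form_data):
    model = joblib.load("/path/to/loan_classifier.pkl")  # hypothetical path
    features = ohevalue(pd.DataFrame([form_data]))
    return model.predict(features)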
from pathlib import Path
import copy
import pickle as pkl
from mmap import mmap
from scipy import stats as st
from scipy.stats._continuous_distns import FitDataError
import torch
from sklearn import svm
from sklearn import linear_model
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import os
import matplotlib.colors as mcolors
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from mpl_toolkits.mplot3d.art3d import juggle_axes
from matplotlib.ticker import MaxNLocator
from joblib import Memory
import math
import lyap
import model_loader_utils as loader
import initialize_and_train as train
import utils
memory = Memory(location='./memoization_cache', verbose=2)
# memory.clear()
## Functions for computing means and error bars for the plots. 68% confidence
# intervals and means are currently
# implemented in this code. The commented out code is for using a gamma
# distribution to compute these, but uses a
# custom version of seaborn plotting library to plot.
def orth_proj(v):
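    """Return the projector onto the orthogonal complement of v (assumes a 1-D torch tensor)."""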
n = len(v)
vv = v.reshape(-1, 1)
return torch.eye(n) - ([email protected])/(v@v)
USE_ERRORBARS = True
# USE_ERRORBARS = False
LEGEND = False
# LEGEND = True
folder_root = '../results/figs/'
def ci_acc(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=1.,
shift=-.0001, reflect=True)
return bounds[1], bounds[0]
# ci_acc = 68
# ci_acc = 95
def est_acc(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=1.,
shift=-.0001, reflect=True)
return median
# est_acc = "mean"
def ci_dim(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=.9999)
return bounds[1], bounds[0]
# ci_dim = 68
# ci_dim = 95
def est_dim(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=.9999)
return median
# est_dim = "mean"
def point_replace(a_string):
a_string = str(a_string)
return a_string.replace(".", "p")
def get_color(x, cmap=plt.cm.plasma):
"""Get normalized color assignments based on input data x and colormap
cmap."""
mag = torch.max(x) - torch.min(x)
x_norm = (x.float() - torch.min(x))/mag
return cmap(x_norm)
def median_and_bound(samples, perc_bound, dist_type='gamma', loc=0., shift=0,
reflect=False):
"""Get median and probability mass intervals for a gamma distribution fit
of samples."""
samples = np.array(samples)
def do_reflect(x, center):
return -1*(x - center) + center
if dist_type == 'gamma':
if np.sum(samples[0] == samples) == len(samples):
median = samples[0]
interval = [samples[0], samples[0]]
return median, interval
if reflect:
samples_reflected = do_reflect(samples, loc)
shape_ps, loc_fit, scale = st.gamma.fit(samples_reflected,
floc=loc + shift)
median_reflected = st.gamma.median(shape_ps, loc=loc, scale=scale)
interval_reflected = np.array(
st.gamma.interval(perc_bound, shape_ps, loc=loc, scale=scale))
median = do_reflect(median_reflected, loc)
interval = do_reflect(interval_reflected, loc)
else:
shape_ps, loc, scale = st.gamma.fit(samples, floc=loc + shift)
median = st.gamma.median(shape_ps, loc=loc, scale=scale)
interval = np.array(
st.gamma.interval(perc_bound, shape_ps, loc=loc, scale=scale))
else:
raise ValueError("Distribution option (dist_type) not recognized.")
return median, interval
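# Illustrative usage only (the sample values below are made up to show the call
# signature; they are not from any experiment in this project):
def _median_and_bound_demo():
    samples = [0.91, 0.93, 0.95, 0.94, 0.92]
    median, (lo, hi) = median_and_bound(samples, perc_bound=0.75, loc=1.,
                                        shift=-.0001, reflect=True)
    return median, lo, hi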
## Set parameters for figure aesthetics
plt.rcParams['font.size'] = 6
plt.rcParams['lines.markersize'] = 1
plt.rcParams['lines.linewidth'] = 1
plt.rcParams['axes.labelsize'] = 7
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.titlesize'] = 8
# Colormaps
class_style = 'color'
cols11 = np.array([90, 100, 170])/255
cols12 = np.array([37, 50, 120])/255
cols21 = np.array([250, 171, 62])/255
cols22 = np.array([156, 110, 35])/255
cmap_activation_pnts = mcolors.ListedColormap([cols11, cols21])
cmap_activation_pnts_edge = mcolors.ListedColormap([cols12, cols22])
rasterized = False
dpi = 800
ext = 'pdf'
# Default figure size
figsize = (1.5, 1.2)
ax_pos = (0, 0, 1, 1)
def make_fig(figsize=figsize, ax_pos=ax_pos):
"""Create figure."""
fig = plt.figure(figsize=figsize)
ax = fig.add_axes(ax_pos)
return fig, ax
def out_fig(fig, figname, subfolder='', show=False, save=True, axis_type=0,
name_order=0, data=None):
""" Save figure."""
folder = Path(folder_root)
figname = point_replace(figname)
# os.makedirs('../results/figs/', exist_ok=True)
os.makedirs(folder, exist_ok=True)
ax = fig.axes[0]
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_rasterized(rasterized)
if axis_type == 1:
ax.tick_params(axis='both', which='both',
# both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
left=False, top=False,
# ticks along the top edge are off
labelbottom=False,
labelleft=False) # labels along the bottom edge are off
elif axis_type == 2:
ax.axis('off')
if name_order == 0:
fig_path = folder/subfolder/figname
else:
fig_path = folder/subfolder/figname
if save:
os.makedirs(folder/subfolder, exist_ok=True)
fig_file = fig_path.with_suffix('.' + ext)
print(f"Saving figure to {fig_file}")
fig.savefig(fig_file, dpi=dpi, transparent=True, bbox_inches='tight')
if show:
fig.tight_layout()
fig.show()
if data is not None:
os.makedirs(folder/subfolder/'data/', exist_ok=True)
with open(folder/subfolder/'data/{}_data'.format(figname),
'wb') as fid:
pkl.dump(data, fid, protocol=4)
plt.close('all')
def autocorrelation(train_params, figname='autocorrelation'):
train_params_loc = train_params.copy()
model, params, run_dir = train.initialize_and_train(**train_params_loc)
class_datasets = params['datasets']
# val_loss = params['history']['losses']['val']
# val_losses[i0, i1] = val_loss
# val_acc = params['history']['accuracies']['val']
# val_accs[i0, i1] = val_acc
train_samples_per_epoch = len(class_datasets['train'])
class_datasets['train'].max_samples = 10
torch.manual_seed(params['model_seed'])
X = class_datasets['train'][:][0]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif train_params_loc['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# X = utils.extend_input(X, 10)
loader.load_model_from_epoch_and_dir(model, run_dir, -1)
hid = []
hid += model.get_post_activations(X)[:-1]
# auto_corr_mean = []
# auto_corr_var = []
auto_corr_table = pd.DataFrame(columns=['t_next', 'autocorr'])
h = hid[0]
for i0 in range(len(hid)):
h_next = hid[i0]
overlap = torch.sum(h*h_next, dim=1)
norms_h = torch.sqrt(torch.sum(h**2, dim=1))
norms_h_next = torch.sqrt(torch.sum(h_next**2, dim=1))
corrs = overlap/(norms_h*norms_h_next)
avg_corr = torch.mean(corrs)
d = {'t_next': i0, 'autocorr': corrs}
auto_corr_table = auto_corr_table.append(pd.DataFrame(d),
ignore_index=True)
fig, ax = make_fig(figsize)
sns.lineplot(ax=ax, x='t_next', y='autocorr', data=auto_corr_table)
out_fig(fig, figname)
def snapshots_through_time(train_params, figname="snap", subdir="snaps"):
"""
Plot PCA snapshots of the representation through time.
Parameters
----------
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training.
"""
subdir = Path(subdir)
X_dim = train_params['X_dim']
FEEDFORWARD = train_params['network'] == 'feedforward'
num_pnts_dim_red = 800
num_plot = 600
train_params_loc = copy.deepcopy(train_params)
model, params, run_dir = train.initialize_and_train(**train_params_loc)
class_datasets = params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(train_params_loc['model_seed'])
X, Y = class_datasets['train'][:]
if FEEDFORWARD:
T = 10
y = Y
X0 = X
else:
T = 30
# T = 100
X = utils.extend_input(X, T + 2)
X0 = X[:, 0]
y = Y[:, -1]
loader.load_model_from_epoch_and_dir(model, run_dir, 0, 0)
hid_0 = [X0]
hid_0 += model.get_post_activations(X)[:-1]
loader.load_model_from_epoch_and_dir(model, run_dir,
train_params_loc['num_epochs'], 0)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
if FEEDFORWARD:
r = model.layer_weights[-1].detach().clone().T
else:
r = model.Wout.detach().clone()
# r0_n = r[0] / torch.norm(r[0])
# r1_n = r[1] / torch.norm(r[1])
#
# r0_n_v = r0_n.reshape(r0_n.shape[0], 1)
# r1_n_v = r1_n.reshape(r1_n.shape[0], 1)
# r0_orth = torch.eye(len(r0_n)) - r0_n_v @ r0_n_v.T
# r1_orth = torch.eye(len(r1_n)) - r1_n_v @ r1_n_v.T
# h = hid[10]
# # h_proj = h @ r_orth
# u, s, v = torch.svd(h)
# v0 = v[:, 0]
# def orth_projector(v):
# n = len(v)
# return (torch.eye(n) - v.reshape(n, 1)@v.reshape(1, n))/(v@v)
# v0_orth = (torch.eye(n) - v0.reshape(n,1)@v0.reshape(1,n))/(v0@v0)
# h_v0_orth = h @ v0_orth
# r0_e_p = orth_projector(r0_e)
# r1_e_p = orth_projector(r1_e)
# h_r0_e_p0 = h[y] @ r0_e_p
# h_r0_e_p1 = h[y] @ r1_e_p
coloring = get_color(y, cmap_activation_pnts)[:num_plot]
edge_coloring = get_color(y, cmap_activation_pnts_edge)[:num_plot]
## Now get principal components (pcs) and align them from time point to
# time point
pcs = []
p_track = 0
norm = np.linalg.norm
projs = []
for i1 in range(1, len(hid)):
# pc = utils.get_pcs_covariance(hid[i1], [0, 1])
out = utils.get_pcs_covariance(hid[i1], [0, 1], return_extra=True)
pc = out['pca_projection']
mu = out['mean']
proj = out['pca_projectors']
mu_proj = mu@proj[:, :2]
if i1 > 0:
# Check for the best alignment
pc_flip_x = pc.clone()
pc_flip_x[:, 0] = -pc_flip_x[:, 0]
pc_flip_y = pc.clone()
pc_flip_y[:, 1] = -pc_flip_y[:, 1]
pc_flip_both = pc.clone()
pc_flip_both[:, 0] = -pc_flip_both[:, 0]
pc_flip_both[:, 1] = -pc_flip_both[:, 1]
difference0 = norm(p_track - pc)
difference1 = norm(p_track - pc_flip_x)
difference2 = norm(p_track - pc_flip_y)
difference3 = norm(p_track - pc_flip_both)
amin = np.argmin(
[difference0, difference1, difference2, difference3])
if amin == 1:
pc[:, 0] = -pc[:, 0]
proj[:, 0] = -proj[:, 0]
elif amin == 2:
pc[:, 1] = -pc[:, 1]
proj[:, 1] = -proj[:, 1]
elif amin == 3:
pc[:, 0] = -pc[:, 0]
pc[:, 1] = -pc[:, 1]
proj[:, 0] = -proj[:, 0]
proj[:, 1] = -proj[:, 1]
pc = pc + mu_proj
p_track = pc.clone()
pcs.append(pc[:num_plot])
projs.append(proj)
def take_snap(i0, scats, fig, dim=2, border=False):
# ax = fig.axes[0]
hid_pcs_plot = pcs[i0][:, :dim].numpy()
xm = np.min(hid_pcs_plot[:, 0])
xM = np.max(hid_pcs_plot[:, 0])
ym = np.min(hid_pcs_plot[:, 1])
yM = np.max(hid_pcs_plot[:, 1])
xc = (xm + xM)/2
yc = (ym + yM)/2
hid_pcs_plot[:, 0] = hid_pcs_plot[:, 0] - xc
hid_pcs_plot[:, 1] = hid_pcs_plot[:, 1] - yc
v = projs[i0]
# u, s, v = torch.svd(h)
if r.shape[0] == 2:
r0_p = r[0]@v
r1_p = r[1]@v
else:
r0_p = r.flatten()@v
r1_p = -r.flatten()@v
if class_style == 'shape':
scats[0][0].set_offsets(hid_pcs_plot)
else:
if dim == 3:
scat._offsets3d = juggle_axes(*hid_pcs_plot[:, :dim].T, 'z')
else:
scats[0].set_offsets(hid_pcs_plot)
scats[1].set_offsets(r0_p[:2].reshape(1, 2))
scats[2].set_offsets(r1_p[:2].reshape(1, 2))
xm = np.min(hid_pcs_plot[:, 0])
xM = np.max(hid_pcs_plot[:, 0])
ym = np.min(hid_pcs_plot[:, 1])
yM = np.max(hid_pcs_plot[:, 1])
max_extent = max(xM - xm, yM - ym)
max_extent_arg = xM - xm > yM - ym
if dim == 2:
x_factor = .4
if max_extent_arg:
ax.set_xlim(
[xm - x_factor*max_extent, xM + x_factor*max_extent])
ax.set_ylim([xm - .1*max_extent, xM + .1*max_extent])
else:
ax.set_xlim(
[ym - x_factor*max_extent, yM + x_factor*max_extent])
ax.set_ylim([ym - .1*max_extent, yM + .1*max_extent])
else:
if max_extent_arg:
ax.set_xlim([xm - .1*max_extent, xM + .1*max_extent])
ax.set_ylim([xm - .1*max_extent, xM + .1*max_extent])
ax.set_zlim([xm - .1*max_extent, xM + .1*max_extent])
else:
ax.set_xlim([ym - .1*max_extent, yM + .1*max_extent])
ax.set_ylim([ym - .1*max_extent, yM + .1*max_extent])
ax.set_zlim([ym - .1*max_extent, yM + .1*max_extent])
# ax.plot([r0_p[0]], [r0_p[1]], 'x', markersize=3, color='black')
# ax.plot([r1_p[0]], [r1_p[1]], 'x', markersize=3, color='black')
ax.set_ylim([-4, 4])
if dim == 3:
out_fig(fig, f"{figname}_{i0}",
subfolder=subdir, axis_type=0,
name_order=1)
else:
out_fig(fig, f"{figname}_{i0}",
subfolder=subdir, axis_type=0,
name_order=1)
return scats,
dim = 2
hid_pcs_plot = pcs[0]
if dim == 3:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim([-10, 10])
ax.set_ylim([-10, 10])
ax.set_zlim([-10, 10])
else:
fig, ax = make_fig()
ax.grid(False)
scat1 = ax.scatter(*hid_pcs_plot[:num_plot, :dim].T, c=coloring,
edgecolors=edge_coloring, s=10, linewidths=.65)
ax.plot([0], [0], 'x', markersize=7)
scat2 = ax.scatter([0], [0], marker='x', s=3, c='black')
scat3 = ax.scatter([0], [0], marker='x', s=3, color='black')
scats = [scat1, scat2, scat3]
# ax.plot([0], [0], 'o', markersize=10)
if FEEDFORWARD:
snap_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
else:
snap_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 21, 26,
31]) # snap_idx = list(range(T + 1))
for i0 in snap_idx:
take_snap(i0, scats, fig, dim=dim, border=False)
def _cluster_holdout_test_acc_stat_fun(h, y, clust_identity,
classifier_type='logistic_regression',
num_repeats=5, train_ratio=0.8, seed=11):
np.random.seed(seed)
num_clusts = np.max(clust_identity) + 1
num_clusts_train = int(round(num_clusts*train_ratio))
num_samples = h.shape[0]
test_accs = np.zeros(num_repeats)
train_accs = np.zeros(num_repeats)
for i0 in range(num_repeats):
permutation = np.random.permutation(np.arange(len(clust_identity)))
perm_inv = np.argsort(permutation)
clust_identity_shuffled = clust_identity[permutation]
train_idx = clust_identity_shuffled <= num_clusts_train
test_idx = clust_identity_shuffled > num_clusts_train
hid_train = h[train_idx[perm_inv]]
y_train = y[train_idx[perm_inv]]
y_test = y[test_idx[perm_inv]]
hid_test = h[test_idx[perm_inv]]
if classifier_type == 'svm':
classifier = svm.LinearSVC(random_state=3*i0 + 1)
else:
classifier = linear_model.LogisticRegression(random_state=3*i0 + 1,
solver='lbfgs')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
classifier.fit(hid_train, y_train)
train_accs[i0] = classifier.score(hid_train, y_train)
test_accs[i0] = classifier.score(hid_test, y_test)
return train_accs, test_accs
def clust_holdout_over_layers(seeds, gs, train_params,
figname="clust_holdout_over_layers"):
"""
Logistic regression training and testing error on the representation
through the layers. Compares networks trained
with different choices of g_radius (specified by input parameter gs).
Parameters
----------
seeds : List[int]
List of random number seeds to use for generating instantiations of
the model and dataset. Variation over
these seeds is used to plot error bars.
gs : List[float]
Values of g_radius to iterate over.
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training. Value of g_radius
is overwritten by values in gs.
figname : str
Name of the figure to save.
"""
if not hasattr(gs, '__len__'):
gs = [gs]
layer_label = 'layer'
@memory.cache
def generate_data_table_clust(seeds, gs, train_params):
layer_label = 'layer'
clust_acc_table = pd.DataFrame(
columns=['seed', 'g_radius', 'training', layer_label, 'LR training',
'LR testing'])
train_params_loc = copy.deepcopy(train_params)
for i0, seed in enumerate(seeds):
for i1, g in enumerate(gs):
train_params_loc['g_radius'] = g
train_params_loc['model_seed'] = seed
num_pnts_dim_red = 500
model, params, run_dir = train.initialize_and_train(
**train_params_loc)
class_datasets = params['datasets']
num_train_samples = len(class_datasets['train'])
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(params['model_seed'])
X, Y = class_datasets['train'][:]
if train_params_loc['network'] == 'feedforward':
X0 = X
else:
X0 = X[:, 0]
for epoch, epoch_label in zip([0, -1], ['before', 'after']):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
if len(Y.shape) > 1:
Y = Y[:, -1]
cluster_identity = class_datasets['train'].cluster_identity
ds = []
for lay, h in enumerate(hid):
stat = _cluster_holdout_test_acc_stat_fun(h.numpy(),
Y.numpy(),
cluster_identity)
ds.extend([{
'seed': seed, 'g_radius': g,
'training': epoch_label, layer_label: lay,
'LR training': stat[0][k], 'LR testing': stat[1][k]
} for k in range(len(stat[0]))])
                    clust_acc_table = clust_acc_table.append(pd.DataFrame(ds), ignore_index=True)
"""
Created on Fri Fev 13 16:26:00 2020
@author: <NAME>
"""
import sys
import scipy.io as sio
from pandas import DataFrame
from numpy import array
from os import listdir
from os.path import isfile, join
from myMNE import makeMNE
def files_in_path(path):
return [path+"/"+f for f in listdir(path) if isfile(join(path, f))]
# Getting data from MNE strutuct
def data_from_mne(files):
return array([file.get_data().T for file in files])
def read_file(PATH_AUD, PATH_VIS):
path_file_aud = files_in_path(PATH_AUD)
path_file_vis = files_in_path(PATH_VIS)
# Reading files with the MNE library
files_aud = list(map(makeMNE, path_file_aud))
files_vis = list(map(makeMNE, path_file_vis))
# Getting data in numpy format
data_aud = data_from_mne(files_aud)
data_vis = data_from_mne(files_vis)
return data_aud,data_vis, files_aud[0].ch_names
def _bad_trials(file_name):
file = sio.loadmat(file_name,squeeze_me=True, struct_as_record=False)
badtrials = lambda df : df['ft_data_auditory'].badtrials
return badtrials(file)
def get_bad_trials(PATH_AUD, PATH_VIS):
path_file_aud = files_in_path(PATH_AUD)
path_file_vis = files_in_path(PATH_VIS)
bad_trials_aud = list(map(_bad_trials, path_file_aud))
bad_trials_vis = list(map(_bad_trials, path_file_vis))
df_bad_trials_aud = DataFrame([bad_trials_aud],index=['Aud'])
df_bad_trials_vis = DataFrame([bad_trials_vis],index=['Vis'])
return df_bad_trials_aud, df_bad_trials_vis
def get_bad_trials_comportamental(modality: str, N_TRIALS = 120, PATH_INFO = '../data/raw/info_'):
"""Read function to get the time (or the indice) when occurs S2.
Parameters
----------
modality: str
It will only work if the modality equals to 'aud' or 'vis'.
export_as_indice: bool
Control option for export type (indice or time)
Returns
-------
agg_by_person: np.array
TO-DO: text.
"""
# Concatenating with 'aud' or 'vis'
info_path = PATH_INFO+modality
# Mapping the files listed in the folder for reading function.
# Each file contains information about a single individual experiment.
delays_people = list(map(sio.loadmat, files_in_path(info_path)))
# Accumulator variable
agg_by_person = []
for delay_by_person in delays_people:
#import pdb; pdb.set_trace()
# Accessing value in struct from matlab
time_delay_by_person = delay_by_person['report']['all_trials_delay'][0][0][0]
# Values in second
time_reproduce_by_person = delay_by_person['report']['time_action'][0][0][0]
agg_by_person.append(time_reproduce_by_person/time_delay_by_person)
# Export as numpy for simplicity
bad_comport = [DataFrame(array(agg_by_person))[i].apply(lambda x: (
False if ((x >= 2.0) | (x < 0.5)) else True)) for i in range(N_TRIALS)]
    df_bad_comport = DataFrame(bad_comport)
    return df_bad_comport
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/27 9:55 AM
# @Author : R
# @File : TMDB_Predict_Finally.py
# @Software: PyCharm
# coding: utf-8
# # Kaggle for TMDB
# In[1]:
import numpy as np
import pandas as pd
import warnings
from tqdm import tqdm
from datetime import datetime
from sklearn.preprocessing import LabelEncoder
from collections import Counter
warnings.filterwarnings('ignore')
# get_ipython().run_line_magic('matplotlib', 'inline')
# Data description
# id: unique identifier of each movie
# belongs_to_collection: JSON with the collection's TMDB id, name, poster and backdrop URLs
# budget: movie budget; a value of 0 means unknown
# genres: list of movie genres, JSON containing id and name
# homepage: URL of the movie's official homepage
# imdb_id: unique id of the movie in the IMDB database
# original_language: original language of the movie, a 2-character string
# original_title: original title of the movie, which may differ from the name in belongs_to_collection
# overview: plot summary
# popularity: popularity of the movie, a float value
# poster_path: URL of the movie poster
# production_companies: JSON with the id and name of the production companies
# production_countries: JSON with the 2-character code and full name of the production countries
# release_date: release date of the movie
# runtime: movie runtime
# spoken_languages: language versions of the movie, JSON
# status: whether the movie has been released
# tagline: movie tagline
# title: English title of the movie
# keywords: movie keywords, JSON
# cast: JSON list of the cast, including id, name, gender, etc.
# crew: information about the crew, including director, writer, etc.
# revenue: total revenue, the target to predict
# # EDA
# EDA has already been done
# Feature engineering and prediction
# Two additional datasets are used:
# 1. TMDB Competition Additional Features: adds three new features: popularity2, rating, totalVotes
# 2. TMDB Competition Additional Training Data: 2000 extra training rows, without all attributes of the original training set
# In[52]:
# Feature Engineering & Prediction
# Data preprocessing function, including conversion of non-numeric attributes to numeric ones
def prepare(df):
global json_cols
global train_dict
df[['release_month', 'release_day', 'release_year']] = df['release_date'].str.split('/', expand=True).replace(
np.nan, 0).astype(int)
df['release_year'] = df['release_year']
df.loc[(df['release_year'] <= 19) & (df['release_year'] < 100), "release_year"] += 2000
df.loc[(df['release_year'] > 19) & (df['release_year'] < 100), "release_year"] += 1900
    # Extract day-of-week and quarter information from the release date
releaseDate = pd.to_datetime(df['release_date'])
df['release_dayofweek'] = releaseDate.dt.dayofweek
df['release_quarter'] = releaseDate.dt.quarter
    # Fill missing rating and totalVotes values
rating_na = df.groupby(["release_year", "original_language"])['rating'].mean().reset_index()
df[df.rating.isna()]['rating'] = df.merge(rating_na, how='left', on=["release_year", "original_language"])
vote_count_na = df.groupby(["release_year", "original_language"])['totalVotes'].mean().reset_index()
df[df.totalVotes.isna()]['totalVotes'] = df.merge(vote_count_na, how='left',
on=["release_year", "original_language"])
# df['rating'] = df['rating'].fillna(1.5)
# df['totalVotes'] = df['totalVotes'].fillna(6)
    # Build a new feature, weightedRating
df['weightedRating'] = (df['rating'] * df['totalVotes'] + 6.367 * 1000) / (df['totalVotes'] + 1000)
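    # (The line above is an IMDB-style Bayesian average: the observed rating is
    # shrunk toward a prior mean of 6.367 with a prior weight of 1000 votes, so
    # sparsely voted movies move toward the global average.)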
    # Money is worth different amounts in different periods, so apply an "inflation" adjustment to the budget at 1.8% per year
df['originalBudget'] = df['budget']
df['inflationBudget'] = df['budget'] + df['budget'] * 1.8 / 100 * (
2018 - df['release_year']) # Inflation simple formula
df['budget'] = np.log1p(df['budget'])
    # Count the gender composition of crew and cast
df['genders_0_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 0]))
df['genders_1_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 1]))
df['genders_2_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 2]))
df['genders_0_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 0]))
df['genders_1_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 1]))
df['genders_2_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 2]))
    # Statistics for belongs_to_collection, Keywords and cast
df['_collection_name'] = df['belongs_to_collection'].apply(lambda x: x[0]['name'] if x != {} else 0)
le = LabelEncoder()
le.fit(list(df['_collection_name'].fillna('')))
df['_collection_name'] = le.transform(df['_collection_name'].fillna('').astype(str))
df['_num_Keywords'] = df['Keywords'].apply(lambda x: len(x) if x != {} else 0)
df['_num_cast'] = df['cast'].apply(lambda x: len(x) if x != {} else 0)
df['_num_crew'] = df['crew'].apply(lambda x: len(x) if x != {} else 0)
df['_popularity_mean_year'] = df['popularity'] / df.groupby("release_year")["popularity"].transform('mean')
df['_budget_runtime_ratio'] = df['budget'] / df['runtime']
df['_budget_popularity_ratio'] = df['budget'] / df['popularity']
df['_budget_year_ratio'] = df['budget'] / (df['release_year'] * df['release_year'])
df['_releaseYear_popularity_ratio'] = df['release_year'] / df['popularity']
df['_releaseYear_popularity_ratio2'] = df['popularity'] / df['release_year']
df['_popularity_totalVotes_ratio'] = df['totalVotes'] / df['popularity']
df['_rating_popularity_ratio'] = df['rating'] / df['popularity']
df['_rating_totalVotes_ratio'] = df['totalVotes'] / df['rating']
df['_totalVotes_releaseYear_ratio'] = df['totalVotes'] / df['release_year']
df['_budget_rating_ratio'] = df['budget'] / df['rating']
df['_runtime_rating_ratio'] = df['runtime'] / df['rating']
df['_budget_totalVotes_ratio'] = df['budget'] / df['totalVotes']
    # Flag whether the movie has a homepage
df['has_homepage'] = 1
df.loc[pd.isnull(df['homepage']), "has_homepage"] = 0
    # Flag whether belongs_to_collection is missing
df['isbelongs_to_collectionNA'] = 0
df.loc[pd.isnull(df['belongs_to_collection']), "isbelongs_to_collectionNA"] = 1
    # Flag whether tagline is missing
df['isTaglineNA'] = 0
df.loc[df['tagline'] == 0, "isTaglineNA"] = 1
    # Flag whether the original language is English
df['isOriginalLanguageEng'] = 0
df.loc[df['original_language'] == "en", "isOriginalLanguageEng"] = 1
    # Flag whether the original title differs from the title
df['isTitleDifferent'] = 1
df.loc[df['original_title'] == df['title'], "isTitleDifferent"] = 0
    # Flag whether the movie has been released
df['isMovieReleased'] = 1
df.loc[df['status'] != "Released", "isMovieReleased"] = 0
    # Flag whether the movie has an overview
df['isOverviewNA'] = 0
df.loc[pd.isnull(df['overview']), 'isOverviewNA'] = 1
    # Extract the collection id
df['collection_id'] = df['belongs_to_collection'].apply(lambda x: np.nan if len(x) == 0 else x[0]['id'])
    # Length statistics for original_title
df['original_title_letter_count'] = df['original_title'].str.len()
df['original_title_word_count'] = df['original_title'].str.split().str.len()
    # Length / word counts for title, overview and tagline
df['title_word_count'] = df['title'].str.split().str.len()
df['overview_word_count'] = df['overview'].str.split().str.len()
df['tagline_word_count'] = df['tagline'].str.split().str.len()
df['len_title'] = df['title'].fillna('').apply(lambda x: len(str(x)))
    # Counts for production_companies, production_countries, cast, crew and spoken_languages
df['production_countries_count'] = df['production_countries'].apply(lambda x: len(x))
df['production_companies_count'] = df['production_companies'].apply(lambda x: len(x))
df['cast_count'] = df['cast'].apply(lambda x: len(x))
df['crew_count'] = df['crew'].apply(lambda x: len(x))
df['spoken_languages_count'] = df['spoken_languages'].apply(lambda x: len(x))
df['genres_count'] = df['genres'].apply(lambda x: len(x))
    # Group by year and compute mean values for filling
df['meanruntimeByYear'] = df.groupby("release_year")["runtime"].aggregate('mean')
df['meanPopularityByYear'] = df.groupby("release_year")["popularity"].aggregate('mean')
df['meanBudgetByYear'] = df.groupby("release_year")["budget"].aggregate('mean')
df['meantotalVotesByYear'] = df.groupby("release_year")["totalVotes"].aggregate('mean')
df['meanTotalVotesByRating'] = df.groupby("rating")["totalVotes"].aggregate('mean')
df['medianBudgetByYear'] = df.groupby("release_year")["budget"].aggregate('median')
####################################################################################
df['_popularity_theatrical_ratio'] = df['theatrical'] / df['popularity']
df['_budget_theatrical_ratio'] = df['budget'] / df['theatrical']
# runtime
df['runtime_cat_min_60'] = df['runtime'].apply(lambda x: 1 if (x <= 60) else 0)
df['runtime_cat_61_80'] = df['runtime'].apply(lambda x: 1 if (x > 60) & (x <= 80) else 0)
df['runtime_cat_81_100'] = df['runtime'].apply(lambda x: 1 if (x > 80) & (x <= 100) else 0)
df['runtime_cat_101_120'] = df['runtime'].apply(lambda x: 1 if (x > 100) & (x <= 120) else 0)
df['runtime_cat_121_140'] = df['runtime'].apply(lambda x: 1 if (x > 120) & (x <= 140) else 0)
df['runtime_cat_141_170'] = df['runtime'].apply(lambda x: 1 if (x > 140) & (x <= 170) else 0)
df['runtime_cat_171_max'] = df['runtime'].apply(lambda x: 1 if (x >= 170) else 0)
lang = df['original_language']
df_more_17_samples = [x[0] for x in Counter(pd.DataFrame(lang).stack()).most_common(17)]
for col in df_more_17_samples:
df[col] = df['original_language'].apply(lambda x: 1 if x == col else 0)
for col in range(1, 12):
df['month' + str(col)] = df['release_month'].apply(lambda x: 1 if x == col else 0)
# feature engeneering : Release date per quarter one hot encoding
for col in range(1, 4):
df['quarter' + str(col)] = df['release_quarter'].apply(lambda x: 1 if x == col else 0)
for col in range(1, 7):
df['dayofweek' + str(col)] = df['release_dayofweek'].apply(lambda x: 1 if x == col else 0)
    # Newly added features
df['is_release_day_of_1'] = 0
df.loc[df['release_day'] == 1, 'is_release_day_of_1'] = 1
df['is_release_day_of_15'] = 0
df.loc[df['release_day'] == 15, 'is_release_day_of_15'] = 1
    # More new features (left commented out)
# df['popularity2'] = np.log1p(df['popularity2'])
# df['popularity'] = np.log1p(df['popularity'])
# for col in range(1, 32):
# df['release_day' + str(col)] = df['release_day'].apply(lambda x: 1 if x == col else 0)
df['is_release_day_of_31'] = 0
df.loc[df['release_day'] == 31, 'is_release_day_of_15'] = 1
# popularity
df['popularity_cat_25'] = df['popularity'].apply(lambda x: 1 if (x <= 25) else 0)
df['popularity_cat_26_50'] = df['popularity'].apply(lambda x: 1 if (x > 25) & (x <= 50) else 0)
df['popularity_cat_51_100'] = df['popularity'].apply(lambda x: 1 if (x > 50) & (x <= 100) else 0)
df['popularity_cat_101_150'] = df['popularity'].apply(lambda x: 1 if (x > 100) & (x <= 150) else 0)
df['popularity_cat_151_200'] = df['popularity'].apply(lambda x: 1 if (x > 150) & (x <= 200) else 0)
df['popularity_cat_201_max'] = df['popularity'].apply(lambda x: 1 if (x >= 200) else 0)
df['_runtime_totalVotes_ratio'] = df['runtime'] / df['totalVotes']
df['_runtime_popularity_ratio'] = df['runtime'] / df['popularity']
#
df['_rating_theatrical_ratio'] = df['theatrical'] / df['rating']
df['_totalVotes_theatrical_ratio'] = df['theatrical'] / df['totalVotes']
df['_budget_mean_year'] = df['budget'] / df.groupby("release_year")["budget"].transform('mean')
df['_runtime_mean_year'] = df['runtime'] / df.groupby("release_year")["runtime"].transform('mean')
df['_rating_mean_year'] = df['rating'] / df.groupby("release_year")["rating"].transform('mean')
df['_totalVotes_mean_year'] = df['totalVotes'] / df.groupby("release_year")["totalVotes"].transform('mean')
###############################################################
    # Apply one-hot-style encoding to JSON attributes that hold multiple values
for col in ['genres', 'production_countries', 'spoken_languages', 'production_companies','Keywords']:
df[col] = df[col].map(lambda x: sorted(
list(set([n if n in train_dict[col] else col + '_etc' for n in [d['name'] for d in x]])))).map(
lambda x: ','.join(map(str, x)))
temp = df[col].str.get_dummies(sep=',')
df = pd.concat([df, temp], axis=1, sort=False)
    # Drop non-numeric attributes and attributes from which no useful information has been extracted yet
df.drop(['genres_etc'], axis=1, inplace=True)
df = df.drop(['id', 'revenue', 'belongs_to_collection', 'genres', 'homepage', 'imdb_id', 'overview', 'runtime'
, 'poster_path', 'production_companies', 'production_countries', 'release_date', 'spoken_languages'
, 'status', 'title', 'Keywords', 'cast', 'crew', 'original_language', 'original_title', 'tagline',
'collection_id'
], axis=1)
    # Fill missing values
df.fillna(value=0.0, inplace=True)
return df
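# Note: prepare() declares the module-level globals json_cols and train_dict,
# which are populated later in the original script (outside this excerpt).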
# Manually fix some records in train
# The fixes cover budget and revenue
# Handle the cases where budget is far smaller than revenue
# Principle: fill in real values where they can be looked up; otherwise use the mean of similar movies from the same year
train = pd.read_csv('train.csv')
train.loc[train['id'] == 16, 'revenue'] = 192864 # Skinning
train.loc[train['id'] == 90, 'budget'] = 30000000 # Sommersby
train.loc[train['id'] == 118, 'budget'] = 60000000 # Wild Hogs
train.loc[train['id'] == 149, 'budget'] = 18000000 # Beethoven
train.loc[train['id'] == 313, 'revenue'] = 12000000 # The Cookout
train.loc[train['id'] == 451, 'revenue'] = 12000000 # Chasing Liberty
train.loc[train['id'] == 464, 'budget'] = 20000000 # Parenthood
train.loc[train['id'] == 470, 'budget'] = 13000000 # The Karate Kid, Part II
train.loc[train['id'] == 513, 'budget'] = 930000 # From Prada to Nada
train.loc[train['id'] == 797, 'budget'] = 8000000 # Welcome to Dongmakgol
train.loc[train['id'] == 819, 'budget'] = 90000000 # Alvin and the Chipmunks: The Road Chip
train.loc[train['id'] == 850, 'budget'] = 90000000 # Modern Times
train.loc[train['id'] == 1007, 'budget'] = 2 # Zyzzyx Road
train.loc[train['id'] == 1112, 'budget'] = 7500000 # An Officer and a Gentleman
train.loc[train['id'] == 1131, 'budget'] = 4300000 # Smokey and the Bandit
train.loc[train['id'] == 1359, 'budget'] = 10000000 # Stir Crazy
train.loc[train['id'] == 1542, 'budget'] = 1 # All at Once
train.loc[train['id'] == 1570, 'budget'] = 15800000 # Crocodile Dundee II
train.loc[train['id'] == 1571, 'budget'] = 4000000 # Lady and the Tramp
train.loc[train['id'] == 1714, 'budget'] = 46000000 # The Recruit
train.loc[train['id'] == 1721, 'budget'] = 17500000 # Cocoon
train.loc[train['id'] == 1865, 'revenue'] = 25000000 # Scooby-Doo 2: Monsters Unleashed
train.loc[train['id'] == 1885, 'budget'] = 12 # In the Cut
train.loc[train['id'] == 2091, 'budget'] = 10 # Deadfall
train.loc[train['id'] == 2268, 'budget'] = 17500000 # Madea Goes to Jail budget
train.loc[train['id'] == 2491, 'budget'] = 6 # Never Talk to Strangers
train.loc[train['id'] == 2602, 'budget'] = 31000000 # Mr. Holland's Opus
train.loc[train['id'] == 2612, 'budget'] = 15000000 # Field of Dreams
train.loc[train['id'] == 2696, 'budget'] = 10000000 # Nurse 3-D
train.loc[train['id'] == 2801, 'budget'] = 10000000 # Fracture
train.loc[train['id'] == 335, 'budget'] = 2
train.loc[train['id'] == 348, 'budget'] = 12
train.loc[train['id'] == 470, 'budget'] = 13000000
train.loc[train['id'] == 513, 'budget'] = 1100000
train.loc[train['id'] == 640, 'budget'] = 6
train.loc[train['id'] == 696, 'budget'] = 1
train.loc[train['id'] == 797, 'budget'] = 8000000
train.loc[train['id'] == 850, 'budget'] = 1500000
train.loc[train['id'] == 1199, 'budget'] = 5
train.loc[train['id'] == 1282, 'budget'] = 9 # Death at a Funeral
train.loc[train['id'] == 1347, 'budget'] = 1
train.loc[train['id'] == 1755, 'budget'] = 2
train.loc[train['id'] == 1801, 'budget'] = 5
train.loc[train['id'] == 1918, 'budget'] = 592
train.loc[train['id'] == 2033, 'budget'] = 4
train.loc[train['id'] == 2118, 'budget'] = 344
train.loc[train['id'] == 2252, 'budget'] = 130
train.loc[train['id'] == 2256, 'budget'] = 1
train.loc[train['id'] == 2696, 'budget'] = 10000000
# Fix anomalies in test
test = pd.read_csv('test.csv')
# Clean Data
test.loc[test['id'] == 6733, 'budget'] = 5000000
test.loc[test['id'] == 3889, 'budget'] = 15000000
test.loc[test['id'] == 6683, 'budget'] = 50000000
test.loc[test['id'] == 5704, 'budget'] = 4300000
test.loc[test['id'] == 6109, 'budget'] = 281756
test.loc[test['id'] == 7242, 'budget'] = 10000000
test.loc[test['id'] == 7021, 'budget'] = 17540562 # Two Is a Family
test.loc[test['id'] == 5591, 'budget'] = 4000000 # The Orphanage
test.loc[test['id'] == 4282, 'budget'] = 20000000 # Big Top Pee-wee
test.loc[test['id'] == 3033, 'budget'] = 250
test.loc[test['id'] == 3051, 'budget'] = 50
test.loc[test['id'] == 3084, 'budget'] = 337
test.loc[test['id'] == 3224, 'budget'] = 4
test.loc[test['id'] == 3594, 'budget'] = 25
test.loc[test['id'] == 3619, 'budget'] = 500
test.loc[test['id'] == 3831, 'budget'] = 3
test.loc[test['id'] == 3935, 'budget'] = 500
test.loc[test['id'] == 4049, 'budget'] = 995946
test.loc[test['id'] == 4424, 'budget'] = 3
test.loc[test['id'] == 4460, 'budget'] = 8
test.loc[test['id'] == 4555, 'budget'] = 1200000
test.loc[test['id'] == 4624, 'budget'] = 30
test.loc[test['id'] == 4645, 'budget'] = 500
test.loc[test['id'] == 4709, 'budget'] = 450
test.loc[test['id'] == 4839, 'budget'] = 7
test.loc[test['id'] == 3125, 'budget'] = 25
test.loc[test['id'] == 3142, 'budget'] = 1
test.loc[test['id'] == 3201, 'budget'] = 450
test.loc[test['id'] == 3222, 'budget'] = 6
test.loc[test['id'] == 3545, 'budget'] = 38
test.loc[test['id'] == 3670, 'budget'] = 18
test.loc[test['id'] == 3792, 'budget'] = 19
test.loc[test['id'] == 3881, 'budget'] = 7
test.loc[test['id'] == 3969, 'budget'] = 400
test.loc[test['id'] == 4196, 'budget'] = 6
test.loc[test['id'] == 4221, 'budget'] = 11
test.loc[test['id'] == 4222, 'budget'] = 500
test.loc[test['id'] == 4285, 'budget'] = 11
test.loc[test['id'] == 4319, 'budget'] = 1
test.loc[test['id'] == 4639, 'budget'] = 10
test.loc[test['id'] == 4719, 'budget'] = 45
test.loc[test['id'] == 4822, 'budget'] = 22
test.loc[test['id'] == 4829, 'budget'] = 20
test.loc[test['id'] == 4969, 'budget'] = 20
test.loc[test['id'] == 5021, 'budget'] = 40
test.loc[test['id'] == 5035, 'budget'] = 1
test.loc[test['id'] == 5063, 'budget'] = 14
test.loc[test['id'] == 5119, 'budget'] = 2
test.loc[test['id'] == 5214, 'budget'] = 30
test.loc[test['id'] == 5221, 'budget'] = 50
test.loc[test['id'] == 4903, 'budget'] = 15
test.loc[test['id'] == 4983, 'budget'] = 3
test.loc[test['id'] == 5102, 'budget'] = 28
test.loc[test['id'] == 5217, 'budget'] = 75
test.loc[test['id'] == 5224, 'budget'] = 3
test.loc[test['id'] == 5469, 'budget'] = 20
test.loc[test['id'] == 5840, 'budget'] = 1
test.loc[test['id'] == 5960, 'budget'] = 30
test.loc[test['id'] == 6506, 'budget'] = 11
test.loc[test['id'] == 6553, 'budget'] = 280
test.loc[test['id'] == 6561, 'budget'] = 7
test.loc[test['id'] == 6582, 'budget'] = 218
test.loc[test['id'] == 6638, 'budget'] = 5
test.loc[test['id'] == 6749, 'budget'] = 8
test.loc[test['id'] == 6759, 'budget'] = 50
test.loc[test['id'] == 6856, 'budget'] = 10
test.loc[test['id'] == 6858, 'budget'] = 100
test.loc[test['id'] == 6876, 'budget'] = 250
test.loc[test['id'] == 6972, 'budget'] = 1
test.loc[test['id'] == 7079, 'budget'] = 8000000
test.loc[test['id'] == 7150, 'budget'] = 118
test.loc[test['id'] == 6506, 'budget'] = 118
test.loc[test['id'] == 7225, 'budget'] = 6
test.loc[test['id'] == 7231, 'budget'] = 85
test.loc[test['id'] == 5222, 'budget'] = 5
test.loc[test['id'] == 5322, 'budget'] = 90
test.loc[test['id'] == 5350, 'budget'] = 70
test.loc[test['id'] == 5378, 'budget'] = 10
test.loc[test['id'] == 5545, 'budget'] = 80
test.loc[test['id'] == 5810, 'budget'] = 8
test.loc[test['id'] == 5926, 'budget'] = 300
test.loc[test['id'] == 5927, 'budget'] = 4
test.loc[test['id'] == 5986, 'budget'] = 1
test.loc[test['id'] == 6053, 'budget'] = 20
test.loc[test['id'] == 6104, 'budget'] = 1
test.loc[test['id'] == 6130, 'budget'] = 30
test.loc[test['id'] == 6301, 'budget'] = 150
test.loc[test['id'] == 6276, 'budget'] = 100
test.loc[test['id'] == 6473, 'budget'] = 100
test.loc[test['id'] == 6842, 'budget'] = 30
release_dates = pd.read_csv('release_dates_per_country.csv')
release_dates['id'] = range(1,7399)
release_dates.drop(['original_title','title'],axis = 1,inplace = True)
release_dates.index = release_dates['id']
train = pd.merge(train, release_dates, how='left', on=['id'])
test = pd.merge(test, release_dates, how='left', on=['id'])
test['revenue'] = np.nan
# Merge the additional features downloaded from TMDB
train = pd.merge(train, pd.read_csv('TrainAdditionalFeatures.csv'),
how='left', on=['imdb_id'])
test = pd.merge(test, pd.read_csv('TestAdditionalFeatures.csv'),
                how='left', on=['imdb_id'])
"""
Cascaded Convolution Model
- <NAME> (ps2958)
- <NAME> (jw3468)
"""
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing import text, sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.models import Model, Input
from keras.layers import Embedding, Dense, TimeDistributed, Concatenate, BatchNormalization
from keras.layers import Bidirectional, Activation, Dropout, CuDNNGRU, Conv1D
from sklearn.model_selection import train_test_split, KFold
from keras.metrics import categorical_accuracy
from keras import backend as K
from keras.regularizers import l1, l2
import tensorflow as tf
### Data Retrieval
# cb6133 = np.load("../data/cb6133.npy")
cb6133filtered = np.load("../data/cb6133filtered.npy")
cb513 = np.load("../data/cb513.npy")
print()
# print(cb6133.shape)
print(cb6133filtered.shape)
print(cb513.shape)
maxlen_seq = r = 700 # protein residues padded to 700
f = 57 # number of features for each residue
residue_list = list('ACEDGFIHKMLNQPSRTWVYX') + ['NoSeq']
q8_list = list('LBEGIHST') + ['NoSeq']
columns = ["id", "len", "input", "profiles", "expected"]
def get_data(arr, bounds=None):
if bounds is None: bounds = range(len(arr))
data = [None for i in bounds]
for i in bounds:
seq, q8, profiles = '', '', []
for j in range(r):
jf = j*f
# Residue convert from one-hot to decoded
residue_onehot = arr[i,jf+0:jf+22]
residue = residue_list[np.argmax(residue_onehot)]
# Q8 one-hot encoded to decoded structure symbol
residue_q8_onehot = arr[i,jf+22:jf+31]
residue_q8 = q8_list[np.argmax(residue_q8_onehot)]
if residue == 'NoSeq': break # terminating sequence symbol
nc_terminals = arr[i,jf+31:jf+33] # nc_terminals = [0. 0.]
sa = arr[i,jf+33:jf+35] # sa = [0. 0.]
profile = arr[i,jf+35:jf+57] # profile features
seq += residue # concat residues into amino acid sequence
q8 += residue_q8 # concat secondary structure into secondary structure sequence
profiles.append(profile)
data[i] = [str(i+1), len(seq), seq, np.array(profiles), q8]
    return pd.DataFrame(data, columns=columns)
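# Hedged sketch (assumed continuation): build dataframes for the filtered
# training set and the CB513 test set; the variable names below are assumptions.
train_df = get_data(cb6133filtered)
test_df = get_data(cb513)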
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
from fbprophet import Prophet
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
df = pd.read_pickle('all_trips.pkl')
df.head()
# Day of Week
df['dow'] = df.trip_start_time.dt.day_name()
df['hour'] = df.trip_start_time.dt.hour
sns.set_style("darkgrid")
ax = sns.FacetGrid(data=df.groupby(
['dow', 'hour']
).hour.count().to_frame(
name='day_hour_count').reset_index(), col='dow', col_order=[
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday',
'Sunday'],
col_wrap=4)
ax.map(sns.barplot, 'hour', 'day_hour_count')
# Predict
daily = df.set_index('trip_start_time').groupby(pd.Grouper(freq='D')).size()
daily = pd.DataFrame(daily)
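# Hedged sketch (assumed continuation; the original script is truncated here):
# a typical next step is to fit the Prophet model imported above on the daily
# trip counts. Prophet expects columns named 'ds' and 'y'; everything below is
# an assumption, not part of the original notebook.
prophet_df = daily.reset_index()
prophet_df.columns = ['ds', 'y']
m = Prophet()
m.fit(prophet_df)
future = m.make_future_dataframe(periods=30)
forecast = m.predict(future)
m.plot(forecast)
plt.show()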
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.location import US_FIPS
import math
import pandas as pd
import io
from flowsa.settings import log
from string import digits
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2018",
"bauxite": "2013-2017",
"beryllium": "2014-2018",
"boron": "2014-2018",
"chromium": "2014-2018",
"clay": "2015-2016",
"cobalt": "2013-2017",
"copper": "2011-2015",
"diatomite": "2014-2018",
"feldspar": "2013-2017",
"fluorspar": "2013-2017",
"fluorspar_inports": ["2016", "2017"],
"gallium": "2014-2018",
"garnet": "2014-2018",
"gold": "2013-2017",
"graphite": "2013-2017",
"gypsum": "2014-2018",
"iodine": "2014-2018",
"ironore": "2014-2018",
"kyanite": "2014-2018",
"lead": "2012-2018",
"lime": "2014-2018",
"lithium": "2013-2017",
"magnesium": "2013-2017",
"manganese": "2012-2016",
"manufacturedabrasive": "2017-2018",
"mica": "2014-2018",
"molybdenum": "2014-2018",
"nickel": "2012-2016",
"niobium": "2014-2018",
"peat": "2014-2018",
"perlite": "2013-2017",
"phosphate": "2014-2018",
"platinum": "2014-2018",
"potash": "2014-2018",
"pumice": "2014-2018",
"rhenium": "2014-2018",
"salt": "2013-2017",
"sandgravelconstruction": "2013-2017",
"sandgravelindustrial": "2014-2018",
"silver": "2012-2016",
"sodaash": "2010-2017",
"sodaash_t4": ["2016", "2017"],
"stonecrushed": "2013-2017",
"stonedimension": "2013-2017",
"strontium": "2014-2018",
"talc": "2013-2017",
"titanium": "2013-2017",
"tungsten": "2013-2017",
"vermiculite": "2014-2018",
"zeolites": "2014-2018",
"zinc": "2013-2017",
"zirconium": "2013-2017",
}
def usgs_myb_year(years, current_year_str):
"""
    Map the requested year to the corresponding data column name. Checks that
    the year you picked falls within the covered range.
    :param years: string of the covered range, with a hyphen (e.g. "2014-2018")
:param current_year_str: string, year of interest
:return: string, year
"""
years_array = years.split("-")
lower_year = int(years_array[0])
upper_year = int(years_array[1])
current_year = int(current_year_str)
if lower_year <= current_year <= upper_year:
column_val = current_year - lower_year + 1
return "year_" + str(column_val)
else:
log.info("Your year is out of scope. Pick a year between %s and %s",
lower_year, upper_year)
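# Example: with the asbestos range "2014-2018", usgs_myb_year("2014-2018", "2016")
# returns "year_3", i.e. the third data column of the spreadsheet tab.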
def usgs_myb_name(USGS_Source):
"""
Takes the USGS source name and parses it so it can be used in other parts
of Flow by activity.
:param USGS_Source: string, usgs source name
:return:
"""
source_split = USGS_Source.split("_")
name_cc = str(source_split[2])
name = ""
for char in name_cc:
if char.isupper():
name = name + " " + char
else:
name = name + char
name = name.lower()
name = name.strip()
return name
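# Example: usgs_myb_name("USGS_MYB_SodaAsh") returns "soda ash".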
def usgs_myb_static_variables():
"""
Populates the data values for Flow by activity that are the same
for all of USGS_MYB Files
:return:
"""
data = {}
data["Class"] = "Geological"
data['FlowType'] = "ELEMENTARY_FLOWS"
data["Location"] = US_FIPS
data["Compartment"] = "ground"
data["Context"] = None
data["ActivityConsumedBy"] = None
return data
def usgs_myb_remove_digits(value_string):
"""
Eliminates numbers in a string
:param value_string:
:return:
"""
remove_digits = str.maketrans('', '', digits)
return_string = value_string.translate(remove_digits)
return return_string
def usgs_myb_url_helper(*, build_url, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:param args: dictionary, arguments specified when running flowbyactivity.py
flowbyactivity.py ('year' and 'source')
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
return [build_url]
def usgs_asbestos_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['asbestos'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_asbestos_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Exports and reexports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(dataframe,
str(year))
return dataframe
def usgs_barite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(
io.BytesIO(resp.content), sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['barite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_barite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Crude, sold or used by producers:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_bauxite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['bauxite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_bauxite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, as shipped:":
prod = "import"
elif df.iloc[index]["Production"].strip() == \
"Exports, as shipped:":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
flow_amount = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = flow_amount
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_beryllium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:9]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
    if len(df_data_1.columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
    if len(df_data_2.columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['beryllium'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_beryllium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["United States6", "Mine shipments1",
"Imports for consumption, beryl2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['beryllium'], year)
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"Imports for consumption, beryl2":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_boron_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data.loc[8:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data.loc[21:22]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_three = pd.DataFrame(df_raw_data.loc[27:28]).reindex()
df_data_three = df_data_three.reset_index()
del df_data_three["index"]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_two.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_three.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['boron'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
del df_data_two[col]
del df_data_three[col]
frames = [df_data_one, df_data_two, df_data_three]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_boron_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["B2O3 content", "Quantity"]
    product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['boron'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "B2O3 content" or \
df.iloc[index]["Production"].strip() == "Quantity":
product = "production"
if df.iloc[index]["Production"].strip() == "Colemanite:4":
des = "Colemanite"
elif df.iloc[index]["Production"].strip() == "Ulexite:4":
des = "Ulexite"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if des == name:
data['FlowName'] = name + " " + product
else:
data['FlowName'] = name + " " + product + " " + des
data["Description"] = des
data["ActivityProducedBy"] = name
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_chromium_call(*, resp, year, **_):
""""
Convert response for calling url to pandas dataframe,
begin parsing df into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:24]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
    elif len(df_data.columns) == 13:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['chromium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_chromium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary2", "Total"]
    product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Imports:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Secondary2":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_clay_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_ball = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T3')
df_data_ball = pd.DataFrame(df_raw_data_ball.loc[19:19]).reindex()
df_data_ball = df_data_ball.reset_index()
del df_data_ball["index"]
df_raw_data_bentonite = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4 ')
df_data_bentonite = pd.DataFrame(
df_raw_data_bentonite.loc[28:28]).reindex()
df_data_bentonite = df_data_bentonite.reset_index()
del df_data_bentonite["index"]
df_raw_data_common = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T5 ')
df_data_common = pd.DataFrame(df_raw_data_common.loc[40:40]).reindex()
df_data_common = df_data_common.reset_index()
del df_data_common["index"]
df_raw_data_fire = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T6 ')
df_data_fire = pd.DataFrame(df_raw_data_fire.loc[12:12]).reindex()
df_data_fire = df_data_fire.reset_index()
del df_data_fire["index"]
df_raw_data_fuller = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7 ')
df_data_fuller = pd.DataFrame(df_raw_data_fuller.loc[17:17]).reindex()
df_data_fuller = df_data_fuller.reset_index()
del df_data_fuller["index"]
df_raw_data_kaolin = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8 ')
df_data_kaolin = pd.DataFrame(df_raw_data_kaolin.loc[18:18]).reindex()
df_data_kaolin = df_data_kaolin.reset_index()
del df_data_kaolin["index"]
df_raw_data_export = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T13')
df_data_export = pd.DataFrame(df_raw_data_export.loc[6:15]).reindex()
df_data_export = df_data_export.reset_index()
del df_data_export["index"]
df_raw_data_import = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T14')
df_data_import = pd.DataFrame(df_raw_data_import.loc[6:13]).reindex()
df_data_import = df_data_import.reset_index()
del df_data_import["index"]
df_data_ball.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_bentonite.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_common.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fire.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fuller.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_kaolin.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_export.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_import.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_ball["type"] = "Ball clay"
df_data_bentonite["type"] = "Bentonite"
df_data_common["type"] = "Common clay"
df_data_fire["type"] = "Fire clay"
df_data_fuller["type"] = "Fuller’s earth"
df_data_kaolin["type"] = "Kaolin"
df_data_export["type"] = "export"
df_data_import["type"] = "import"
col_to_use = ["Production", "type"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['clay'], year))
for col in df_data_import.columns:
if col not in col_to_use:
del df_data_import[col]
del df_data_export[col]
for col in df_data_ball.columns:
if col not in col_to_use:
del df_data_ball[col]
del df_data_bentonite[col]
del df_data_common[col]
del df_data_fire[col]
del df_data_fuller[col]
del df_data_kaolin[col]
frames = [df_data_import, df_data_export, df_data_ball, df_data_bentonite,
df_data_common, df_data_fire, df_data_fuller, df_data_kaolin]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_clay_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Ball clay", "Bentonite", "Fire clay", "Kaolin",
"Fuller’s earth", "Total", "Grand total",
"Artificially activated clay and earth",
"Clays, not elsewhere classified",
"Clays, not elsewhere classified"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["type"].strip() == "import":
product = "imports"
elif df.iloc[index]["type"].strip() == "export":
product = "exports"
else:
product = "production"
if str(df.iloc[index]["Production"]).strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if product == "production":
data['FlowName'] = \
df.iloc[index]["type"].strip() + " " + product
data["Description"] = df.iloc[index]["type"].strip()
data["ActivityProducedBy"] = df.iloc[index]["type"].strip()
else:
data['FlowName'] = \
df.iloc[index]["Production"].strip() + " " + product
data["Description"] = df.iloc[index]["Production"].strip()
data["ActivityProducedBy"] = \
df.iloc[index]["Production"].strip()
col_name = usgs_myb_year(YEARS_COVERED['clay'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)" or \
str(df.iloc[index][col_name]) == "(2)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_cobalt_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:11]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[23:23]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "space_6", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
    if len(df_data_2.columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['cobalt'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_cobalt_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["United Statese, 16, 17", "Mine productione",
"Imports for consumption", "Exports"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"United Statese, 16, 17":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Exports":
prod = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['cobalt'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
remove_rows = ["(18)", "(2)"]
if data["FlowAmount"] not in remove_rows:
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_copper_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[30:31]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
df_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Unit"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['copper'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_copper_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
if product == "Total":
prod = "production"
elif product == "Exports, refined":
prod = "exports"
elif product == "Imports, refined":
prod = "imports"
data["ActivityProducedBy"] = "Copper; Mine"
data['FlowName'] = name + " " + prod
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['copper'], year)
data["Description"] = "Copper; Mine"
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_diatomite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) == 10:
df_data_one.columns = ["Production", "year_1", "space_2", "year_2",
"space_3", "year_3", "space_4", "year_4",
"space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['diatomite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_diatomite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports2", "Imports for consumption2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports2":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption2":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand metric tons"
col_name = usgs_myb_year(YEARS_COVERED['diatomite'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_feldspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_two = pd.DataFrame(df_raw_data_two.loc[4:8]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_one = pd.DataFrame(df_raw_data_two.loc[10:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['feldspar'], year))
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
del df_data_one[col]
frames = [df_data_two, df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_feldspar_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports, feldspar:4":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:4":
prod = "imports"
elif df.iloc[index]["Production"].strip() == \
"Production, feldspar:e, 2":
prod = "production"
elif df.iloc[index]["Production"].strip() == "Nepheline syenite:":
prod = "production"
des = "Nepheline syenite"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['feldspar'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
if name == des:
data['FlowName'] = name + " " + prod
else:
data['FlowName'] = name + " " + prod + " " + des
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_fluorspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
if year in YEARS_COVERED['fluorspar_inports']:
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
df_raw_data_three = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7')
df_raw_data_four = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_data_one = pd.DataFrame(df_raw_data_one.loc[5:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if year in YEARS_COVERED['fluorspar_inports']:
        df_data_two = pd.DataFrame(df_raw_data_two.loc[7:8])
import numpy as np
import pandas as pd
import gc
import random
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import os
import time
def MCMAE(output, targets, stds):
    """Mean column-wise MAE: per-target MAE rescaled by that target's std, averaged."""
    maes = []
    for i in range(output.shape[1]):
        # MAE of column i, scaled back to the original units via its std
        mae = torch.abs(output[:, i] - targets[:, i]).mean() * stds[i]
        maes.append(mae)
    maes = torch.stack(maes).mean()
    return maes
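# Quick illustrative check of MCMAE (not in the original script): identical predictions
# and targets give 0; the per-target stds below are arbitrary placeholders, not the
# competition's real target statistics.
def _example_mcmae():
    preds = torch.randn(8, 3)
    stds = [1.0, 2.0, 0.5]
    assert MCMAE(preds, preds.clone(), stds).item() == 0.0
    return MCMAE(preds, torch.randn(8, 3), stds)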
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def get_lr(optimizer):
for param_group in optimizer.param_groups:
lr=param_group['lr']
return lr
class lr_AIAYN():
'''
Learning rate scheduler from the paper:
Attention is All You Need
'''
def __init__(self,optimizer,d_model,warmup_steps=4000,factor=1):
self.optimizer=optimizer
self.d_model=d_model
self.warmup_steps=warmup_steps
self.step_num=0
self.factor=factor
def step(self):
self.step_num+=1
lr=self.d_model**-0.5*np.min([self.step_num**-0.5,
self.step_num*self.warmup_steps**-1.5])*self.factor
update_lr(self.optimizer,lr)
return lr
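# Illustrative usage sketch for lr_AIAYN (not part of the original script): the tiny
# model, d_model value and warmup length are placeholders chosen for the example.
def _example_lr_aiayn_usage():
    model = nn.Linear(16, 4)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0, betas=(0.9, 0.98), eps=1e-9)
    scheduler = lr_AIAYN(optimizer, d_model=256, warmup_steps=4000)
    lrs = [scheduler.step() for _ in range(10)]  # call once per optimizer step
    return lrs, get_lr(optimizer)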
def add_features(df):
#df['area'] = df['time_step'] * df['u_in']
#df['area'] = df.groupby('breath_id')['area'].cumsum()
df['u_in_cumsum'] = (df['u_in']).groupby(df['breath_id']).cumsum()
# fast area calculation
df['time_delta'] = df['time_step'].diff()
df['time_delta'].fillna(0, inplace=True)
df['time_delta'].mask(df['time_delta'] < 0, 0, inplace=True)
df['tmp'] = df['time_delta'] * df['u_in']
df['area_true'] = df.groupby('breath_id')['tmp'].cumsum()
df['tmp'] = df['u_out']*(-1)+1 # inversion of u_out
df['u_in_lag1'] = df.groupby('breath_id')['u_in'].shift(1)
#df['u_out_lag1'] = df.groupby('breath_id')['u_out'].shift(1)
df['u_in_lag_back1'] = df.groupby('breath_id')['u_in'].shift(-1)
#df['u_out_lag_back1'] = df.groupby('breath_id')['u_out'].shift(-1)
df['u_in_lag2'] = df.groupby('breath_id')['u_in'].shift(2)
#df['u_out_lag2'] = df.groupby('breath_id')['u_out'].shift(2)
df['u_in_lag_back2'] = df.groupby('breath_id')['u_in'].shift(-2)
#df['u_out_lag_back2'] = df.groupby('breath_id')['u_out'].shift(-2)
df['u_in_lag3'] = df.groupby('breath_id')['u_in'].shift(3)
#df['u_out_lag3'] = df.groupby('breath_id')['u_out'].shift(3)
df['u_in_lag_back3'] = df.groupby('breath_id')['u_in'].shift(-3)
#df['u_out_lag_back3'] = df.groupby('breath_id')['u_out'].shift(-3)
df['u_in_lag4'] = df.groupby('breath_id')['u_in'].shift(4)
#df['u_out_lag4'] = df.groupby('breath_id')['u_out'].shift(4)
df['u_in_lag_back4'] = df.groupby('breath_id')['u_in'].shift(-4)
#df['u_out_lag_back4'] = df.groupby('breath_id')['u_out'].shift(-4)
df = df.fillna(0)
df['breath_id__u_in__max'] = df.groupby(['breath_id'])['u_in'].transform('max')
#df['breath_id__u_out__max'] = df.groupby(['breath_id'])['u_out'].transform('max')
df['u_in_diff1'] = df['u_in'] - df['u_in_lag1']
#df['u_out_diff1'] = df['u_out'] - df['u_out_lag1']
df['u_in_diff2'] = df['u_in'] - df['u_in_lag2']
#df['u_out_diff2'] = df['u_out'] - df['u_out_lag2']
df['breath_id__u_in__diffmax'] = df.groupby(['breath_id'])['u_in'].transform('max') - df['u_in']
df['breath_id__u_in__diffmean'] = df.groupby(['breath_id'])['u_in'].transform('mean') - df['u_in']
df['u_in_diff3'] = df['u_in'] - df['u_in_lag3']
#df['u_out_diff3'] = df['u_out'] - df['u_out_lag3']
df['u_in_diff4'] = df['u_in'] - df['u_in_lag4']
#df['u_out_diff4'] = df['u_out'] - df['u_out_lag4']
#df['cross']= df['u_in']*df['u_out']
#df['cross2']= df['time_step']*df['u_out']
df['R'] = df['R'].astype(str)
df['C'] = df['C'].astype(str)
df['R__C'] = df["R"].astype(str) + '__' + df["C"].astype(str)
    df = pd.get_dummies(df)
import os
import numpy as np
import pandas as pd
import pytest
from janitor.testing_utils import date_data
TEST_DATA_DIR = "tests/test_data"
EXAMPLES_DIR = "examples/"
@pytest.fixture
def dataframe():
data = {
"a": [1, 2, 3] * 3,
"Bell__Chart": [1.234_523_45, 2.456_234, 3.234_612_5] * 3,
"decorated-elephant": [1, 2, 3] * 3,
"animals@#$%^": ["rabbit", "leopard", "lion"] * 3,
"cities": ["Cambridge", "Shanghai", "Basel"] * 3,
}
    df = pd.DataFrame(data)
import argparse
from datetime import datetime
import pandas as pd
import re
import os
from tabulate import tabulate
from ast import literal_eval
import numpy as np
def init_data() -> pd.DataFrame:
"""
    Returns
    -------
    plan: pd.DataFrame. The planner's table of notes.
    Notes
    -----
    Reads the plan from "../data/data.csv" (relative to the working directory)
    and initialises it as a pandas DataFrame. If the "data" folder, "data.csv",
    or both do not exist, they are created.
"""
# Features of the plan
features = ["title", "note", "date", "tags"]
# Initialise the plan as dataframe object
plan = pd.DataFrame(columns=features)
# finding the current directory
loc_dir = os.path.abspath(os.getcwd())
# moving to the parent folder and into "data" folder
dir_path = os.path.abspath(os.path.join(loc_dir, "..", "data"))
# path to "data.csv" file
data_path = os.path.abspath(os.path.join(dir_path, "data.csv"))
# If the folder does not exist yet
if not os.path.exists(dir_path):
os.mkdir(dir_path)
plan.to_csv(data_path, index=False)
# If the folder already exists
else:
# If "data.csv" does not exist yet
if not os.path.exists(data_path):
plan.to_csv(data_path, index=False)
# If "data.csv" already exists
else:
plan = pd.read_csv(data_path, index_col=False)
# returning plan
return plan
def update_data(
plan: pd.DataFrame
):
"""
Parameters
----------
plan: pd.DataFrame. Contains all the notes
Notes
    -----
    This function takes the updated plan as input and overwrites the existing
    local copy in "data/data.csv".
"""
# finding the current directory
loc_dir = os.path.abspath(os.getcwd())
# moving to the parent directory and into "data" folder
dir_path = os.path.abspath(os.path.join(loc_dir, "..", "data"))
# path to the "data.csv" file
data_path = os.path.abspath(os.path.join(dir_path, "data.csv"))
# Sorting the dictionary by date
plan["date"] = pd.to_datetime(plan["date"], format = '%Y-%m-%d', errors='coerce')
plan["date"] = plan["date"].dt.date
plan = plan.sort_values(by="date")
# overwriting data
plan.to_csv(data_path, index=False)
pass
def add_note(
args: argparse.Namespace, # parser arguments
plan: pd.DataFrame # DataFrame to be updated
):
"""
Parameters
----------
args: argparse.Namespace. Contains the arguments "title", "note" and "date"
plan: pd.DataFrame. Contains all the notes
    Notes
    -----
    This function adds a new note to the existing planner and saves the updated
    plan to disk via ``update_data``; it does not return a value.
Warnings
--------
This function must be updated everytime the columns of the plan are changed
"""
item = {}
for name in plan.columns:
if str(name) != "tags":
item[str(name)] = vars(args)[str(name)]
# these three lines insert a list in the pd.DataFrame. ATTENTION: it is stored as a string
# they are needed because pd.DataFrame can't be initialized with nested data
item["tags"] = "..."
data = pd.DataFrame(item, index=[0])
data.at[0, "tags"] = vars(args)[str("tags")] # use literal_eval('[1.23, 2.34]') to read this data
plan = plan.append(data)
update_data(plan)
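# Illustrative sketch (not part of the original planner): the argparse namespace that
# add_note expects, and how the stringified "tags" list can be read back with
# literal_eval. Note that this writes to "../data/data.csv" via update_data.
def _example_add_note():
    args = argparse.Namespace(
        title="Buy milk",
        note="Before the weekend",
        date="2023-05-01",
        tags=["errand", "home"],
    )
    add_note(args, init_data())
    saved = init_data()  # re-read the csv written by update_data
    return [literal_eval(t) for t in saved["tags"] if isinstance(t, str)]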
def add_note_verbose(
plan: pd.DataFrame # DataFrame to be updated
):
"""
Parameters
----------
plan: pd.DataFrame
Returns
-------
plan: pd.DataFrame with the added note
Notes
-----
This function adds a new note to the existing planner.
It uses an input/output interface; this is more convenient to use with larger notes or notes with tags.
Warnings
--------
This function must be updated everytime the columns of the plan are changed
"""
item = {} # initializing the new note
# title
title = input("Please, insert the title: ")
item["title"] = title
# body
note = input("It's time to write your note: ")
item["note"] = note
# date
date = input("Insert the date 'Y-m-d'. Press Enter to use the current date: ")
if date == '': # insert the current data if requested
date = datetime.today().strftime('%Y-%m-%d')
item["date"] = date
# tags
tags = input("Insert the tags (separated by a space or a comma): ")
# these three lines insert a list in the pd.DataFrame. ATTENTION: it is stored as a string.
# they are needed because pd.DataFrame can't be initialized with nested data
item["tags"] = "..."
    data_bug = pd.DataFrame(item, index=[0])
import copy
import re
from textwrap import dedent
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
jinja2 = pytest.importorskip("jinja2")
from pandas.io.formats.style import ( # isort:skip
Styler,
)
from pandas.io.formats.style_render import (
_get_level_lengths,
_get_trimming_maximums,
maybe_convert_css_to_tuples,
non_reducing_slice,
)
@pytest.fixture
def mi_df():
return DataFrame(
[[1, 2], [3, 4]],
index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
dtype=int,
)
@pytest.fixture
def mi_styler(mi_df):
return Styler(mi_df, uuid_len=0)
@pytest.fixture
def mi_styler_comp(mi_styler):
# comprehensively add features to mi_styler
mi_styler = mi_styler._copy(deepcopy=True)
mi_styler.css = {**mi_styler.css, **{"row": "ROW", "col": "COL"}}
mi_styler.uuid_len = 5
mi_styler.uuid = "abcde"
mi_styler.set_caption("capt")
mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
mi_styler.hide(axis="columns")
mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
mi_styler.hide(axis="index")
mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
mi_styler.set_table_attributes('class="box"')
mi_styler.format(na_rep="MISSING", precision=3)
mi_styler.format_index(precision=2, axis=0)
mi_styler.format_index(precision=4, axis=1)
mi_styler.highlight_max(axis=None)
mi_styler.applymap_index(lambda x: "color: white;", axis=0)
mi_styler.applymap_index(lambda x: "color: black;", axis=1)
mi_styler.set_td_classes(
DataFrame(
[["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
)
)
mi_styler.set_tooltips(
DataFrame(
[["a2", "b2"], ["a2", "c2"]],
index=mi_styler.index,
columns=mi_styler.columns,
)
)
return mi_styler
@pytest.mark.parametrize(
"sparse_columns, exp_cols",
[
(
True,
[
{"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
{"is_visible": False, "attributes": "", "value": "c0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "c0"},
{"is_visible": True, "attributes": "", "value": "c0"},
],
),
],
)
def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}
ctx = mi_styler._translate(True, sparse_columns)
assert exp_cols[0].items() <= ctx["head"][0][2].items()
assert exp_cols[1].items() <= ctx["head"][0][3].items()
assert exp_l1_c0.items() <= ctx["head"][1][2].items()
assert exp_l1_c1.items() <= ctx["head"][1][3].items()
@pytest.mark.parametrize(
"sparse_index, exp_rows",
[
(
True,
[
{"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
{"is_visible": False, "attributes": "", "value": "i0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "i0"},
{"is_visible": True, "attributes": "", "value": "i0"},
],
),
],
)
def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}
ctx = mi_styler._translate(sparse_index, True)
assert exp_rows[0].items() <= ctx["body"][0][0].items()
assert exp_rows[1].items() <= ctx["body"][1][0].items()
assert exp_l1_r0.items() <= ctx["body"][0][1].items()
assert exp_l1_r1.items() <= ctx["body"][1][1].items()
def test_mi_styler_sparsify_options(mi_styler):
with pd.option_context("styler.sparse.index", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.index", True):
html2 = mi_styler.to_html()
assert html1 != html2
with pd.option_context("styler.sparse.columns", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.columns", True):
html2 = mi_styler.to_html()
assert html1 != html2
@pytest.mark.parametrize(
"rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn",
[
(100, 100, 100, None, None, 12, 6), # reduce to (12, 6) < 100 elements
(1000, 3, 750, None, None, 250, 3), # dynamically reduce rows to 250, keep cols
(4, 1000, 500, None, None, 4, 125), # dynamically reduce cols to 125, keep rows
(1000, 3, 750, 10, None, 10, 3), # overwrite above dynamics with max_row
(4, 1000, 500, None, 5, 4, 5), # overwrite above dynamics with max_col
(100, 100, 700, 50, 50, 25, 25), # rows cols below given maxes so < 700 elmts
],
)
def test_trimming_maximum(rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn):
rn, cn = _get_trimming_maximums(
rn, cn, max_els, max_rows, max_cols, scaling_factor=0.5
)
assert (rn, cn) == (exp_rn, exp_cn)
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_rows", 3),
],
)
def test_render_trimming_rows(option, val):
# test auto and specific trimming of rows
df = DataFrame(np.arange(120).reshape(60, 2))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 3 # index + 2 data cols
assert len(ctx["body"]) == 4 # 3 data rows + trimming row
assert len(ctx["body"][0]) == 3 # index + 2 data cols
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_columns", 2),
],
)
def test_render_trimming_cols(option, val):
# test auto and specific trimming of cols
df = DataFrame(np.arange(30).reshape(3, 10))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 4 # index + 2 data cols + trimming col
assert len(ctx["body"]) == 3 # 3 data rows
assert len(ctx["body"][0]) == 4 # index + 2 data cols + trimming col
def test_render_trimming_mi():
midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
df = DataFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
with pd.option_context("styler.render.max_elements", 4):
ctx = df.style._translate(True, True)
assert len(ctx["body"][0]) == 5 # 2 indexes + 2 data cols + trimming row
assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
assert len(ctx["body"]) == 3 # 2 data rows + trimming row
assert len(ctx["head"][0]) == 5 # 2 indexes + 2 column headers + trimming col
assert {"attributes": 'colspan="2"'}.items() <= ctx["head"][0][2].items()
def test_render_empty_mi():
# GH 43305
df = DataFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
expected = dedent(
"""\
>
<thead>
<tr>
<th class="index_name level0" > </th>
<th class="index_name level1" >one</th>
</tr>
</thead>
"""
)
assert expected in df.style.to_html()
@pytest.mark.parametrize("comprehensive", [True, False])
@pytest.mark.parametrize("render", [True, False])
@pytest.mark.parametrize("deepcopy", [True, False])
def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp):
styler = mi_styler_comp if comprehensive else mi_styler
styler.uuid_len = 5
s2 = copy.deepcopy(styler) if deepcopy else copy.copy(styler) # make copy and check
assert s2 is not styler
if render:
styler.to_html()
excl = [
"na_rep", # deprecated
"precision", # deprecated
"cellstyle_map", # render time vars..
"cellstyle_map_columns",
"cellstyle_map_index",
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
if not deepcopy: # check memory locations are equal for all included attributes
for attr in [a for a in styler.__dict__ if (not callable(a) and a not in excl)]:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else: # check memory locations are different for nested or mutable vars
shallow = [
"data",
"columns",
"index",
"uuid_len",
"uuid",
"caption",
"cell_ids",
"hide_index_",
"hide_columns_",
"hide_index_names",
"hide_column_names",
"table_attributes",
]
for attr in shallow:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
for attr in [
a
for a in styler.__dict__
if (not callable(a) and a not in excl and a not in shallow)
]:
if getattr(s2, attr) is None:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else:
assert id(getattr(s2, attr)) != id(getattr(styler, attr))
def test_clear(mi_styler_comp):
# NOTE: if this test fails for new features then 'mi_styler_comp' should be updated
# to ensure proper testing of the 'copy', 'clear', 'export' methods with new feature
# GH 40675
styler = mi_styler_comp
styler._compute() # execute applied methods
clean_copy = Styler(styler.data, uuid=styler.uuid)
excl = [
"data",
"index",
"columns",
"uuid",
"uuid_len", # uuid is set to be the same on styler and clean_copy
"cell_ids",
"cellstyle_map", # execution time only
"cellstyle_map_columns", # execution time only
"cellstyle_map_index", # execution time only
"precision", # deprecated
"na_rep", # deprecated
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
# tests vars are not same vals on obj and clean copy before clear (except for excl)
for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert not (all(res) if (hasattr(res, "__iter__") and len(res) > 0) else res)
# test vars have same vales on obj and clean copy after clearing
styler.clear()
for attr in [a for a in styler.__dict__ if not (callable(a))]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert all(res) if hasattr(res, "__iter__") else res
def test_export(mi_styler_comp, mi_styler):
exp_attrs = [
"_todo",
"hide_index_",
"hide_index_names",
"hide_columns_",
"hide_column_names",
"table_attributes",
"table_styles",
"css",
]
for attr in exp_attrs:
check = getattr(mi_styler, attr) == getattr(mi_styler_comp, attr)
assert not (
all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
)
export = mi_styler_comp.export()
used = mi_styler.use(export)
for attr in exp_attrs:
check = getattr(used, attr) == getattr(mi_styler_comp, attr)
assert all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
used.to_html()
def test_hide_raises(mi_styler):
msg = "`subset` and `level` cannot be passed simultaneously"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", subset="something", level="something else")
msg = "`level` must be of type `int`, `str` or list of such"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
def test_hide_index_level(mi_styler, level):
mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
assert len(ctx["head"][0]) == 3
assert len(ctx["head"][1]) == 3
assert len(ctx["head"][2]) == 4
assert ctx["head"][2][0]["is_visible"]
assert not ctx["head"][2][1]["is_visible"]
assert ctx["body"][0][0]["is_visible"]
assert not ctx["body"][0][1]["is_visible"]
assert ctx["body"][1][0]["is_visible"]
assert not ctx["body"][1][1]["is_visible"]
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
@pytest.mark.parametrize("names", [True, False])
def test_hide_columns_level(mi_styler, level, names):
mi_styler.columns.names = ["zero", "one"]
if names:
mi_styler.index.names = ["zero", "one"]
ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
assert len(ctx["head"]) == (2 if names else 1)
@pytest.mark.parametrize("method", ["applymap", "apply"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header(method, axis):
# GH 41893
df = DataFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
func = {
"apply": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
"applymap": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
}
# test execution added to todo
result = getattr(df.style, f"{method}_index")(func[method], axis=axis)
assert len(result._todo) == 1
assert len(getattr(result, f"ctx_{axis}")) == 0
# test ctx object on compute
result._compute()
expected = {
(0, 0): [("attr", "val")],
}
assert getattr(result, f"ctx_{axis}") == expected
@pytest.mark.parametrize("method", ["apply", "applymap"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header_mi(mi_styler, method, axis):
# GH 41893
func = {
"apply": lambda s: ["attr: val;" if "b" in v else "" for v in s],
"applymap": lambda v: "attr: val" if "b" in v else "",
}
result = getattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
expected = {(1, 1): [("attr", "val")]}
assert getattr(result, f"ctx_{axis}") == expected
def test_apply_map_header_raises(mi_styler):
# GH 41893
with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"):
mi_styler.applymap_index(lambda v: "attr: val;", axis="bad")._compute()
class TestStyler:
def setup_method(self, method):
np.random.seed(24)
self.s = DataFrame({"A": np.random.permutation(range(6))})
self.df = DataFrame({"A": [0, 1], "B": np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo="bar"):
return pd.Series(f"color: {foo}", index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = DataFrame({"A": ["color: red", "color: blue"]})
self.dataframes = [
self.df,
DataFrame(
{"f": [1.0, 2.0], "o": ["a", "b"], "c": pd.Categorical(["a", "b"])}
),
]
self.blank_value = " "
def test_init_non_pandas(self):
msg = "``data`` must be a Series or DataFrame"
with pytest.raises(TypeError, match=msg):
Styler([1, 2, 3])
def test_init_series(self):
result = Styler(pd.Series([1, 2]))
assert result.data.ndim == 2
def test_repr_html_ok(self):
self.styler._repr_html_()
def test_repr_html_mathjax(self):
# gh-19824 / 41395
assert "tex2jax_ignore" not in self.styler._repr_html_()
with pd.option_context("styler.html.mathjax", False):
assert "tex2jax_ignore" in self.styler._repr_html_()
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi_and_trailing_semi(self):
attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
self.styler._update_ctx(attrs)
expected = {
(0, 0): [("color", "red"), ("foo", "bar")],
(1, 0): [("color", "blue"), ("foo", "baz")],
}
assert self.styler.ctx == expected
def test_render(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_multiple_render(self):
# GH 39396
s = Styler(self.df, uuid_len=0).applymap(lambda x: "color: red;", subset=["A"])
s.to_html() # do 2 renders to ensure css styles not duplicated
assert (
'<style type="text/css">\n#T__row0_col0, #T__row1_col0 {\n'
" color: red;\n}\n</style>" in s.to_html()
)
def test_render_empty_dfs(self):
empty_df = DataFrame()
es = Styler(empty_df)
es.to_html()
# An index but no columns
DataFrame(columns=["a"]).style.to_html()
# A column but no index
DataFrame(index=["a"]).style.to_html()
# No IndexError raised?
def test_render_double(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(
["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_set_properties(self):
df = DataFrame({"A": [0, 1]})
result = df.style.set_properties(color="white", size="10px")._compute().ctx
# order is deterministic
v = [("color", "white"), ("size", "10px")]
expected = {(0, 0): v, (1, 0): v}
assert result.keys() == expected.keys()
for v1, v2 in zip(result.values(), expected.values()):
assert sorted(v1) == sorted(v2)
def test_set_properties_subset(self):
df = DataFrame({"A": [0, 1]})
result = (
df.style.set_properties(subset=pd.IndexSlice[0, "A"], color="white")
._compute()
.ctx
)
expected = {(0, 0): [("color", "white")]}
assert result == expected
def test_empty_index_name_doesnt_display(self):
# https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.style._translate(True, True)
assert len(result["head"]) == 1
expected = {
"class": "blank level0",
"type": "th",
"value": self.blank_value,
"is_visible": True,
"display_value": self.blank_value,
}
assert expected.items() <= result["head"][0][0].items()
def test_index_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index("A").style._translate(True, True)
expected = {
"class": "index_name level0",
"type": "th",
"value": "A",
"is_visible": True,
"display_value": "A",
}
assert expected.items() <= result["head"][1][0].items()
def test_multiindex_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
from __future__ import print_function
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import torch.utils.data as utils
import pandas as pd
from sklearn.model_selection import train_test_split as train_test_split
from skmultilearn.problem_transform import BinaryRelevance
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC, SVC
from sklearn.multioutput import MultiOutputRegressor
from sklearn.multioutput import MultiOutputClassifier
from sklearn.linear_model import SGDClassifier, RidgeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.multioutput import ClassifierChain
mdl = [
(MultiOutputClassifier(LinearDiscriminantAnalysis(solver='svd')), "MultiOutputClassifier(LinearDiscriminantAnalysis(solver='svd'))"), # 0.3xx
(MultiOutputClassifier(LinearDiscriminantAnalysis(solver='lsqr')), "MultiOutputClassifier(LinearDiscriminantAnalysis(solver='lsqr'))"), #0.3xx
(MultiOutputClassifier(LinearDiscriminantAnalysis(solver='eigen')), "MultiOutputClassifier(LinearDiscriminantAnalysis(solver='eigen'))"), # 0.3xx
MultiOutputClassifier(SVC()), # 0.4005246444843297
MultiOutputClassifier(AdaBoostClassifier(DecisionTreeClassifier(max_depth=5), n_estimators=10, learning_rate=2)),
MultiOutputClassifier(AdaBoostClassifier(RandomForestClassifier())), # 0.40204335220212617
MultiOutputClassifier(GaussianNB()),
ClassifierChain(GaussianNB()), #0.0009664503658704957
#ClassifierChain(SGDClassifier(loss='perceptron')),
ClassifierChain(KNeighborsClassifier()),
ClassifierChain(DecisionTreeClassifier(max_depth=10)), #ACC: 0.3296976390998205, mean(loss**2) = 0.00177
ClassifierChain(AdaBoostClassifier(DecisionTreeClassifier(max_depth=5)))
]
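# Illustrative sketch (not part of the original script): given a feature matrix X and a
# multi-label target matrix Y, each bare estimator in `mdl` above could be scored like
# this; tuple entries carry (estimator, label), so pass only the estimator.
def _example_evaluate_model(model, X, Y):
    """Fit one multi-output model on a held-out split and return its subset accuracy."""
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
    model.fit(X_train, Y_train)
    # .score on MultiOutputClassifier/ClassifierChain reports exact-match (subset) accuracy
    return model.score(X_test, Y_test)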
def load_data():
    Train_features = pd.read_csv('../Data/train_features.csv')
    Train_targets_scored = pd.read_csv("../Data/train_targets_scored.csv")
    Train_targets_nonscored = pd.read_csv("../Data/train_targets_nonscored.csv")
    Test_features = pd.read_csv("../Data/test_features.csv")
    #return other files
    return Train_features, Train_targets_scored, Test_features
def preprocess(Data):
def add_dummies(df, col):
hot_vector = | pd.get_dummies(df[col]) | pandas.get_dummies |
import pandas as pd
import dash
from dash import html, dcc
import dash_bootstrap_components as dbc
from dash_extensions.javascript import Namespace
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from dash_tabulator import DashTabulator
from . import tools as T
ns = Namespace("myNamespace", "tabulator")
tabulator_options = {
"groupBy": "Label",
"selectable": True,
"headerFilterLiveFilterDelay": 3000,
"layout": "fitDataFill",
"height": "900px",
}
downloadButtonType = {
"css": "btn btn-primary",
"text": "Export",
"type": "csv",
"filename": "Metadata",
}
clearFilterButtonType = {"css": "btn btn-outline-dark", "text": "Clear Filters"}
meta_table = html.Div(
id="meta-table-container",
style={"minHeight": 100, "margin": "0%"},
children=[
DashTabulator(
id="meta-table",
columns=T.gen_tabulator_columns(
add_ms_file_col=True,
add_color_col=True,
add_peakopt_col=True,
add_ms_file_active_col=True,
),
options=tabulator_options,
downloadButtonType=downloadButtonType,
clearFilterButtonType=clearFilterButtonType,
)
],
)
options = [
{"label": "Batch", "value": "Batch"},
{"label": "Label", "value": "Label"},
{"label": "Color", "value": "Color"},
{"label": "Type", "value": "Type"},
{"label": "Concentration", "value": "Concentration"},
]
_label = "Metadata"
_layout = html.Div(
[
html.H3("Metadata"),
dcc.Upload(
id="meta-upload",
children=html.Div(["Drag and Drop or ", html.A("Select Files")]),
style={
"width": "100%",
"height": "60px",
"lineHeight": "60px",
"borderWidth": "1px",
"borderStyle": "dashed",
"borderRadius": "5px",
"textAlign": "center",
"margin": "10px",
},
# Allow multiple files to be uploaded
multiple=True,
),
dcc.Markdown("---"),
dcc.Markdown("##### Actions"),
dbc.Row(
[
dcc.Dropdown(
id="meta-action",
options=[
{"label": "Set", "value": "Set"},
{"label": "Create column", "value": "create_column"},
{"label": "Delete column", "value": "delete_column"},
# {'label': 'Delete selected files', 'value': 'delete_ms_files'},
],
value="Set",
style={"width": "150px"},
),
dcc.Dropdown(
id="meta-column",
options=options,
value=None,
style={"width": "150px"},
),
dcc.Input(id="meta-input"),
dcc.Dropdown(
id="meta-input-bool",
options=[
{"value": "True", "label": "True"},
{"value": "False", "label": "False"},
],
value=None,
),
html.Button("Apply", id="meta-apply"),
],
style={"marginLeft": "5px"},
),
dcc.Markdown("---"),
dcc.Loading(meta_table),
html.Div(id="meta-upload-output", style={"visibility": "hidden"}),
html.Div(id="meta-apply-output", style={"visibility": "hidden"}),
]
)
_outputs = html.Div(
id="meta-outputs",
children=[
html.Div(id={"index": "meta-apply-output", "type": "output"}),
html.Div(id={"index": "meta-table-saved-on-edit-output", "type": "output"}),
],
)
def layout():
return _layout
def callbacks(app, fsc, cache):
@app.callback(
Output("meta-table", "data"),
Output("meta-table", "columns"),
Output("meta-column", "options"),
Input("tab", "value"),
Input("meta-upload-output", "children"),
Input("meta-apply-output", "children"),
State("wdir", "children"),
)
def meta_load_table(tab, upload_trigger, apply_trigger, wdir):
if tab != "Metadata":
raise PreventUpdate
df = T.get_metadata(wdir)
columns = df.columns.to_list()
options = [{"label": col, "value": col} for col in columns if col != "index"]
return (
df.to_dict("records"),
T.gen_tabulator_columns(
df.columns,
add_ms_file_col=True,
add_color_col=True,
add_peakopt_col=True,
add_ms_file_active_col=True,
),
options,
)
@app.callback(
Output("meta-upload-output", "children"),
Input("meta-upload", "contents"),
Input("meta-upload", "filename"),
State("wdir", "children"),
)
def meta_upload(contents, filename, wdir):
if contents is None:
raise PreventUpdate
df = T.get_metadata(wdir)
contents = T.parse_table_content(contents[0], filename[0])
df = T.merge_metadata(df, contents)
if "index" not in df.columns:
df = df.reset_index()
T.write_metadata(df, wdir)
return "Table updated"
@app.callback(
Output("meta-table", "downloadButtonType"),
Input("tab", "value"),
State("active-workspace", "children"),
)
def update_table_export_fn(tab, ws_name):
fn = f"{T.today()}-{ws_name}_MINT-metadata"
downloadButtonType = {
"css": "btn btn-primary",
"text": "Export",
"type": "csv",
"filename": fn,
}
return downloadButtonType
@app.callback(
Output({"index": "meta-apply-output", "type": "output"}, "children"),
Output("meta-apply-output", "children"),
Input("meta-apply", "n_clicks"),
State("meta-table", "data"),
State("meta-table", "multiRowsClicked"),
State("meta-table", "dataFiltered"),
State("meta-action", "value"),
State("meta-column", "value"),
State("meta-input", "value"),
State("meta-input-bool", "value"),
State("wdir", "children"),
)
def meta_apply(
n_clicks,
data,
selected_rows,
data_filtered,
action,
column,
value,
value_bool,
wdir,
):
"""
        This callback applies the column actions and saves the table
        to the hard drive.
"""
if (n_clicks is None) or (data is None) or (len(data) == 0):
raise PreventUpdate
if action == "Set" and column == "PeakOpt":
value = value_bool
df = pd.DataFrame(data)
if "index" in df.columns:
df = df.set_index("index")
else:
df = df.reset_index()
prop_id = dash.callback_context.triggered[0]["prop_id"]
if prop_id == "meta-apply.n_clicks":
if action == "Set":
filtered_rows = [r for r in data_filtered["rows"] if r is not None]
filtered_ndx = [r["index"] for r in filtered_rows]
if selected_rows == []:
# If nothing is selected apply to all visible rows
ndxs = filtered_ndx
else:
# If something is selected only apply to selected rows
ndxs = [
r["index"] for r in selected_rows if r["index"] in filtered_ndx
]
if len(ndxs) == 0 or column is None:
# If no column is selected just save the table.
T.write_metadata(df, wdir)
return dbc.Alert("Metadata saved.", color="info"), "Applied"
df.loc[ndxs, column] = value
elif action == "create_column":
df[value] = ""
elif action == "delete_column":
del df[column]
T.write_metadata(df, wdir)
return dbc.Alert("Metadata saved.", color="info"), "Applied"
@app.callback(
Output(
{"index": "meta-table-saved-on-edit-output", "type": "output"}, "children"
),
Input("meta-table", "cellEdited"),
State("meta-table", "data"),
State("wdir", "children"),
)
def save_table_on_edit(cell_edited, data, wdir):
"""
This callback saves the table on cell edits.
This saves some bandwidth.
"""
if data is None or cell_edited is None:
raise PreventUpdate
df = | pd.DataFrame(data) | pandas.DataFrame |
import os
import pandas
import sqlalchemy as sa
import json
import wx, wx.adv, wx.lib
from datetime import datetime
from wx.lib.wordwrap import wordwrap
from threading import Thread
from pydispatch import dispatcher
from openpyxl import load_workbook
CLOSE_DIALOG_SIGNAL = 'close-notification-dialog'
from components.datatable import DataGrid
from components.fieldcreation import (FieldCreateDialog, OrganismFieldFormDialog,
DrugRegFormDialog, IndexFieldList,
HeatmapFieldList, DateRangeFieldList)
APPDATA_DIR = 'appdata'
DRUG_REGISTRY_FILE = 'drugs.json'
basepath = os.path.dirname(os.path.abspath(__file__))
drug_dict = {}
drug_df = None
def load_drug_registry():
global drug_dict
global drug_df
if DRUG_REGISTRY_FILE:
try:
drug_df = pandas.read_json(os.path.join(APPDATA_DIR, DRUG_REGISTRY_FILE))
        except Exception:
return pandas.DataFrame(columns=['drug', 'abbreviation', 'group'])
else:
drug_dict = {}
if drug_df.empty:
drug_df = pandas.DataFrame(columns=['drug', 'abbreviation', 'group'])
else:
drug_df = drug_df.sort_values(['group'])
for idx, row in drug_df.iterrows():
drug = row
if row['abbreviation']:
abbrs = [a.strip().lower() for a in row['abbreviation'].split(',')]
else:
abbrs = []
for ab in abbrs:
drug_dict[ab] = drug
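# Note on the expected registry format (an assumption inferred from the code above):
# drugs.json should be readable by pandas.read_json into columns 'drug', 'abbreviation'
# and 'group', e.g.
#   [{"drug": "Ampicillin", "abbreviation": "AMP, AM", "group": "penicillin"},
#    {"drug": "Gentamicin", "abbreviation": "GEN", "group": "aminoglycoside"}]
# Each comma-separated abbreviation is lower-cased and used as a key into drug_dict.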
class FieldAttribute():
def __init__(self):
self.data = {}
self.columns = []
self.organisms = {}
def update_from_json(self, json_data):
if not self.columns:
return False
json_data = json.loads(json_data)
profile_cols = json_data['columns']
profile_cols_no_agg = set([col for col in profile_cols if not
col.startswith('@')])
if profile_cols_no_agg.difference(self.columns):
return False
else:
self.columns = profile_cols
self.data = json_data['data']
self.organisms = json_data['organisms']
return True
def update_from_json_for_database(self, json_data):
"""
Update columns with data from the saved profile
:param json_data:
:return: Boolean
"""
# Data must be loaded first
if not self.columns:
return False
json_data = json.loads(json_data)
profile_cols = json_data['columns']
# columns must match
profile_cols_no_agg = [col for col in profile_cols if not col.startswith('@')]
assert len(set(profile_cols_no_agg).difference(set(self.columns))) == 0
self.columns = profile_cols
self.data = json_data['data']
self.organisms = json_data['organisms']
return True
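    # Note (inferred from OnSaveProfile in the main window): a profile file is JSON of
    # the form {"columns": [...], "data": {<column>: {<attributes>}}, "organisms": {...}};
    # aggregate columns additionally carry an 'aggregate' entry holding the source column
    # and the value-to-group mapping used to rebuild them.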
def update_from_dataframe(self, data_frame):
self.columns = []
for n, column in enumerate(data_frame.columns):
self.columns.append(column)
self.data[column] = {'name': column,
'alias': column,
'organism': False,
'key': False,
'drug': True if column.lower() in drug_dict else False,
'date': False,
'type': str(data_frame[column].dtype),
'keep': True,
'desc': "",
}
def values(self):
return self.data.values()
def get_column(self, colname):
try:
return self.data[colname]
except KeyError as e:
raise AttributeError(e)
def iget_column(self, index):
try:
return self.columns[index]
except IndexError:
return None
def get_col_index(self, colname):
try:
return self.columns.index(colname)
except ValueError:
return -1
def is_col_aggregate(self, colname):
if colname in self.data:
if self.data[colname].get('aggregate'):
return True
else:
return False
else:
raise KeyError
def update_organisms(self, df):
self.organisms = {}
for idx, row in df.iterrows():
self.organisms[row[0]] = {'genus': row[1], 'species': row[2]}
def browse(filetype='MLAB'):
file_meta = {
'MLAB': {
'wildcard': "Excel files (*.xls;*xlsx)|*.xls;*.xlsx"
},
'CSV': {
'wildcard': "CSV files (*.csv)|*.csv"
}
}
with wx.FileDialog(None, "Open data file",
wildcard=file_meta[filetype]['wildcard'],
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) \
as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
return fileDialog.GetPath()
def show_sheets(parent, worksheets):
dlg = wx.SingleChoiceDialog(None,
"Select a worksheet", "Worksheets", worksheets)
if dlg.ShowModal() == wx.ID_OK:
return dlg.GetStringSelection()
dlg.Destroy()
class NotificationBox(wx.Dialog):
def __init__(self, parent, caption, message):
super(NotificationBox, self).__init__(parent=parent,
title=caption, size=(300, 90),
style=wx.CAPTION)
self.label = wx.StaticText(self, label=message)
vsizer = wx.BoxSizer(wx.VERTICAL)
vsizer.Add(self.label, 1, wx.ALL | wx.EXPAND | wx.CENTER, 20)
self.SetSizer(vsizer)
self.Center(wx.HORIZONTAL)
dispatcher.connect(self.endModal, signal=CLOSE_DIALOG_SIGNAL, sender=dispatcher.Any)
def updateLabel(self, msg):
self.label.SetLabelText(msg)
def endModal(self, rc):
# Not sure why sometimes the dialog is not modal,
# but failing to check it causes an error.
if self.IsModal():
self.EndModal(rc)
else:
return rc
class MainWindow(wx.Frame):
def __init__(self, parent):
super(MainWindow, self).__init__(parent)
scr_width, scr_height = wx.DisplaySize()
self.SetIcon(wx.Icon(os.path.join(basepath, 'icons/appicon.ico')))
self.version_no = '2019.1.8'
self.description = 'Faculty of Medical Technology, Mahidol University'
self.SetTitle('Mivisor Version {}'.format(self.version_no))
self.SetSize((int(scr_width * 0.75), int(scr_height * 0.85)))
self.Center()
self.current_column = None
self.data_filepath = None
self.profile_filepath = None
self.db_filepath = None
self.current_session_id = None
self.dbengine = None
self.data_loaded = False
self.field_attr = FieldAttribute()
df = pandas.DataFrame({'Name': ['Mivisor'],
'Version': [self.version_no],
'Released': ['2019-08-04'],
'Brought to you by': [self.description],
'Contact': ['<EMAIL>']})
menubar = wx.MenuBar()
fileMenu = wx.Menu()
dataMenu = wx.Menu()
fieldMenu = wx.Menu()
exportMenu = wx.Menu()
antibiogramMenu = wx.Menu()
registryMenu = wx.Menu()
analyzeMenu = wx.Menu()
aboutMenu = wx.Menu()
databaseMenu = wx.Menu()
imp = wx.Menu()
mlabItem = imp.Append(wx.ID_ANY, 'Excel (MLAB)')
# csvItem = imp.Append(wx.ID_ANY, 'CSV')
# csvItem.Enable(False)
fileMenu.AppendSeparator()
fileMenu.Append(wx.ID_ANY, 'I&mport', imp)
fileMenu.AppendSeparator()
self.loadProfileItem = fileMenu.Append(wx.ID_ANY, 'Load Profile')
self.loadProfileItem.Enable(False)
self.saveProfileItem = fileMenu.Append(wx.ID_ANY, 'Save Profile')
self.saveProfileItem.Enable(False)
exitItem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit Application')
self.createFieldItem = fieldMenu.Append(wx.ID_ANY, 'Matching')
self.createFieldItem.Enable(False)
dataMenu.Append(wx.ID_ANY, 'New field', fieldMenu)
self.saveToDatabaseMenuItem = dataMenu.Append(wx.ID_ANY, 'Save to database')
self.saveToDatabaseMenuItem.Enable(False)
self.appendToDatabaseMenuItem = dataMenu.Append(wx.ID_ANY, 'Append to database')
self.appendToDatabaseMenuItem.Enable(False)
dataMenu.AppendSeparator()
self.organismItem = dataMenu.Append(wx.ID_ANY, 'Organism')
self.organismItem.Enable(False)
dataMenu.AppendSeparator()
self.exportToExcelMenuItem = exportMenu.Append(wx.ID_ANY, 'To Excel')
self.saveToFlatDbMenuItem = exportMenu.Append(wx.ID_ANY, 'Create flat database')
self.addToFlatDbMenuItem = exportMenu.Append(wx.ID_ANY, 'Add to flat database')
self.exportToExcelMenuItem.Enable(False)
self.saveToFlatDbMenuItem.Enable(False)
self.addToFlatDbMenuItem.Enable(False)
dataMenu.Append(wx.ID_ANY, 'Export flat table', exportMenu)
drugRegMenuItem = registryMenu.Append(wx.ID_ANY, 'Drugs')
self.biogramDbMenuItem = antibiogramMenu.Append(wx.ID_ANY, 'Create summary report')
self.biogramDbMenuItem.Enable(True)
self.biogramHeatmapMenuItem = antibiogramMenu.Append(wx.ID_ANY, 'Create heatmap plot')
self.biogramHeatmapMenuItem.Enable(True)
analyzeMenu.Append(wx.ID_ANY, 'Antibiogram', antibiogramMenu)
aboutMenuItem = aboutMenu.Append(wx.ID_ANY, "About the program")
self.connectDbMenuItem = databaseMenu.Append(wx.ID_ANY, 'Connect')
self.disconnectDbMenuItem = databaseMenu.Append(wx.ID_ANY, 'Disconnect')
self.Bind(wx.EVT_MENU, self.onConnectDbMenuItemClick, self.connectDbMenuItem)
self.Bind(wx.EVT_MENU, self.onDisconnectDbMenuItemClick, self.disconnectDbMenuItem)
self.Bind(wx.EVT_MENU, lambda x: self.onSaveToDatabaseMenuItemClick(x, action='replace'),
self.saveToDatabaseMenuItem)
self.Bind(wx.EVT_MENU, lambda x: self.onSaveToDatabaseMenuItemClick(x, action='append'),
self.appendToDatabaseMenuItem)
menubar.Append(fileMenu, '&File')
menubar.Append(dataMenu, '&Data')
menubar.Append(databaseMenu, 'Database')
menubar.Append(analyzeMenu, 'Analy&ze')
menubar.Append(registryMenu, '&Registry')
menubar.Append(aboutMenu, '&About')
self.SetMenuBar(menubar)
accel_tbl = wx.AcceleratorTable([
(wx.ACCEL_CTRL, ord('M'), mlabItem.GetId()),
])
self.SetAcceleratorTable(accel_tbl)
import sys
self.Bind(wx.EVT_CLOSE, lambda x: sys.exit())
self.Bind(wx.EVT_MENU, self.on_about_menu_click, aboutMenuItem)
self.Bind(wx.EVT_MENU, self.OnQuit, exitItem)
self.Bind(wx.EVT_MENU, self.onLoadMLABItemClick, mlabItem)
# self.Bind(wx.EVT_MENU, self.OnLoadCSV, csvItem)
self.Bind(wx.EVT_MENU, self.OnCreateField, self.createFieldItem)
self.Bind(wx.EVT_MENU, self.OnSaveProfile, self.saveProfileItem)
self.Bind(wx.EVT_MENU, self.OnLoadProfile, self.loadProfileItem)
self.Bind(wx.EVT_MENU, self.OnOrganismClick, self.organismItem)
# TODO: rename OnExportRawData method
self.Bind(wx.EVT_MENU, self.OnExportRawData, self.exportToExcelMenuItem)
self.Bind(wx.EVT_MENU, lambda x: self.onSaveToFlatDbMenuItemClick(x, action='replace'),
self.saveToFlatDbMenuItem)
self.Bind(wx.EVT_MENU, lambda x: self.onSaveToFlatDbMenuItemClick(x, action='append'),
self.addToFlatDbMenuItem)
self.Bind(wx.EVT_MENU, self.on_drug_reg_menu_click, drugRegMenuItem)
self.Bind(wx.EVT_MENU, self.onBiogramDbMenuItemClick, self.biogramDbMenuItem)
self.Bind(wx.EVT_MENU, self.onBiogramHeatmapMenuItemClick, self.biogramHeatmapMenuItem)
# init panels
self.info_panel = wx.Panel(self, wx.ID_ANY)
self.preview_panel = wx.Panel(self, wx.ID_ANY)
self.summary_panel = wx.Panel(self, wx.ID_ANY)
self.attribute_panel = wx.Panel(self, wx.ID_ANY)
self.edit_panel = wx.Panel(self, wx.ID_ANY)
# init sizers
self.info_sizer = wx.StaticBoxSizer(wx.VERTICAL, self.info_panel, "Session Information")
self.summary_sizer = wx.StaticBoxSizer(wx.VERTICAL, self.summary_panel, "Field Summary")
self.field_attr_sizer = wx.StaticBoxSizer(wx.VERTICAL, self.attribute_panel, "Field Attributes")
edit_box_sizer = wx.StaticBoxSizer(wx.HORIZONTAL, self.edit_panel, "Edit")
self.data_grid_box_sizer = wx.StaticBoxSizer(wx.VERTICAL, self.preview_panel, "Data Preview")
self.profile_lbl = wx.StaticText(self.info_panel, -1, "Profile filepath: {}".format(self.profile_filepath))
self.datafile_lbl = wx.StaticText(self.info_panel, -1, "Data filepath: {}".format(self.data_filepath))
self.dbfile_lbl = wx.StaticText(self.info_panel, -1, "Database filepath: {}".format(self.db_filepath))
self.info_sizer.Add(self.datafile_lbl)
self.info_sizer.Add(self.profile_lbl)
self.info_sizer.Add(self.dbfile_lbl)
self.data_grid = DataGrid(self.preview_panel)
self.data_grid.set_table(df)
self.data_grid.AutoSize()
self.data_grid_box_sizer.Add(self.data_grid, 1, flag=wx.EXPAND | wx.ALL)
self.key_chkbox = wx.CheckBox(self.edit_panel, -1, label="Key", name="key")
self.drug_chkbox = wx.CheckBox(self.edit_panel, -1, label="Drug", name="drug")
self.organism_chkbox = wx.CheckBox(self.edit_panel, -1, label="Organism", name="organism")
self.keep_chkbox = wx.CheckBox(self.edit_panel, -1, label="Included", name="keep")
self.date_chkbox = wx.CheckBox(self.edit_panel, -1, label="Date", name="date")
self.field_edit_checkboxes = [self.key_chkbox, self.drug_chkbox,
self.keep_chkbox, self.organism_chkbox,
self.date_chkbox]
checkbox_sizer = wx.FlexGridSizer(cols=len(self.field_edit_checkboxes), hgap=4, vgap=0)
for chkbox in self.field_edit_checkboxes:
checkbox_sizer.Add(chkbox)
chkbox.Bind(wx.EVT_CHECKBOX, self.on_edit_save_button_clicked)
checkbox_label = wx.StaticText(self.edit_panel, -1, "Marked as")
self.field_desc = wx.TextCtrl(self.edit_panel, -1, "", style=wx.TE_MULTILINE, size=(200, 100))
self.field_alias = wx.TextCtrl(self.edit_panel, -1, "")
self.edit_save_button = wx.Button(self.edit_panel, -1, "Update")
self.edit_save_button.Bind(wx.EVT_BUTTON, self.on_edit_save_button_clicked)
alias_label = wx.StaticText(self.edit_panel, -1, "Alias")
desc_label = wx.StaticText(self.edit_panel, -1, "Description")
form_sizer = wx.FlexGridSizer(cols=2, hgap=2, vgap=2)
form_sizer.AddMany([checkbox_label, checkbox_sizer])
form_sizer.AddMany([desc_label, self.field_desc])
form_sizer.AddMany([alias_label, self.field_alias])
form_sizer.AddMany([wx.StaticText(self.edit_panel, -1, ""), self.edit_save_button])
edit_box_sizer.Add(form_sizer, 1, flag=wx.ALIGN_LEFT)
self.summary_table = wx.ListCtrl(self.summary_panel, style=wx.LC_REPORT)
self.summary_table.InsertColumn(0, 'Field')
self.summary_table.InsertColumn(1, 'Value')
self.summary_sizer.Add(self.summary_table, 1, wx.EXPAND)
self.field_attr_list = wx.ListCtrl(self.attribute_panel, style=wx.LC_REPORT)
self.add_field_attr_list_column()
self.field_attr_sizer.Add(self.field_attr_list, 1, wx.EXPAND)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onFieldAttrListItemSelected)
self.preview_panel.SetSizer(self.data_grid_box_sizer)
self.attribute_panel.SetSizer(self.field_attr_sizer)
self.summary_panel.SetSizer(self.summary_sizer)
self.edit_panel.SetSizer(edit_box_sizer)
self.info_panel.SetSizer(self.info_sizer)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self.edit_panel, 2, flag=wx.ALL | wx.EXPAND)
self.hbox.Add(self.summary_panel, 1, flag=wx.ALL | wx.EXPAND)
self.vbox.Add(self.info_panel, 0, flag=wx.EXPAND | wx.ALL)
self.vbox.Add(self.preview_panel, 1, flag=wx.EXPAND | wx.ALL)
self.vbox.Add(self.attribute_panel, flag=wx.ALL | wx.EXPAND)
self.vbox.Add(self.hbox, flag=wx.ALL | wx.EXPAND | wx.ALL)
self.SetSizer(self.vbox)
load_drug_registry()
def OnQuit(self, e):
self.Close()
def OnOrganismClick(self, event):
columns = []
sel_col = None
for c in self.field_attr.columns:
col = self.field_attr.get_column(c)
if col['keep'] and col['organism']:
columns.append(col['alias'])
if not columns:
dlg = wx.MessageDialog(None, "No organism field specified.",
"Please select a field for organism.",
wx.OK)
ret = dlg.ShowModal()
if ret == wx.ID_OK:
return
dlg = wx.SingleChoiceDialog(None,
"Select a column", "Kept columns", columns)
if dlg.ShowModal() == wx.ID_OK:
sel_col = dlg.GetStringSelection()
dlg.Destroy()
if sel_col:
sel_col_index = self.field_attr.get_col_index(sel_col)
column = self.field_attr.get_column(sel_col)
values = self.data_grid.table.df[sel_col].unique()
fc = OrganismFieldFormDialog()
if not self.field_attr.organisms:
_df = pandas.DataFrame({column['alias']: values, 'genus': None, 'species': None})
else:
orgs = []
genuses = []
species = []
for org in self.field_attr.organisms:
orgs.append(org)
genuses.append(self.field_attr.organisms[org]['genus'])
species.append(self.field_attr.organisms[org]['species'])
_df = pandas.DataFrame({column['alias']: orgs, 'genus': genuses, 'species': species})
fc.grid.set_table(_df)
resp = fc.ShowModal()
self.field_attr.update_organisms(fc.grid.table.df)
def load_profile_from_filepath(self, df):
try:
fp = open(self.profile_filepath, 'r')
except IOError:
wx.MessageDialog(self,
'Cannot read data from {}. Please double check the file path.'.format(
self.profile_filepath),
'The profile file cannot be loaded',
wx.ICON_ERROR).ShowModal()
return
json_data = fp.read()
fp.close()
if not self.field_attr.update_from_json_for_database(json_data):
wx.MessageDialog(self,
'Fields in the profile and the data do not match.',
'The profile cannot be loaded',
wx.ICON_INFORMATION).ShowModal()
return
for c in self.field_attr.columns:
if self.field_attr.is_col_aggregate(c):
column = self.field_attr.get_column(c)
column_index = self.field_attr.get_col_index(c)
if c not in df.columns:
d = []
from_col = column['aggregate']['from']
dict_ = column['aggregate']['data']
for value in df[from_col]:
d.append(dict_.get(value, value))
df.insert(column_index, c, value=d)
return df
def OnLoadProfile(self, event):
if not self.data_filepath:
dlg = wx.MessageDialog(None,
"No data for this session.",
"Please provide data for this session first.",
wx.OK | wx.CENTER)
ret = dlg.ShowModal()
return
wildcard = "JSON (*.json)|*.json"
_profile_pth = self.data_filepath or os.getcwd()
with wx.FileDialog(None, "Choose a file", _profile_pth,
"", wildcard, wx.FC_OPEN) as file_dlg:
if file_dlg.ShowModal() == wx.ID_CANCEL:
return
try:
fp = open(file_dlg.GetPath(), 'r')
json_data = fp.read()
fp.close()
if not self.field_attr.update_from_json(json_data):
wx.MessageDialog(self,
'Fields in the profile and the data do not match.',
'The profile cannot be loaded',
wx.ICON_INFORMATION).ShowModal()
return
for c in self.field_attr.columns:
if self.field_attr.is_col_aggregate(c):
column = self.field_attr.get_column(c)
column_index = self.field_attr.get_col_index(c)
if c not in self.data_grid.table.df.columns:
d = []
from_col = column['aggregate']['from']
dict_ = column['aggregate']['data']
for value in self.data_grid.table.df[from_col]:
d.append(dict_.get(value, value))
self.data_grid.table.df.insert(column_index, c, value=d)
self.data_grid.table.InsertCols(column_index, 1)
self.refresh_field_attr_list_column()
self.update_edit_panel(self.field_attr.iget_column(0))
self.profile_filepath = file_dlg.GetPath()
self.profile_lbl.SetLabelText("Profile filepath: {}".format(self.profile_filepath))
except IOError:
print('Cannot load data from file.')
def OnSaveProfile(self, event):
wildcard = "JSON (*.json)|*.json"
with wx.FileDialog(None, "Choose a file to save a profile.", os.getcwd(),
"", wildcard, wx.FC_SAVE) as file_dlg:
if file_dlg.ShowModal() == wx.ID_CANCEL:
return
try:
fp = open(file_dlg.GetPath(), 'w')
for col in self.field_attr.columns:
column = self.field_attr.get_column(col)
fp.write(json.dumps({'data': self.field_attr.data,
'columns': self.field_attr.columns,
'organisms': self.field_attr.organisms},
indent=2))
fp.close()
self.profile_filepath = file_dlg.GetPath()
self.profile_lbl.SetLabelText("Profile filepath: {}".format(self.profile_filepath))
except IOError:
print('Cannot save data to file.')
def load_datafile(self, filetype='MLAB'):
filepath = browse(filetype)
        if filepath:
try:
worksheets = load_workbook(filepath).sheetnames
except FileNotFoundError:
wx.MessageDialog(self,
                                 'Cannot open the data file.\nPlease check the file path again.',
'File Not Found!', wx.OK | wx.CENTER).ShowModal()
else:
if len(worksheets) > 1:
sel_worksheet = show_sheets(self, worksheets)
else:
sel_worksheet = worksheets[0]
bag = {'data': None, 'filepath': ''}
def read_excel():
# TODO: need to handle an error
df = pandas.read_excel(filepath, sheet_name=sel_worksheet)
bag['data'] = df
bag['filepath'] = filepath
wx.CallAfter(dispatcher.send, CLOSE_DIALOG_SIGNAL, rc=0)
thread = Thread(target=read_excel)
thread.start()
with NotificationBox(self, caption='Import Data',
message='Reading from the Excel file...') as md:
md.ShowModal()
return bag['data'], bag['filepath']
else:
wx.MessageDialog(self, 'File path is not valid!',
'Please check the file path.',
wx.OK | wx.CENTER).ShowModal()
return pandas.DataFrame(), ''
def onLoadMLABItemClick(self, e):
if self.data_loaded:
dlg = wx.MessageDialog(None, "Click \"Yes\" to continue or click \"No\" to return to your session.",
"Data in this current session will be discarded!",
wx.YES_NO | wx.ICON_QUESTION)
ret_ = dlg.ShowModal()
if ret_ == wx.ID_NO:
return
self.profile_filepath = None
self.db_filepath = None
self.dbengine = None
self.dbfile_lbl.SetLabelText('Database filepath:')
self.profile_lbl.SetLabelText('Profile filepath:')
df, filepath = self.load_datafile()
if filepath:
if df.empty:
dlg = wx.MessageDialog(None,
"Do you want to proceed?\nClick \"Yes\" to continue or \"No\" to cancel.",
"Warning: dataset is empty.",
wx.YES_NO | wx.ICON_QUESTION)
ret_ = dlg.ShowModal()
if ret_ == wx.ID_NO:
return
self.data_filepath = filepath
self.datafile_lbl.SetLabelText("Data filepath: {}".format(self.data_filepath))
self.data_loaded = True
self.data_grid_box_sizer.Remove(0)
self.data_grid.Destroy()
self.data_grid = DataGrid(self.preview_panel)
self.data_grid.set_table(df)
self.data_grid.AutoSizeColumns()
self.data_grid_box_sizer.Add(self.data_grid, 1, flag=wx.EXPAND | wx.ALL)
self.data_grid_box_sizer.Layout() # repaint the sizer
self.field_attr.update_from_dataframe(df)
self.field_attr_list.ClearAll()
self.refresh_field_attr_list_column()
if self.field_attr.columns:
self.current_column = self.field_attr.iget_column(0)
self.field_attr_list.Select(0)
self.saveProfileItem.Enable(True)
self.loadProfileItem.Enable(True)
self.organismItem.Enable(True)
self.createFieldItem.Enable(True)
self.appendToDatabaseMenuItem.Enable(True)
self.saveToDatabaseMenuItem.Enable(True)
# need to enable load profile menu item here
# after refactoring the menu bar
def OnLoadCSV(self, e):
filepath = browse('CSV')
if filepath:
try:
df = pandas.read_csv(filepath)
except FileNotFoundError:
                wx.MessageDialog(self, 'Cannot open the data file.\nPlease check the file path again.',
'File Not Found!', wx.OK | wx.CENTER).ShowModal()
else:
wx.MessageDialog(self, 'No File Path Found!',
'Please enter/select the file path.',
wx.OK | wx.CENTER).ShowModal()
def OnCreateField(self, event):
columns = []
for c in self.field_attr.columns:
col = self.field_attr.get_column(c)
if col['keep']:
columns.append(col['alias'])
with wx.SingleChoiceDialog(
None, "Select a column", "Kept columns", columns) as dlg:
if dlg.ShowModal() == wx.ID_OK:
sel_col = dlg.GetStringSelection()
else:
return
if sel_col:
sel_col_index = self.field_attr.get_col_index(sel_col)
values = self.data_grid.table.df[sel_col].unique()
_df = pandas.DataFrame({'Value': values, 'Group': values})
with FieldCreateDialog() as fc:
fc.grid.set_table(_df)
resp = fc.ShowModal()
if resp == wx.ID_OK:
_agg_dict = {}
for idx, row in fc.grid.table.df.iterrows():
_agg_dict[row['Value']] = row['Group']
_agg_data = []
for value in self.data_grid.table.df[sel_col]:
_agg_data.append(_agg_dict[value])
new_col = '@' + fc.field_name.GetValue()
if new_col in self.field_attr.columns:
new_col += '-copy'
self.data_grid.table.df.insert(sel_col_index + 1, new_col, value=_agg_data)
self.data_grid.AutoSize()
self.data_grid_box_sizer.Layout()
self.data_grid.table.InsertCols(sel_col_index + 1, 1)
self.field_attr.columns.insert(sel_col_index + 1, new_col)
self.field_attr.data[new_col] = {
'name': new_col,
'alias': new_col,
'organism': False,
'key': False,
'drug': False,
'date': False,
'type': str(self.data_grid.table.df[new_col].dtype),
'keep': True,
'desc': "",
'aggregate': {
'from': sel_col,
'data': _agg_dict
}
}
self.refresh_field_attr_list_column()
# self.OnSaveProfile(None)
# self.onSaveToDatabaseMenuItemClick(None)
def reset_summary_table(self, desc):
self.summary_table.ClearAll()
self.summary_table.InsertColumn(0, 'Field')
self.summary_table.InsertColumn(1, 'Value')
for n, k in enumerate(desc.keys()):
self.summary_table.InsertItem(n, k)
self.summary_table.SetItem(n, 1, str(desc[k]))
def update_edit_panel(self, colname):
for cb in self.field_edit_checkboxes:
name = cb.GetName()
cb.SetValue(self.field_attr.get_column(colname)[name])
self.field_alias.SetValue(self.field_attr.get_column(colname)['alias'])
self.field_desc.SetValue(self.field_attr.get_column(colname)['desc'])
self.current_column = colname
def onFieldAttrListItemSelected(self, evt):
index = evt.GetIndex()
current_column = self.data_grid.table.df.columns[index]
desc = self.data_grid.table.df[self.current_column].describe()
self.reset_summary_table(desc=desc)
self.update_edit_panel(current_column)
self.data_grid.SelectCol(index)
def refresh_field_attr_list_column(self):
self.add_field_attr_list_column()
self.update_field_attrs()
def add_field_attr_list_column(self):
self.field_attr_list.ClearAll()
self.field_attr_list.InsertColumn(0, 'Field name')
self.field_attr_list.InsertColumn(1, 'Alias name')
self.field_attr_list.InsertColumn(2, 'Type')
self.field_attr_list.InsertColumn(3, 'Key')
self.field_attr_list.InsertColumn(4, 'Date')
self.field_attr_list.InsertColumn(5, 'Organism')
self.field_attr_list.InsertColumn(6, 'Drug')
self.field_attr_list.InsertColumn(7, 'Description')
self.field_attr_list.InsertColumn(8, 'Kept')
self.field_attr_list.SetColumnWidth(7, 300)
def update_field_attrs(self):
for n, c in enumerate(self.field_attr.columns):
col = self.field_attr.get_column(c)
self.field_attr_list.InsertItem(n, col['name'])
self.field_attr_list.SetItem(n, 1, col['alias'])
self.field_attr_list.SetItem(n, 2, col['type'])
self.field_attr_list.SetItem(n, 3, str(col['key']))
self.field_attr_list.SetItem(n, 4, str(col['date']))
self.field_attr_list.SetItem(n, 5, str(col['organism']))
self.field_attr_list.SetItem(n, 6, str(col['drug']))
self.field_attr_list.SetItem(n, 7, str(col['desc']))
self.field_attr_list.SetItem(n, 8, str(col['keep']))
def on_edit_save_button_clicked(self, event):
col_index = self.field_attr.get_col_index(self.current_column)
for cb in self.field_edit_checkboxes:
name = cb.GetName()
self.field_attr.get_column(self.current_column)[name] = cb.GetValue()
self.field_attr.get_column(self.current_column)['alias'] = self.field_alias.GetValue()
self.field_attr.get_column(self.current_column)['desc'] = self.field_desc.GetValue()
self.refresh_field_attr_list_column()
self.field_attr_list.Select(col_index)
self.field_attr_list.Focus(col_index)
def convert_to_flat(self, engine, startdate, enddate, deduplicate=True):
info_columns = []
dup_keys = []
        organism_column = None
        date_column = None
for colname in self.field_attr.columns:
column = self.field_attr.get_column(colname)
if column['keep']:
if column['key'] and not column['organism'] and not column['drug'] and deduplicate:
dup_keys.append(colname)
if column['organism']:
organism_column = column
elif column['date']:
date_column = colname
info_columns.append(column)
elif column['drug']:
continue
else:
info_columns.append(column)
if not organism_column:
with wx.MessageDialog(self,
"Please specify the organism column.",
"Export failed.",
wx.OK) as md:
md.ShowModal()
            wx.CallAfter(dispatcher.send, CLOSE_DIALOG_SIGNAL, rc=1)
            return
rf = pandas.read_sql_table('records', con=engine)
df = pandas.read_sql_table('drugs', con=engine)
dict_ = {}
for column in info_columns:
dict_[column['alias']] = self.data_grid.table.df[column['name']]
dict_['sur_key'] = rf['sur_key']
genuses = []
species = []
organisms = []
for org in self.data_grid.table.df[organism_column['name']]:
organisms.append(org)
org_item = self.field_attr.organisms.get(org, {'genus': org, 'species': org})
genuses.append(org_item.get('genus', org))
species.append(org_item.get('species', org))
dict_[organism_column['alias']] = organisms
dict_['genus'] = genuses
dict_['species'] = species
dict_['organism_name'] = [' '.join(item) for item in zip(genuses, species)]
def get_drug_group(x):
global drug_dict
return drug_dict.get(x.lower(), pandas.Series()).get('group', 'unspecified')
exported_data = pandas.DataFrame(dict_)
if deduplicate:
if not dup_keys:
with wx.MessageDialog(self,
"Please specify some key columns.",
"Export failed.",
wx.OK) as md:
md.ShowModal()
                wx.CallAfter(dispatcher.send, CLOSE_DIALOG_SIGNAL, rc=1)
                return
else:
                # TODO: inform the user if deduplication fails because no date column was found.
dup_keys.append('organism_name')
# dup_keys.append('drug')
if dup_keys and date_column:
exported_data = exported_data.sort_values(by=date_column)
exported_data = exported_data.drop_duplicates(
subset=dup_keys, keep='first'
)
else:
with wx.MessageDialog(self,
"Please specify a date column.",
"Export failed.",
wx.OK) as md:
md.ShowModal()
                    wx.CallAfter(dispatcher.send, CLOSE_DIALOG_SIGNAL, rc=1)
                    return
df['drugGroup'] = df['drug'].apply(lambda x: get_drug_group(x))
self.flat_dataframe = exported_data.merge(df, on='sur_key', how='inner')
del self.flat_dataframe['sur_key'] # remove surrogate key column
if startdate and enddate:
try:
self.flat_dataframe = self.flat_dataframe[
(self.flat_dataframe[date_column] >= startdate) & (self.flat_dataframe[date_column] <= enddate)]
except TypeError:
                wx.CallAfter(dispatcher.send, CLOSE_DIALOG_SIGNAL, rc=1)
                return
wx.CallAfter(dispatcher.send, CLOSE_DIALOG_SIGNAL, rc=0)
def OnExportRawData(self, event):
wildcard = "Excel (*.xlsx;*.xls)|*.xlsx;*.xls"
with wx.FileDialog(None, "Choose a file", os.getcwd(),
"", wildcard, wx.FC_SAVE) as file_dlg:
if file_dlg.ShowModal() == wx.ID_CANCEL:
return
else:
output_filepath = file_dlg.GetPath()
date_dlg = DateRangeFieldList(self)
if date_dlg.ShowModal() == wx.ID_OK:
deduplicate = date_dlg.deduplicate.IsChecked()
if not date_dlg.all.IsChecked():
startdate = map(int, date_dlg.startDatePicker.GetValue().FormatISODate().split('-'))
enddate = map(int, date_dlg.endDatePicker.GetValue().FormatISODate().split('-'))
startdate = pandas.Timestamp(*startdate)
enddate = pandas.Timestamp(*enddate)
else:
startdate = None
enddate = None
thread = Thread(target=self.convert_to_flat, args=(self.dbengine, startdate, enddate, deduplicate))
thread.start()
with NotificationBox(self, caption='Export Data',
message='Preparing data to export...') as nd:
result = nd.ShowModal()
if result > 0:
return
'''
for colname in self.field_attr.columns:
column = self.field_attr.get_column(colname)
if column['keep'] and column['date']:
date_column = colname
'''
# df = self.flat_dataframe
def write_to_excel(flat_df, output_filepath):
try:
flat_df.to_excel(output_filepath, engine='xlsxwriter', index=False)
except:
with wx.MessageDialog(None,
"Cannot save data to the output file.",
"Export failed.",
wx.OK) as md:
md.ShowModal()
wx.CallAfter(dispatcher.send, CLOSE_DIALOG_SIGNAL, rc=1)
else:
with wx.MessageDialog(None,
"Data have been export to Excel as a flat table.",
"Export succeeds.",
wx.OK) as md:
md.ShowModal()
wx.CallAfter(dispatcher.send, CLOSE_DIALOG_SIGNAL, rc=0)
thread = Thread(target=write_to_excel, args=(self.flat_dataframe, output_filepath))
thread.start()
with NotificationBox(self, caption='Writing Data',
message='Writing data to Excel file...') as nd:
result = nd.ShowModal()
if result > 0:
return
def onSaveToFlatDbMenuItemClick(self, event, action='replace'):
style = wx.FD_SAVE
if not self.profile_filepath:
wx.MessageDialog(None, "No profile path specified.",
"Please save a profile to a file or load a profile to the session before continue.",
wx.OK).ShowModal()
return
# Select date range to export data
date_dlg = DateRangeFieldList(self)
if date_dlg.ShowModal() == wx.ID_OK:
deduplicate = date_dlg.deduplicate.IsChecked()
if not date_dlg.all.IsChecked():
startdate = map(int, date_dlg.startDatePicker.GetValue().FormatISODate().split('-'))
enddate = map(int, date_dlg.endDatePicker.GetValue().FormatISODate().split('-'))
startdate = pandas.Timestamp(*startdate)
enddate = pandas.Timestamp(*enddate)
else:
startdate = None
enddate = None
if self.dbengine:
thread = Thread(target=self.convert_to_flat, args=(self.dbengine, startdate, enddate, deduplicate))
thread.start()
with NotificationBox(self, caption='Export Data',
message='Preparing data to export...'
) as nd:
result = nd.ShowModal()
if result == 1:
wx.MessageDialog(None, "Could not save data to the database.",
"Export failed.",
                                     wx.OK).ShowModal()
                    return
if result == 2:
wx.MessageDialog(None, "Could not save the profile data to the database.",
"Export failed.",
                                     wx.OK).ShowModal()
                    return
else:
wx.MessageDialog(None, "Please first save data to the database.",
"Export failed.",
wx.OK).ShowModal()
return
with wx.FileDialog(None, "Choose an SQLite data file",
wildcard='SQLite files (*.sqlite;*.db)|*.sqlite;*.db',
style=style) \
as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
else:
dw_filepath = fileDialog.GetPath()
if dw_filepath:
dwengine = sa.create_engine('sqlite:///{}'.format(dw_filepath))
try:
self.flat_dataframe['added_at'] = datetime.utcnow()
self.flat_dataframe.to_sql('facts', con=dwengine, if_exists=action, index=False)
except IOError:
wx.MessageDialog(self, "Error occurred while saving the data to the database.",
"Failed to save the data.",
wx.OK).ShowModal()
return
metadata = pandas.DataFrame({'profile': [self.profile_filepath], 'updatedAt': [datetime.utcnow()]})
try:
metadata.to_sql('metadata', con=dwengine, if_exists='replace', index=False)
except IOError:
wx.MessageDialog(self, "Error occurred while saving the metadata to the database.",
"Failed to save the metadata.",
wx.OK).ShowModal()
return
wx.MessageDialog(self, "Data have been exported to the database.",
"Finished.",
wx.OK).ShowModal()
def onSaveToDatabaseMenuItemClick(self, event, action='replace'):
if not self.profile_filepath:
with wx.MessageDialog(None, message='Please save the profile to a file first.',
caption='Profile file not found error.',
style=wx.OK | wx.CENTER) as msgDialog:
msgDialog.ShowModal()
return
if action == 'append' or not self.dbengine:
with wx.FileDialog(None, "Choose or specify a database file",
wildcard='SQLite files (*.sqlite;*.db)|*.sqlite;*.db',
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) \
as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
else:
self.db_filepath = fileDialog.GetPath()
if self.db_filepath:
if action == 'replace':
with wx.MessageDialog(None,
"Are you sure you want to write to {}".format(self.db_filepath),
"Database is about to be overwritten.",
wx.OK | wx.CANCEL) as msgDialog:
ret = msgDialog.ShowModal()
if ret == wx.ID_CANCEL:
return
elif action == 'append':
with wx.MessageDialog(None,
"Are you sure you want to write to {}".format(self.db_filepath),
"Database is about to be modified.",
wx.OK | wx.CANCEL) as msgDialog:
ret = msgDialog.ShowModal()
if ret == wx.ID_CANCEL:
return
metadata = pandas.DataFrame({'profile': [self.profile_filepath], 'updatedAt': [datetime.utcnow()]})
self.dbfile_lbl.SetLabelText('Database filepath {} CONNECTED'.format(self.db_filepath))
self.dbengine = sa.create_engine('sqlite:///{}'.format(self.db_filepath))
# add surrogate keys
try:
records_df = | pandas.read_sql_table('records', con=self.dbengine) | pandas.read_sql_table |
'''
This code is based on KMiura.io's code.
https://github.com/code4nagoya/covid19-scrape
MIT License
Copyright (c) 2020 KMiura.io
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import camelot
import codecs
import json
import os
import pandas as pd
import re
import traceback
import urllib.request
from bs4 import BeautifulSoup
from datetime import datetime
base_url = "https://www.pref.aichi.jp"
outdir = './data'
if not os.path.exists(outdir):
os.mkdir(outdir)
def findpath(url, searchWord):
page_url = base_url + url
raw_html = urllib.request.urlopen(page_url)
soup = BeautifulSoup(raw_html, "html.parser")
for aa in soup.find_all("a"):
link = aa.get("href")
name = aa.get_text()
if searchWord in name:
table_link = link
if "Excelファイル" in name:
ext = "xlsx"
                # An Excel file is definitive, so stop searching
break
elif "PDFファイル" in name:
ext = "pdf"
return table_link, ext
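# Illustrative usage (mirrors the calls in __main__ below): findpath returns the href of
# the first matching link and whether it points to an Excel or PDF file, e.g.
#   path, ext = findpath("/site/covid19-aichi/kansensya-kensa.html", "7月まで")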
def convert_pdf(FILE_PATH, pdf_path, csv_path):
    # Download the latest PDF
page_url = base_url + FILE_PATH
with urllib.request.urlopen(page_url) as b:
with open(pdf_path, "bw") as f:
f.write(b.read())
tables = camelot.read_pdf(
pdf_path, pages="1-end", split_text=True,
strip_text="\n", line_scale=40)
    # Save as CSV
df_csv = pd.concat([table.df for table in tables])
df_csv.to_csv(csv_path, index=False, header=False)
df = pd.read_csv(csv_path, parse_dates=["発表日"], date_parser=my_parser)
df = add_date(df).fillna("")
str_index = pd.Index([str(num) for num in list(df.index)])
df = df.set_index(str_index)
# df.index.name = "No"
# print(df)
return df
def convert_xlsx(FILE_PATH, xlsx_path):
    # Download the latest Excel file
page_url = base_url + FILE_PATH
with urllib.request.urlopen(page_url) as b:
with open(xlsx_path, "bw") as f:
f.write(b.read())
df = pd.read_excel(xlsx_path, header=2, index_col=None, dtype={2: object})
df["発表日"] = df["発表日"].apply(exceltime2datetime)
df = df.replace(0,"")
df = add_date(df).fillna("")
str_index = pd.Index([str(num) for num in list(df.index)])
df = df.set_index(str_index)
# print(df)
# exit()
return df
def add_date(df):
basedate = df["発表日"]
df["発表日"] = basedate.dt.strftime("%Y/%m/%d %H:%M")
df["date"] = basedate.dt.strftime("%Y-%m-%d")
df["w"] = [str(int(w)+1) if int(w)+1 != 7 else "0"
for w in basedate.dt.dayofweek]
df["short_date"] = basedate.dt.strftime("%m\\/%d")
return df
def my_parser(s):
y = datetime.now().year
m, d = map(int, re.findall("[0-9]{1,2}", s))
return pd.Timestamp(year=y, month=m, day=d)
def exceltime2datetime(et):
if et < 60:
days = pd.to_timedelta(et - 1, unit='days')
else:
days = pd.to_timedelta(et - 2, unit='days')
return pd.to_datetime('1900/1/1') + days
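# Worked example for the conversion above (illustrative): Excel serial 1 maps to
# 1900-01-01 and serial 61 to 1900-03-01; the offset switches at 60 because Excel
# treats 1900 as a leap year, so serial 60 has no real calendar date.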
if __name__ == "__main__":
FILE_PATH1, extension1 = findpath("/site/covid19-aichi/kansensya-kensa.html", "7月まで")
FILE_PATH2, extension2 = findpath("/site/covid19-aichi/kansensya-kensa.html", "8月以降")
try:
if extension1 == "xlsx":
df1 = convert_xlsx(FILE_PATH1, "./data/source1.xlsx")
elif extension1 == "pdf":
df1 = convert_pdf(FILE_PATH1, "./data/source1.pdf", "./data/source1.csv")
else:
exit()
if extension2 == "xlsx":
df2 = convert_xlsx(FILE_PATH2, "./data/source2.xlsx")
elif extension2 == "pdf":
df2 = convert_pdf(FILE_PATH2, "./data/source2.pdf", "./data/source2.csv")
else:
exit()
df = | pd.concat([df1, df2]) | pandas.concat |
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
date_range(start=pd.NaT, end="2016-01-01", freq="D")
def test_date_range_multiplication_overflow(self):
# GH#24255
# check that overflows in calculating `addend = periods * stride`
# are caught
with tm.assert_produces_warning(None):
            # we should _not_ be seeing an overflow RuntimeWarning
dti = date_range(start="1677-09-22", periods=213503, freq="D")
assert dti[0] == Timestamp("1677-09-22")
assert len(dti) == 213503
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("1969-05-04", periods=200000000, freq="30000D")
def test_date_range_unsigned_overflow_handling(self):
# GH#24255
# case where `addend = periods * stride` overflows int64 bounds
# but not uint64 bounds
dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
dti2 = date_range(start=dti[0], periods=len(dti), freq="D")
assert dti2.equals(dti)
dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")
assert dti3.equals(dti)
def test_date_range_int64_overflow_non_recoverable(self):
# GH#24255
# case with start later than 1970-01-01, overflow int64 but not uint64
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start="1970-02-01", periods=106752 * 24, freq="H")
# case with end before 1970-01-01, overflow int64 but not uint64
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1969-11-14", periods=106752 * 24, freq="H")
def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
# cases where stride * periods overflow int64 and stride/endpoint
# have different signs
start = Timestamp("2262-02-23")
end = Timestamp("1969-11-14")
expected = date_range(start=start, end=end, freq="-1H")
assert expected[0] == start
assert expected[-1] == end
dti = date_range(end=end, periods=len(expected), freq="-1H")
tm.assert_index_equal(dti, expected)
start2 = Timestamp("1970-02-01")
end2 = Timestamp("1677-10-22")
expected2 = date_range(start=start2, end=end2, freq="-1H")
assert expected2[0] == start2
assert expected2[-1] == end2
dti2 = date_range(start=start2, periods=len(expected2), freq="-1H")
tm.assert_index_equal(dti2, expected2)
def test_date_range_out_of_bounds(self):
# GH#14187
msg = "Cannot generate range"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("2016-01-01", periods=100000, freq="D")
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1763-10-12", periods=100000, freq="D")
def test_date_range_gen_error(self):
rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")
assert len(rng) == 4
@pytest.mark.parametrize("freq", ["AS", "YS"])
def test_begin_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
freq=freq,
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["A", "Y"])
def test_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["BA", "BY"])
def test_business_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
)
| tm.assert_index_equal(rng, exp) | pandas._testing.assert_index_equal |
from context import dero
import dero.data.ff.create.sort as ff_sort
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
import datetime
class DataFrameTest:
df_3_fac = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, .51, 1000),
(10516, 'a', '1/2/2000', 1.02, .52, 2000),
(10516, 'a', '1/3/2000', 1.03, .53, 3000),
(10516, 'a', '1/4/2000', 1.04, .54, 4000),
(10516, 'b', '1/1/2000', 1.05, 1.55, 50000),
(10516, 'b', '1/2/2000', 1.06, 1.56, 60000),
(10516, 'b', '1/3/2000', 1.07, 1.57, 70000),
(10516, 'b', '1/4/2000', 1.08, 1.58, 80000),
(10517, 'a', '1/1/2000', 1.09, .59, 9000),
(10517, 'a', '1/2/2000', 1.10, .60, 10000),
(10517, 'a', '1/3/2000', 1.11, .61, 11000),
(10517, 'a', '1/4/2000', 1.12, .62, 12000),
(10517, 'b', '1/1/2000', 1.13, .63, 13000),
(10517, 'b', '1/2/2000', 1.14, .64, 14000),
(10517, 'b', '1/3/2000', 1.15, .65, 15000),
(10517, 'b', '1/4/2000', 1.16, .66, 16000),
(10518, 'a', '1/1/2000', 1.17, .67, 17000),
(10518, 'a', '1/2/2000', 1.18, .68, 18000),
(10518, 'a', '1/3/2000', 1.19, .69, 19000),
(10518, 'a', '1/4/2000', 1.20, .70, 20000),
(10518, 'b', '1/1/2000', 1.21, .71, 21000),
(10518, 'b', '1/2/2000', 1.22, .72, 22000),
(10518, 'b', '1/3/2000', 1.23, .73, 23000),
(10518, 'b', '1/4/2000', 1.24, .74, 24000),
], columns=['PERMNO', 'byvar', 'Date', 'RET', 'be/me', 'me'])
df_3_fac['Date'] = pd.to_datetime(df_3_fac['Date'])
class TestCalculateFFFactors(DataFrameTest):
def test_create_portfolios(self):
expect_df = pd.DataFrame(data=[
(10516, 'a', Timestamp('2000-01-01 00:00:00'), 1.01, 0.51, 1000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-02 00:00:00'), 1.02, 0.52, 2000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-03 00:00:00'), 1.03, 0.53, 3000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-04 00:00:00'), 1.04, 0.54, 4000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'b', Timestamp('2000-01-01 00:00:00'), 1.05, 1.55, 50000, 3, Timestamp('2000-01-01 00:00:00'), 2,
Timestamp('2000-01-01 00:00:00')),
(10516, 'b', Timestamp('2000-01-02 00:00:00'), 1.06, 1.56, 60000, 3, Timestamp('2000-01-01 00:00:00'), 2,
Timestamp('2000-01-01 00:00:00')),
(10516, 'b', Timestamp('2000-01-03 00:00:00'), 1.07, 1.57, 70000, 3, Timestamp('2000-01-01 00:00:00'), 2,
Timestamp('2000-01-01 00:00:00')),
(10516, 'b', Timestamp('2000-01-04 00:00:00'), 1.08, 1.58, 80000, 3, Timestamp('2000-01-01 00:00:00'), 2,
Timestamp('2000-01-01 00:00:00')),
(10517, 'a', Timestamp('2000-01-01 00:00:00'), 1.09, 0.59, 9000, 2, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10517, 'a', Timestamp('2000-01-02 00:00:00'), 1.1, 0.6, 10000, 2, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10517, 'a', Timestamp('2000-01-03 00:00:00'), 1.11, 0.61, 11000, 2, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10517, 'a', Timestamp('2000-01-04 00:00:00'), 1.12, 0.62, 12000, 2, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10517, 'b', Timestamp('2000-01-01 00:00:00'), 1.13, 0.63, 13000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10517, 'b', Timestamp('2000-01-02 00:00:00'), 1.14, 0.64, 14000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10517, 'b', Timestamp('2000-01-03 00:00:00'), 1.15, 0.65, 15000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10517, 'b', Timestamp('2000-01-04 00:00:00'), 1.16, 0.66, 16000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10518, 'a', Timestamp('2000-01-01 00:00:00'), 1.17, 0.67, 17000, 3, Timestamp('2000-01-01 00:00:00'), 2,
Timestamp('2000-01-01 00:00:00')),
(10518, 'a', | Timestamp('2000-01-02 00:00:00') | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import math
from .davgis.functions import (Spatial_Reference, List_Datasets, Clip,
Resample, Raster_to_Array, NetCDF_to_Raster)
import os
import tempfile
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta,
epsg=4326, fill_val=-9999.0,
rasters_path_out=None, export_hants_only=False):
'''
This function runs the python implementation of the HANTS algorithm. It
    takes a folder of GeoTIFF rasters as input, creates a netcdf
    file, and optionally exports the data back to GeoTIFFs.
'''
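    # Parameter notes (summary based on the usual HANTS conventions; treat as a guide,
    # not a specification): nb = length of the base period (e.g. 365 for daily data),
    # nf = number of frequencies kept in the fit, HiLo = 'Hi'/'Lo' outlier rejection,
    # low/high = valid data range, fet = fit error tolerance, dod = degree of
    # overdeterminedness, delta = damping factor of the least-squares fit.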
create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg, fill_val)
HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val)
#if rasters_path_out:
#export_tiffs(rasters_path_out, nc_path, name_format, export_hants_only)
return nc_path
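# Illustrative call (assumption: paths, extent and HANTS settings below are placeholders):
#
#   run_HANTS(rasters_path_inp='/data/ndvi_tifs', name_format='NDVI_{0}.tif',
#             start_date='2016-01-01', end_date='2016-12-31',
#             latlim=[-4.0, -2.0], lonlim=[36.0, 38.0], cellsize=0.01,
#             nc_path='/data/ndvi_hants.nc',
#             nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
#             fet=0.05, dod=1, delta=0.25)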
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg=4326, fill_val=-9999.0):
'''
    This function creates a netcdf file from a folder of GeoTIFF rasters to
be used to run HANTS.
'''
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
cellsize)
    lat_ls = lat_ls[::-1]  # flip to north-to-south row order (ArcGIS / numpy raster convention)
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = pd.date_range(start_date, end_date, freq='D')
dates_ls = [d.strftime('%Y%m%d') for d in dates_dt]
ras_ls = List_Datasets(rasters_path, 'tif')
# Cell code
temp_ll_ls = [pd.np.arange(x, x + lon_n)
for x in range(1, lat_n*lon_n, lon_n)]
code_ls = pd.np.array(temp_ll_ls)
empty_vec = pd.np.empty((lat_n, lon_n))
empty_vec[:] = fill_val
# Create netcdf file
print('Creating netCDF file...')
nc_file = netCDF4.Dataset(nc_path, 'w', format="NETCDF4")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_n)
lon_dim = nc_file.createDimension('longitude', lon_n)
time_dim = nc_file.createDimension('time', len(dates_ls))
# Create Variables
crs_var = nc_file.createVariable('crs', 'i4')
crs_var.grid_mapping_name = 'latitude_longitude'
crs_var.crs_wkt = spa_ref
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude'),
fill_value=fill_val)
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude'),
fill_value=fill_val)
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time'),
fill_value=fill_val)
time_var.standard_name = 'time'
time_var.calendar = 'gregorian'
code_var = nc_file.createVariable('code', 'i4', ('latitude', 'longitude'),
fill_value=fill_val)
outliers_var = nc_file.createVariable('outliers', 'i4',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
outliers_var.long_name = 'outliers'
original_var = nc_file.createVariable('original_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
original_var.long_name = 'original values'
hants_var = nc_file.createVariable('hants_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
hants_var.long_name = 'hants values'
combined_var = nc_file.createVariable('combined_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
combined_var.long_name = 'combined values'
print('\tVariables created')
# Load data
lat_var[:] = lat_ls
lon_var[:] = lon_ls
time_var[:] = dates_ls
code_var[:] = code_ls
# temp folder
temp_dir = tempfile.mkdtemp()
bbox = [lonlim[0], latlim[0], lonlim[1], latlim[1]]
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(len(dates_ls)):
# Raster
ras = name_format.format(dates_ls[tt])
if ras in ras_ls:
# Resample
ras_resampled = os.path.join(temp_dir, 'r_' + ras)
Resample(os.path.join(rasters_path, ras), ras_resampled, cellsize)
# Clip
ras_clipped = os.path.join(temp_dir, 'c_' + ras)
Clip(ras_resampled, ras_clipped, bbox)
# Raster to Array
array = Raster_to_Array(ras_resampled,
ll_corner, lon_n, lat_n,
values_type='float32')
# Store values
original_var[:, :, tt] = array
else:
# Store values
original_var[:, :, tt] = empty_vec
# Close file
nc_file.close()
print('NetCDF file created')
# Return
return nc_path
def HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netcdf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
# Read netcdfs
nc_file = netCDF4.Dataset(nc_path, 'r+')
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[rows, cols, ztime] = original_values.shape
size_st = cols*rows
values_hants = pd.np.empty((rows, cols, ztime))
outliers_hants = pd.np.empty((rows, cols, ztime))
values_hants[:] = pd.np.nan
outliers_hants[:] = pd.np.nan
# Additional parameters
ni = len(time_var)
ts = range(ni)
# Loop
counter = 1
print('Running HANTS...')
for m in range(rows):
for n in range(cols):
print('\t{0}/{1}'.format(counter, size_st))
y = pd.np.array(original_values[m, n, :])
y[pd.np.isnan(y)] = fill_val
[yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
low, high, fet, dod, delta, fill_val)
values_hants[m, n, :] = yr
outliers_hants[m, n, :] = outliers
counter = counter + 1
nc_file.variables['hants_values'][:] = values_hants
nc_file.variables['outliers'][:] = outliers_hants
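    # Combined series: keep the HANTS reconstruction wherever an outlier was
    # flagged, otherwise keep the original observation.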
nc_file.variables['combined_values'][:] = pd.np.where(outliers_hants,
values_hants,
original_values)
# Close netcdf file
nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
delta, fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm for a
single point (lat, lon). It plots the fit and returns a data frame with
the 'original' and the 'hants' time series.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r')
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netcdf file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
lat_i = pd.np.where(lat == lat_closest)[0][0]
lon_i = pd.np.where(lon == lon_closest)[0][0]
# Read values
original_values = nc_file.variables['original_values'][lat_i, lon_i, :]
# Additional parameters
ni = len(time)
ts = range(ni)
# HANTS
y = pd.np.array(original_values)
y[pd.np.isnan(y)] = fill_val
[hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
dod, delta, fill_val)
# Plot
top = 1.15*max(pd.np.nanmax(original_values),
pd.np.nanmax(hants_values))
bottom = 1.15*min(pd.np.nanmin(original_values),
| pd.np.nanmin(hants_values) | pandas.np.nanmin |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 31 21:15:57 2018
@author: Chinmay
"""
# Import all the necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import ast
from pyspark.sql.types import StringType
from pyspark import SQLContext , SparkContext
from pyspark.sql.session import SparkSession
# Create a Spark Context
sc = SparkContext('local','first_SPARK') # If using locally
sqlContext = SQLContext(sc)
spark = SparkSession(sc)
# sc.stop()
# Load all the data into Spark Data Frame
df = (spark.read.format("csv").options(header="true" , inferSchema = True ).load("D:/Dbda/Project/Final Draft/Chinmay/Final/final_all_3.0.csv"))
batsman_all_data= (spark.read.format("csv").options(header="true" , inferSchema = True).load("D:/Dbda/Project/Final Draft/Chinmay/Final/all_batsmans_data_2.0.csv"))
# List of the teams
teams = ['Australia' , 'New Zealand' , 'India' , 'Zimbabwe' , 'Bangladesh' , 'South Africa' , 'England'
, 'Sri Lanka' , 'Pakistan' , 'West Indies' , 'Ireland']
match_type = 'ODI'
team1 = 'India'
team2 = 'Australia'
team_1 = [ '<NAME>' , '<NAME>' , '<NAME>' , '<NAME>', '<NAME>', '<NAME>' , 'HH Pandya' , '<NAME>' , '<NAME>' , '<NAME>' , 'YZ Chahal']
team_2 = ['<NAME>' , '<NAME>' , '<NAME>' , 'T Head' , '<NAME>' , '<NAME>' , '<NAME>' , '<NAME>' , '<NAME>' , 'NM Coulter-Nile' , '<NAME>']
bowlers_1 = ['<NAME>' , '<NAME>' , '<NAME>' , '<NAME>' , '<NAME>']
bowlers_2 = ['<NAME>' , '<NAME>' , 'NM Coulter-Nile' , '<NAME>', '<NAME>', 'T Head']
#bowlers_2 = ['<NAME>' , '<NAME>' , 'JT Ball' , '<NAME>' , '<NAME>' , '<NAME>' , 'JE Root']
#players = df.filter(df['`team`'].rlike('England')).select('batsman').distinct()
#df_batsman = pd.DataFrame(df.filter(df['`team`'].rlike('England')).select('batsman').distinct().collect())
#df_bolwer = pd.DataFrame(df.filter(df['`team`'].rlike('England')).select('bowler').distinct().collect())
import math
from sklearn.naive_bayes import GaussianNB
import numpy as np
from sklearn.model_selection import train_test_split
from pyspark.sql import SparkSession
from pyspark.sql.functions import isnan,isnull,when,count
from sklearn.ensemble import RandomForestRegressor
# Function to predict Player Score
def score_predict(batsman , bowlers , team1 , team2 , match_type):
#batsman = '<NAME>'
#bowlers = ['<NAME>' , '<NAME>' , 'NM Coulter-Nile' , '<NAME>', '<NAME>', 'T Head']
############ Prediction Based On Bowlers ##############################
batsman_data = batsman_all_data[(batsman_all_data['name'] == batsman) & (batsman_all_data['match_type'] == match_type)]
batsman_data_team = batsman_data[(batsman_data['against'] == team2)]
if batsman_data_team.select('match_id').count() == 0:
print("No Data Found against" , team2)
return 0,0,0,0,0,0,0,0,0
batsman_data_team = batsman_data_team.toPandas()
batsman_data_team['bowler_encoded'] = batsman_data_team['bowler'].astype('category').cat.codes
A = batsman_data_team.loc[:, ['balls' , 'bowler_encoded' , 'home_away']].values
B = batsman_data_team.loc[:, 'runs_scored'].values
A = pd.DataFrame(A)
B = pd.DataFrame(B)
score_pred = []
score_act_pred = []
score_act = []
for bowler in bowlers:
bowl = []
test = []
if batsman_data_team[(batsman_data_team['bowler'] == bowler)]['bowler_encoded'].empty:
continue
encoded_bowl = batsman_data_team[(batsman_data_team['bowler'] == bowler)]['bowler_encoded'].index
encoded_bowler = batsman_data_team[(batsman_data_team['bowler'] == bowler)]['bowler_encoded'].iloc[0]
if not encoded_bowler:
continue
X_train = A.loc[A.index != list(encoded_bowl)[-1]].values
y_train = B.loc[B.index != list(encoded_bowl)[-1]].values
X_test = A.loc[A.index == list(encoded_bowl)[-1]].values
y_test = B.loc[B.index == list(encoded_bowl)[-1]].values
y_test = y_test.tolist()
if batsman_data_team[(batsman_data_team['bowler_encoded'] == encoded_bowler) & (batsman_data_team['balls'] != 0)]['balls'].count() == 0:
continue
avg = batsman_data_team[(batsman_data_team['bowler_encoded'] == encoded_bowler) & (batsman_data_team['balls'] != 0)]['balls'].sum()/batsman_data_team[(batsman_data_team['bowler_encoded'] == encoded_bowler) & (batsman_data_team['balls'] != 0)]['balls'].count()
bowl.append(avg)
bowl.append(encoded_bowler)
test.append(bowl)
model = GaussianNB()
model.fit(X_train, y_train)
regressor = RandomForestRegressor(max_depth = 2, min_samples_split=2, n_estimators = 100, random_state = 1)
regressor.fit(X_train,y_train)
if not test:
predicted2 = 0
else:
#predicted2= model.predict(test)
#predicted = model.predict(X_test)
predicted = regressor.predict(X_test)
#score_pred.append(sum(predicted2))
score_act_pred.append(sum(predicted))
score_act.append(y_test[0][0])
#predicted = sum(score_pred)
predicted = math.ceil(sum(score_act_pred))
predicted2 = math.ceil(sum(score_act))
#print(batsman , "will score against bowlers predicted" , sum(score_pred))
print(batsman , "will score against bowlers predicted" , predicted)
print(batsman , "will score against bowlers " , predicted2)
    ############### Prediction Based On Against Team Record #########################
from pyspark.sql.functions import col
df_team = df.filter(df['`info.teams`'].rlike('India'))
df_team = df_team.toPandas()
df_team['info.teams'] = df_team['info.teams'].str.strip().apply(ast.literal_eval)
match_index = df_team['index_all'].unique()
batsman_data = | pd.DataFrame() | pandas.DataFrame |
import streamlit as st
import streamlit.components.v1 as components
from streamlit_folium import folium_static
# import folium
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import immo
import ssl
# to avoid SSLCertVerificationError
ssl._create_default_https_context = ssl._create_unverified_context
# search from data.gouv.fr geo-dvf
@st.cache
def load_data(code_commune):
# json_data = immo.dvf_commune(postcode)
url = "https://files.data.gouv.fr/geo-dvf/latest/csv/2021/communes/"+str(code_commune)[:2]+"/"+str(code_commune)+".csv"
df = pd.read_csv(url)
df.date_mutation = | pd.to_datetime(df.date_mutation) | pandas.to_datetime |
import io
import gzip
import networkx as nx
import numpy as np
import pandas as pd
import random
import requests
import scipy as sc
import scipy.io
from sklearn import cluster, manifold, linear_model, metrics
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import StandardScaler
import time
import umap
import warnings
import csrgraph as cg
def make_blogcatalog(edgelist="../data/blogcatalog.mat",
dedupe=True):
"""
Graph with cluster labels from blogcatalog
Dedupe: Whether to deduplicate results (else some nodes have multilabels)
"""
mat = scipy.io.loadmat(edgelist)
nodes = mat['network'].tocsr()
groups = mat['group']
G = nx.from_scipy_sparse_matrix(nodes)
labels = (
pd.DataFrame(groups.todense())
.idxmax(axis=1)
.reset_index(drop=False)
)
labels.columns = ['node', 'label']
labels.node = labels.node.astype(int)
if dedupe:
labels = labels.loc[~labels.node.duplicated()
].reset_index(drop=True)
labels.label = labels.label.astype(int) - 1
return G, labels
else:
df = pd.DataFrame(groups.todense())
labels_list = df.apply(lambda row: list((row.loc[row > 0]).index), axis=1)
return G, pd.DataFrame({'node': list(G), 'mlabels': pd.Series(labels_list)})
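# Usage sketch (the .mat path is the module default and may need adjusting
# for your checkout):
def _example_blogcatalog():
    G, labels = make_blogcatalog(dedupe=True)       # one label per node
    _, mlabels = make_blogcatalog(dedupe=False)     # multilabel frame
    return G, labels, mlabels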
def get_karateclub(graph_name):
"""
Gets formatted dataset from KarateClub library
https://karateclub.readthedocs.io
"""
try:
from karateclub import GraphReader
    except ImportError:
raise Exception(
"get_karateclub requires the karateclub library!\n"
+ "Try 'pip install karateclub'\n"
+ "see https://github.com/benedekrozemberczki/KarateClub")
G = GraphReader(graph_name).get_graph()
y = GraphReader(graph_name).get_target()
return G, pd.DataFrame({'node': list(G), 'label': pd.Series(y)})
def make_email():
"""
Graph from university emails, clustered by departments
Data from http://snap.stanford.edu/data/email-Eu-core.html
Edge list Format
"""
res = requests.get('http://snap.stanford.edu/data/email-Eu-core.txt.gz', verify=False)
edges = gzip.GzipFile(fileobj=io.BytesIO(res.content))
edges = pd.read_csv(io.StringIO(edges.read().decode()), header=None, sep=' ')
edges.columns = ['src', 'dest']
# cluster labels per node
res = requests.get('http://snap.stanford.edu/data/email-Eu-core-department-labels.txt.gz', verify=False)
labels = gzip.GzipFile(fileobj=io.BytesIO(res.content))
labels = pd.read_csv(io.StringIO(labels.read().decode()), header=None, sep=' ')
labels.columns = ['node', 'cluster']
G = nx.Graph()
G.add_edges_from([(t.src, t.dest) for t in edges.itertuples()])
return G, pd.DataFrame({'node': list(G), 'label': labels.cluster})
def get_from_snap(url="http://snap.stanford.edu/data/wiki-Vote.txt.gz",
sep='\t', header=None, comment='#'):
"""
Download graph from SNAP dataset
"""
res = requests.get(url, verify=False)
edges = gzip.GzipFile(fileobj=io.BytesIO(res.content))
edges = pd.read_csv(io.StringIO(edges.read().decode()),
header=header, sep=sep, comment=comment)
edges.columns = ['src', 'dest']
G = nx.Graph()
G.add_edges_from([(t.src, t.dest) for t in edges.itertuples()])
return G
#############################
# #
# RNG DATASETS #
# #
#############################
def make_cluster_graph(
n_nodes, n_clusters,
connections=1, drop_pct=0.1,
max_edge_weight=None):
"""
Makes distinct complete subgraphs
connected by random paths
n_nodes (int): number of nodes
n_clusters (int): number of clusters
This is also the number of disjoint subgraphs
connections (int): number of random connections
These join the disjoint subgraphs
"""
div = int(n_nodes / n_clusters)
subgraph_sizes = [div] * n_clusters
# last cluster has remainder nodes
subgraph_sizes[-1] = subgraph_sizes[-1] + (n_nodes % n_clusters)
# Make G from disjoint subgraphs
G = nx.complete_graph(subgraph_sizes[0])
for i in range(1, len(subgraph_sizes)):
G = nx.disjoint_union(G, nx.complete_graph(subgraph_sizes[i]))
# connecting paths
for i in range(connections):
while True:
c1, c2 = np.random.randint(n_nodes, size=2)
if G.has_edge(c1, c2):
continue
G.add_edge(c1, c2)
break
# Drop random edges
n_edges = len(G.edges)
to_remove=random.sample(G.edges(),
k=int(n_edges * drop_pct))
G.remove_edges_from(to_remove)
# Generate labels
labels = []
for i in range(len(subgraph_sizes)):
labels.append([i] * subgraph_sizes[i])
labels = sum(labels, [])
assert len(labels) == n_nodes, f"{labels}"
assert len(set(labels)) == n_clusters, f"{labels}"
return G, pd.DataFrame({'node': list(G), 'label': pd.Series(labels)})
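# Minimal usage sketch (node count, cluster count and noise level are
# arbitrary illustration values):
def _example_cluster_graph():
    G, labels = make_cluster_graph(n_nodes=90, n_clusters=3,
                                   connections=5, drop_pct=0.1)
    print(len(G), 'nodes,', len(G.edges), 'edges')
    print(labels['label'].value_counts())
    return G, labels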
def make_weighed_cluster_graph(
n_nodes, n_clusters,
connections=1, drop_pct=0.1,
max_edge_weight=10):
"""
Makes distinct complete subgraphs
connected by random paths
n_nodes (int): number of nodes
n_clusters (int): number of clusters
This is also the number of disjoint subgraphs
connections (int): number of random connections
These join the disjoint subgraphs
"""
div = int(n_nodes / n_clusters)
subgraph_sizes = [div] * n_clusters
# last cluster has remainder nodes
subgraph_sizes[-1] = subgraph_sizes[-1] + (n_nodes % n_clusters)
# Make G from disjoint subgraphs
G = nx.complete_graph(subgraph_sizes[0])
for i in range(1, len(subgraph_sizes)):
G = nx.disjoint_union(G, nx.complete_graph(subgraph_sizes[i]))
for eg in G.edges:
G[eg[0]][eg[1]]['weight'] = np.random.randint(0, max_edge_weight)
# connecting paths
for i in range(connections):
while True:
c1, c2 = np.random.randint(n_nodes, size=2)
if G.has_edge(c1, c2):
continue
G.add_edge(c1, c2)
G[c1][c2]['weight'] = np.random.randint(0, max_edge_weight)
break
# Drop random edges
n_edges = len(G.edges)
to_remove=random.sample(G.edges(),
k=int(n_edges * drop_pct))
G.remove_edges_from(to_remove)
# Generate labels
labels = []
for i in range(len(subgraph_sizes)):
labels.append([i] * subgraph_sizes[i])
labels = sum(labels, [])
assert len(labels) == n_nodes, f"{labels}"
assert len(set(labels)) == n_clusters, f"{labels}"
return G, pd.DataFrame({'node': list(G), 'label': pd.Series(labels)})
#############################
# #
# BIO DATASETS #
# #
#############################
def read_bionev_labels(filename):
"""
Reads multilabels in BioNEV format
eg. node label1 label2 ... labeln
ex.
1 5 8 99 103
2 4
3 9 192 777
Returns pd.DataFrame with nodeID
"""
fin = open(filename, 'r')
node_list = []
labels = []
while 1:
l = fin.readline()
if l == '':
break
vec = l.strip().split()
node_list.append(int(vec[0]))
labels.append([int(x) for x in vec[1:]])
fin.close()
res = pd.DataFrame({'nodes': node_list, 'mlabels': | pd.Series(labels) | pandas.Series |
import pytest
import numpy as np
import pandas as pd
from datetime import datetime
from pandas.util import testing as tm
from pandas import DataFrame, MultiIndex, compat, Series, bdate_range, Index
def test_apply_issues():
# GH 5788
s = """2011.05.16,00:00,1.40893
2011.05.16,01:00,1.40760
2011.05.16,02:00,1.40750
2011.05.16,03:00,1.40649
2011.05.17,02:00,1.40893
2011.05.17,03:00,1.40760
2011.05.17,04:00,1.40750
2011.05.17,05:00,1.40649
2011.05.18,02:00,1.40893
2011.05.18,03:00,1.40760
2011.05.18,04:00,1.40750
2011.05.18,05:00,1.40649"""
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'],
parse_dates=[['date', 'time']])
df = df.set_index('date_time')
expected = df.groupby(df.index.date).idxmax()
result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
tm.assert_frame_equal(result, expected)
# GH 5789
# don't auto coerce dates
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'])
exp_idx = pd.Index(
['2011.05.16', '2011.05.17', '2011.05.18'
], dtype=object, name='date')
expected = Series(['00:00', '02:00', '02:00'], index=exp_idx)
result = df.groupby('date').apply(
lambda x: x['time'][x['value'].idxmax()])
tm.assert_series_equal(result, expected)
def test_apply_trivial():
# GH 20066
# trivial apply: ignore input and return a constant dataframe.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df.iloc[1:], df.iloc[1:]],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df.iloc[1:])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH#20066; function passed into apply "
"returns a DataFrame with the same index "
"as the one to create GroupBy object.",
strict=True)
def test_apply_trivial_fail():
# GH 20066
# trivial apply fails if the constant dataframe has the same index
# with the one used to create GroupBy object.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df, df],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df)
tm.assert_frame_equal(result, expected)
def test_fast_apply():
# make sure that fast apply is correctly called
# rather than raising any kind of error
    # otherwise the python path will be called
# which slows things down
N = 1000
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame({'key': labels,
'key2': labels2,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
def f(g):
return 1
g = df.groupby(['key', 'key2'])
grouper = g.grouper
splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)
group_keys = grouper._get_group_keys()
values, mutated = splitter.fast_apply(f, group_keys)
assert not mutated
def test_apply_with_mixed_dtype():
# GH3480, apply with mixed dtype on axis=1 breaks in 0.11
df = DataFrame({'foo1': np.random.randn(6),
'foo2': ['one', 'two', 'two', 'three', 'one', 'two']})
result = df.apply(lambda x: x, axis=1)
tm.assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
# GH 3610 incorrect dtype conversion with as_index=False
df = DataFrame({"c1": [1, 2, 6, 6, 8]})
df["c2"] = df.c1 / 2.0
result1 = df.groupby("c2").mean().reset_index().c2
result2 = df.groupby("c2", as_index=False).mean().c2
tm.assert_series_equal(result1, result2)
def test_groupby_as_index_apply(df):
# GH #4648 and #3417
df = DataFrame({'item_id': ['b', 'b', 'a', 'c', 'a', 'b'],
'user_id': [1, 2, 1, 1, 3, 1],
'time': range(6)})
g_as = df.groupby('user_id', as_index=True)
g_not_as = df.groupby('user_id', as_index=False)
res_as = g_as.head(2).index
res_not_as = g_not_as.head(2).index
exp = Index([0, 1, 2, 4])
tm.assert_index_equal(res_as, exp)
tm.assert_index_equal(res_not_as, exp)
res_as_apply = g_as.apply(lambda x: x.head(2)).index
res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index
# apply doesn't maintain the original ordering
# changed in GH5610 as the as_index=False returns a MI here
exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (
2, 4)])
tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
exp_as_apply = MultiIndex.from_tuples(tp, names=['user_id', None])
tm.assert_index_equal(res_as_apply, exp_as_apply)
tm.assert_index_equal(res_not_as_apply, exp_not_as_apply)
ind = Index(list('abcde'))
df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
res = df.groupby(0, as_index=False).apply(lambda x: x).index
tm.assert_index_equal(res, ind)
def test_apply_concat_preserve_names(three_group):
grouped = three_group.groupby(['A', 'B'])
def desc(group):
result = group.describe()
result.index.name = 'stat'
return result
def desc2(group):
result = group.describe()
result.index.name = 'stat'
result = result[:len(group)]
# weirdo
return result
def desc3(group):
result = group.describe()
# names are different
result.index.name = 'stat_%d' % len(group)
result = result[:len(group)]
# weirdo
return result
result = grouped.apply(desc)
assert result.index.names == ('A', 'B', 'stat')
result2 = grouped.apply(desc2)
assert result2.index.names == ('A', 'B', 'stat')
result3 = grouped.apply(desc3)
assert result3.index.names == ('A', 'B', None)
def test_apply_series_to_frame():
def f(piece):
with np.errstate(invalid='ignore'):
logged = np.log(piece)
return DataFrame({'value': piece,
'demeaned': piece - piece.mean(),
'logged': logged})
dr = bdate_range('1/1/2000', periods=100)
ts = Series(np.random.randn(100), index=dr)
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(f)
assert isinstance(result, DataFrame)
tm.assert_index_equal(result.index, ts.index)
def test_apply_series_yield_constant(df):
result = df.groupby(['A', 'B'])['C'].apply(len)
assert result.index.names[:2] == ('A', 'B')
def test_apply_frame_yield_constant(df):
# GH13568
result = df.groupby(['A', 'B']).apply(len)
assert isinstance(result, Series)
assert result.name is None
result = df.groupby(['A', 'B'])[['C', 'D']].apply(len)
assert isinstance(result, Series)
assert result.name is None
def test_apply_frame_to_series(df):
grouped = df.groupby(['A', 'B'])
result = grouped.apply(len)
expected = grouped.count()['C']
tm.assert_index_equal(result.index, expected.index)
tm.assert_numpy_array_equal(result.values, expected.values)
def test_apply_frame_concat_series():
def trans(group):
return group.groupby('B')['C'].sum().sort_values()[:2]
def trans2(group):
grouped = group.groupby(df.reindex(group.index)['B'])
return grouped.sum().sort_values()[:2]
df = DataFrame({'A': np.random.randint(0, 5, 1000),
'B': np.random.randint(0, 5, 1000),
'C': np.random.randn(1000)})
result = df.groupby('A').apply(trans)
exp = df.groupby('A')['C'].apply(trans2)
tm.assert_series_equal(result, exp, check_names=False)
assert result.name == 'C'
def test_apply_transform(ts):
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
tm.assert_series_equal(result, expected)
def test_apply_multikey_corner(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
def f(group):
return group.sort_values('A')[-5:]
result = grouped.apply(f)
for key, group in grouped:
tm.assert_frame_equal(result.loc[key], f(group))
def test_apply_chunk_view():
# Low level tinkering could be unsafe, make sure not
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'value': compat.lrange(9)})
# return view
f = lambda x: x[:2]
result = df.groupby('key', group_keys=False).apply(f)
expected = df.take([0, 1, 3, 4, 6, 7])
tm.assert_frame_equal(result, expected)
def test_apply_no_name_column_conflict():
df = DataFrame({'name': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'name2': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
'value': compat.lrange(10)[::-1]})
# it works! #2605
grouped = df.groupby(['name', 'name2'])
grouped.apply(lambda x: x.sort_values('value', inplace=True))
def test_apply_typecast_fail():
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(
['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)})
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
tm.assert_frame_equal(result, expected)
def test_apply_multiindex_fail():
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
tm.assert_frame_equal(result, expected)
def test_apply_corner(tsframe):
result = tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)
expected = tsframe * 2
tm.assert_frame_equal(result, expected)
def test_apply_without_copy():
# GH 5545
# returning a non-copy in an applied function fails
data = DataFrame({'id_field': [100, 100, 200, 300],
'category': ['a', 'b', 'c', 'c'],
'value': [1, 2, 3, 4]})
def filt1(x):
if x.shape[0] == 1:
return x.copy()
else:
return x[x.category == 'c']
def filt2(x):
if x.shape[0] == 1:
return x
else:
return x[x.category == 'c']
expected = data.groupby('id_field').apply(filt1)
result = data.groupby('id_field').apply(filt2)
tm.assert_frame_equal(result, expected)
def test_apply_corner_cases():
# #535, can't use sliding iterator
N = 1000
labels = np.random.randint(0, 100, size=N)
df = DataFrame({'key': labels,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
grouped = df.groupby('key')
def f(g):
g['value3'] = g['value1'] * 2
return g
result = grouped.apply(f)
assert 'value3' in result
def test_apply_numeric_coercion_when_datetime():
# In the past, group-by/apply operations have been over-eager
# in converting dtypes to numeric, in the presence of datetime
# columns. Various GH issues were filed, the reproductions
# for which are here.
# GH 15670
df = pd.DataFrame({'Number': [1, 2],
'Date': ["2017-03-02"] * 2,
'Str': ["foo", "inf"]})
expected = df.groupby(['Number']).apply(lambda x: x.iloc[0])
df.Date = pd.to_datetime(df.Date)
result = df.groupby(['Number']).apply(lambda x: x.iloc[0])
tm.assert_series_equal(result['Str'], expected['Str'])
# GH 15421
df = pd.DataFrame({'A': [10, 20, 30],
'B': ['foo', '3', '4'],
'T': [pd.Timestamp("12:31:22")] * 3})
def get_B(g):
return g.iloc[0][['B']]
result = df.groupby('A').apply(get_B)['B']
expected = df.B
expected.index = df.A
tm.assert_series_equal(result, expected)
# GH 14423
def predictions(tool):
out = pd.Series(index=['p1', 'p2', 'useTime'], dtype=object)
if 'step1' in list(tool.State):
out['p1'] = str(tool[tool.State == 'step1'].Machine.values[0])
if 'step2' in list(tool.State):
out['p2'] = str(tool[tool.State == 'step2'].Machine.values[0])
out['useTime'] = str(
tool[tool.State == 'step2'].oTime.values[0])
return out
df1 = pd.DataFrame({'Key': ['B', 'B', 'A', 'A'],
'State': ['step1', 'step2', 'step1', 'step2'],
'oTime': ['', '2016-09-19 05:24:33',
'', '2016-09-19 23:59:04'],
'Machine': ['23', '36L', '36R', '36R']})
df2 = df1.copy()
df2.oTime = pd.to_datetime(df2.oTime)
expected = df1.groupby('Key').apply(predictions).p1
result = df2.groupby('Key').apply(predictions).p1
tm.assert_series_equal(expected, result)
def test_time_field_bug():
# Test a fix for the following error related to GH issue 11324 When
# non-key fields in a group-by dataframe contained time-based fields
# that were not returned by the apply function, an exception would be
# raised.
df = pd.DataFrame({'a': 1, 'b': [datetime.now() for nn in range(10)]})
def func_with_no_date(batch):
return pd.Series({'c': 2})
def func_with_date(batch):
return pd.Series({'b': datetime(2015, 1, 1), 'c': 2})
dfg_no_conversion = df.groupby(by=['a']).apply(func_with_no_date)
dfg_no_conversion_expected = pd.DataFrame({'c': 2}, index=[1])
dfg_no_conversion_expected.index.name = 'a'
dfg_conversion = df.groupby(by=['a']).apply(func_with_date)
dfg_conversion_expected = pd.DataFrame(
{'b': datetime(2015, 1, 1),
'c': 2}, index=[1])
dfg_conversion_expected.index.name = 'a'
tm.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected)
tm.assert_frame_equal(dfg_conversion, dfg_conversion_expected)
def test_gb_apply_list_of_unequal_len_arrays():
# GH1738
df = DataFrame({'group1': ['a', 'a', 'a', 'b', 'b', 'b', 'a', 'a', 'a',
'b', 'b', 'b'],
'group2': ['c', 'c', 'd', 'd', 'd', 'e', 'c', 'c', 'd',
'd', 'd', 'e'],
'weight': [1.1, 2, 3, 4, 5, 6, 2, 4, 6, 8, 1, 2],
'value': [7.1, 8, 9, 10, 11, 12, 8, 7, 6, 5, 4, 3]})
df = df.set_index(['group1', 'group2'])
df_grouped = df.groupby(level=['group1', 'group2'], sort=True)
def noddy(value, weight):
out = np.array(value * weight).repeat(3)
return out
# the kernel function returns arrays of unequal length
# pandas sniffs the first one, sees it's an array and not
    # a list, and assumes the rest are of equal length
# and so tries a vstack
# don't die
df_grouped.apply(lambda x: noddy(x.value, x.weight))
def test_groupby_apply_all_none():
# Tests to make sure no errors if apply function returns all None
# values. Issue 9684.
test_df = DataFrame({'groups': [0, 0, 1, 1],
'random_vars': [8, 7, 4, 5]})
def test_func(x):
pass
result = test_df.groupby('groups').apply(test_func)
expected = | DataFrame() | pandas.DataFrame |
from pathlib import Path
from unicodedata import normalize
import sys
from itertools import chain
sys.path.append("c:\\Users\\kpdav\\machine_learning\\projects\\PGA-portfolio-optimizer\\config")
import config
import requests
from bs4 import BeautifulSoup
import pandas as pd
def pgatour_tournament_ids(url):
"""Find pgatour.com tournament ids on webpage
Args:
url (str) : pgatour.com stats webpage
Returns:
        tournament_ids, with entries of (tournament_id, tournament_name, season)
"""
page = requests.get(url)
soup = BeautifulSoup(page.content, "lxml")
season = int(url[url.rfind("y") + 1:url.rfind(".")])
headers = soup.find("section", class_="statistics-details-content")
tournaments_info = []
if headers is not None:
tournament_parent = headers.find_all("div",
class_="statistics-details-select-wrap tournament")
if tournament_parent is not None:
tournaments = tournament_parent[0].find_all("option")
for tourn in tournaments:
tournaments_info.append((tourn["value"], tourn.text, season))
return tournaments_info
def get_pgatour_ids(start, end):
"""Get pgatour.com tournament ids for a range of seasons
Args:
start (int) : start season
end (int): end season
Returns:
pgatour tournament ids
"""
seasons = [year for year in range(start, end+1)]
# same tournament id's for all stat pages on pgatour.com
urls = [f"https://www.pgatour.com/content/pgatour/stats/stat.328.y{season}.html"
for season in seasons]
pgatour_ids = [pgatour_tournament_ids(url) for url in urls]
# flatten data
pgatour_ids = list(chain.from_iterable(pgatour_ids))
return pgatour_ids
def save_pgatour_ids(file_p):
"""Save pgatour tournament ids in given file path
Args:
file_p (str) : file path
"""
pgatour_data = get_pgatour_ids(2017, 2020)
df = pd.DataFrame(pgatour_data, columns=["tournament_id", "tournament_name", "season"])
df.to_csv(file_p, index=False)
def get_espn_tournaments(start, end=None, all_tournaments=False):
"""Get espn tournaments for given season(s).
Notes:
if all_tournaments is left as False, the dataframe of tournaments
will contain only valid tournamets. Otherwise tournaments that have
not been cancelled will be given (this includes tournaments of match play,
charity events, etc.)
Args:
start (int) : starting pga season
end (int) : ending pga season, optional
all_tournaments (bool) : get all or valid tournaments
Returns:
dataframe of tournaments for specified season(s)
"""
if all_tournaments:
pass
else:
# change path to point to 2011 to 2016 espn tournaments
valid_tournaments_path = str(Path(config.TOURNAMENTS_DIR, "valid_espn_tournaments_2011_2016.csv"))
        df = pd.read_csv(valid_tournaments_path, parse_dates=["date"])
if end is not None:
season_df = df[(df.season_id >= start) & (df.season_id <= end)]
else:
season_df = df[df.season_id == start]
return season_df
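# Usage sketch: pull the stored valid ESPN tournaments for the 2014-2016
# seasons (relies on the csv referenced above being present).
def _example_espn_tournaments():
    return get_espn_tournaments(2014, end=2016)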
def pgatour_statistic(url):
"""Get data on give pgatour statistic
Args:
url (str) : pgatour statistic url
Returns:
data of pgatour statistic
"""
with requests.Session() as session:
page = session.get(url)
soup = BeautifulSoup(page.content, "lxml")
print(f"Fetching: {url}")
data = []
data_keys = []
# meta-information
pga_stat_id = url[url.rfind("stat.") + 5 : url.rfind(".y")]
pga_tourn_id = url[url.rfind("off.") + 4 : url.rfind(".")]
pga_season_id = url[url.rfind("y") + 1: url.rfind(".eoff")]
statistic_name = soup.select("section.statistics-details-content")
if statistic_name is not None:
name_header = statistic_name[0].find("div", class_="header")
name = name_header.find("h1")
if name is not None:
pga_stat_name = name.text
pga_stat_name = pga_stat_name.replace(" ", "_")
statistic_table = soup.select("div.details-table-wrap")
if statistic_table is not None:
header = statistic_table[0].find("thead")
if header is not None:
header_cols = header.find_all("th")
for h_col in header_cols:
col_str = h_col.text
col_str = normalize('NFKD',col_str)
col_str = col_str.strip()
col_str = col_str.replace(" ", "_")
data_keys.append(col_str)
body = statistic_table[0].find("tbody")
if body is not None:
players = body.find_all("tr")
for player in players:
p_data = player.find_all("td")
player_dict = {}
player_dict["pga_stat_name"] = pga_stat_name
player_dict["pga_stat_id"] = pga_stat_id
player_dict["pga_tourn_id"] = pga_tourn_id
player_dict["pga_season_id"] = pga_season_id
key_counter = 0
for col in p_data:
player_dict[data_keys[key_counter]] = col.text.strip()
key_counter += 1
data.append(player_dict)
return data
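# Illustrative call only: the stat id, season and tournament id embedded in
# this URL are placeholders, not verified pgatour.com values.
def _example_pgatour_statistic():
    url = ('https://www.pgatour.com/content/pgatour/stats/'
           'stat.328.y2019.eoff.t033.html')
    return pd.DataFrame(pgatour_statistic(url))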
def get_pgatour_statistic(url, start, end=None):
"""Get pgatour statistic over given range of season(s)
Args:
url (str) : base url for pga statistic
start (int) : start season
end (int) : end season
Returns:
dataframe of pgatour statistic over given range of season(s)
"""
front_url = url[:url.rfind("html")]
end_url = url[url.rfind("."): ]
if end is not None:
base_urls = [front_url + "y" + str(season) for season in range(start, end+1)]
pgatour_tournaments_path = str(Path(config.RAW_DATA_DIR, "PGATOUR_tournament_ids_2017_2020.csv"))
pgatour_ids = pd.read_csv(pgatour_tournaments_path)
pgatour_stat_urls = []
for url in base_urls:
season_id = int(url[url.rfind("y")+1:])
tournament_id_list = pgatour_ids["tournament_id"][pgatour_ids["season"] == season_id]
for t_id in tournament_id_list:
stat_url = url + ".eoff." + t_id + end_url
# print(stat_url)
pgatour_stat_urls.append(stat_url)
# tournament_id_list = pgatour_ids["tournament_id"][(pgatour_ids["season"] <= end) & (pgatour_ids["season"] >= start)].tolist()
stat_data = [pgatour_statistic(url) for url in pgatour_stat_urls]
# flatten data
stat_data = list(chain.from_iterable(stat_data))
df = pd.DataFrame(stat_data)
return df
else:
base_url = front_url + "y" + str(start)
# Make into a function (get stored pgatour tournament ids)
pgatour_tournaments_path = str(Path(config.RAW_DATA_DIR, "PGATOUR_tournament_ids_2017_2020.csv"))
pgatour_ids = pd.read_csv(pgatour_tournaments_path)
tournament_id_list = pgatour_ids["tournament_id"][pgatour_ids["season"] == start].tolist()
pgatour_stat_urls = [base_url + ".eoff." + t_id + end_url for t_id in tournament_id_list]
stat_data = [pgatour_statistic(url) for url in pgatour_stat_urls]
# flatten data
stat_data = list(chain.from_iterable(stat_data))
df = | pd.DataFrame(stat_data) | pandas.DataFrame |
"""
Script to extract all existing ontology annotations in HCA Ingest for a given set of projects.
The script takes a list of project UUIDs and extracts the corresponding ontology annotations from the ingest API (prod).
Project UUIDs can be specified as a comma delimited list or as a text file.
You can run the script in your favourite IDE (eg Pycharm) or via the command line using `python3 ontology_mappings_extractor.py`
The script generates a file called YYYY-mm-dd_HH-MM_property_mappings.tsv, which contains every single free-text to ontology term mapping in every biomaterial or protocol in every submission requested. To get a list of unique mappings by project, use the -u flag
Improvement suggestion: read all mappings into a dictionary first, then save/print only unique ones.
Warning - this script takes a while (10s of mins to a couple of hours!) to run for large submissions or if you give it many in one go!
Copied from HumanCellAtlas/data-wrangling 30/04/2021. Original Author <NAME>
Updated/refactored by <NAME> in April/May 2021.
"""
import argparse
import requests
import pandas as pd
import sys
from datetime import datetime
import re
def define_parser():
parser = argparse.ArgumentParser(description="Parser for the arguments")
parser.add_argument("--file", "-f", action="store", dest="file_path", type=str,
help="Path to a text file with project uuids.")
parser.add_argument("--project_uuid", "-p", action="store", dest="uuid_list", type=str,
help="Single or comma-delimited list of project uuids.")
parser.add_argument("--unique", "-u", action="store_true", dest="unique",
help="If specified, collapse duplicate curations per project.")
parser.add_argument("--api", "-a", action="store", dest="ingest_api_url",
default="http://api.ingest.archive.data.humancellatlas.org/",
help="URL of the api to search, default is current prod api.")
parser.add_argument("--iri_replace", "-i", action="store", dest="iri_replace",
help="Path to a file where there are obo ids like EFO:000001 in column named 'SEMANTIC_TAG' "
"and replace these with full iris. All other arguments ignored.")
return parser
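# Example invocations (file names and uuids are placeholders):
#   python3 ontology_mappings_extractor.py -f project_uuids.txt -u
#   python3 ontology_mappings_extractor.py -p <uuid1>,<uuid2>
#   python3 ontology_mappings_extractor.py -i curations.tsv   # replace obo ids with full IRIs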
def extract_mappings(uuid, api, unique, file_string):
"""
Method to iterate over project metadata as well as the entities in all submissions in a project and save progress to
file.
:param uuid: project uuid
:type uuid: string
:param api: string of ingest api to search
:type api: string
:param unique: Toggle to collapse duplicate curations
:type unique: bool
:param file_string: Path to file to save extracted mappings
:type file_string: str
"""
project_json = requests.get("{}projects/search/findByUuid?uuid={}".format(api, uuid)).json()
project_content = project_json['content']
project_name = project_content['project_core']['project_short_name']
if re.search("Integration Test", project_name):
return
project_mapping_list = read_properties(project_content, 'project', project_name, property_list=[])
save_df(project_mapping_list, unique, file_string)
submissions_link = project_json['_links']['submissionEnvelopes']['href']
submissions_json = requests.get(submissions_link).json()
for submission in submissions_json['_embedded']['submissionEnvelopes']:
biomaterials_link = submission['_links']['biomaterials']['href']
biomaterials_mapping_list = process_json(biomaterials_link, 'biomaterials', project_name)
save_df(biomaterials_mapping_list, unique, file_string)
protocols_link = submission['_links']['protocols']['href']
protocols_mapping_list = process_json(protocols_link, 'protocols', project_name)
save_df(protocols_mapping_list, unique, file_string)
files_link = submission['_links']['files']['href']
files_mapping_list = process_json(files_link, 'files', project_name)
save_df(files_mapping_list, unique, file_string)
# TODO: Process Analysis entities
def process_json(link, schema_type, project_name):
"""
Iterate through all pages of entities for a particular schema type in a particular submission.
:param link: Link to the submission entities to request
:type link: string
:param schema_type: which type to return, one of biomaterials, files, protocols
:type schema_type: string
:param project_name: The short name of the projects
:type project_name: string
:return mapping_list: List of curation mappings
:rtype mapping_list: list
"""
done = False
mapping_list = []
while not done:
entries = requests.get(link).json()
try:
for entry in entries['_embedded'][schema_type]:
bioentity = entry['content']['describedBy'].split('/')[-1]
mapping_list.extend(read_properties(entry['content'], bioentity, project_name, property_list=[]))
if 'next' in entries['_links']:
link = entries['_links']['next']['href']
else:
done = True
except KeyError:
print("Error retrieving metadata from {}. Probably no submission metadata. Skipping...".format(link))
done = True
return mapping_list
# this function recursively reads through an entire json doc to find all the instances of ontology mappings
def read_properties(data, bioentity, project_name, property_list=[], root=None):
"""
Recursively read through all properties of each json entity and retrieve ontology curations.
:param data: Content from api request
:type data: dict
:param bioentity: The schema type of the content, fills the 'BIOENTITY' field in output
:type bioentity: string
:param project_name: The shortname of the project, fills the 'STUDY' field in output
:type project_name: string
:param property_list: The list of properties and curations in a dictionary for each property
:type property_list: list of dicts
:param root:
:type root:
:return: property_list - the list of property curations
:rtype: list of dicts
"""
for k, v in data.items():
if isinstance(v, dict):
if "ontology" in v:
ontology = v['ontology'].strip()
if ontology != "":
text = v['text'].strip()
property_list.append([project_name, bioentity, k, text, ontology])
else:
read_properties(v, bioentity, project_name, property_list, k)
elif isinstance(v, list):
for index, e in enumerate(v):
if isinstance(e, dict):
if "ontology" in e.keys():
ontology = e['ontology'].strip()
if ontology != "":
text = e['text'].strip()
property_list.append([project_name, bioentity, k, text, ontology])
else:
read_properties(e, bioentity, project_name, property_list, k)
return property_list
def save_df(type_mapping_list, unique, file_string, write_mode='a', head=False):
"""
Save curation dataframe to file.
:param type_mapping_list: The list of property curations
:type type_mapping_list: list of dicts
:param unique: Whether to collapse unique curations
:type unique: bool
:param file_string: The path to where to save the curations
:type file_string: string
:param write_mode: The write mode to use, append by default
:type write_mode: string
:param head: Whether to print the header of the dataframe
:type head: bool
"""
column_names = ['STUDY', 'BIOENTITY', 'PROPERTY_TYPE', 'PROPERTY_VALUE', 'SEMANTIC_TAG']
property_df = pd.DataFrame(type_mapping_list, columns=column_names)
if unique:
property_df = property_df.drop_duplicates()
property_df.to_csv(file_string, sep="\t", index=False, mode=write_mode, header=head)
def get_full_iri(obo_id):
"""
Given an ontology id of the form X:01234, look up the full iri using the ebi ols
:param obo_id: ontology identifier, e.g. HsapDv:0000087
:type obo_id: string
:return: full iri for the term, e.g. http://purl.obolibrary.org/obo/HsapDv_0000087
:rtype: string
"""
try:
obo_id = obo_id.strip()
ols_response = requests.get('http://www.ebi.ac.uk/ols/api/terms?obo_id={}'.format(obo_id))
ols_json = ols_response.json()
return ols_json['_embedded']['terms'][0]['iri']
except KeyError:
print('http://www.ebi.ac.uk/ols/api/terms?id={}'.format(obo_id))
print("Could not find {}.".format(obo_id))
return None
    except requests.exceptions.ConnectionError as e:
print(e)
print("Something went wrong while trying to fetch from api.")
def replace_obo_ids(property_df):
"""
Given a pandas DataFrame with the column 'SEMANTIC_TAG' filled with ontology ids of the form X:01234, replace those
ontology ids with the full iris
:param property_df: A pandas dataframe with column 'SEMANTIC_TAG' filled with obo_ids
:type property_df: DataFrame
:return: The updated DataFrame
:rtype: DataFrame
"""
obo_ids = list(set(property_df['SEMANTIC_TAG']))
print("Found {} obo_ids to search and replace.".format(len(obo_ids)))
obo_dict = {obo_id: get_full_iri(obo_id) for obo_id in obo_ids}
property_df["SEMANTIC_TAG"].replace(obo_dict, inplace=True)
return property_df
def main(project_uuids, unique, api, iri_replace):
"""
The main method of the program that calls other methods and iterates over the specified project uuids.
:param project_uuids: A list of project uuids from which to retrieve ontology curations
:type project_uuids: list of strings
:param unique: Toggle to indicate whether to collapse duplicate curations within a project
:type unique: bool
:param api: The ingest api to search
:type api: string
    :param iri_replace: Path to a curation file whose obo ids should be replaced with full iris, or None to skip
    :type iri_replace: str
"""
if iri_replace:
print("Getting full iris")
mappings_df = | pd.read_csv(iri_replace, sep="\t") | pandas.read_csv |
import json
import logging
import os
import pathlib
from itertools import chain
from PIL import Image
import mxnet as mx
import numpy as np
import pandas as pd
from mxnet.recordio import MXIndexedRecordIO
from tqdm import tqdm
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
RANDOM_STATE = 666
import pdb
def json_labels_to_csv(
dataset_path,
output_csv_file='dataset_{}.csv',
output_format=['class_name', 'fname', 'xmin', 'ymin',
'xmax', 'ymax','im_rows','im_cols','image_id'],
val_split=0.1,
shuffle=True
):
"""
:param dataset_path: Path to coco-like dataset (each pic has a json label file)
:param output_csv_file:
:param output_format: order of the columns in the csv-file
defaults to: ['class_name', 'fname', 'xmin', 'ymin', 'xmax', 'ymax', 'im_rows','im_cols','image_id']
:param val_split: frac=0..1 size of validation data split
:param shuffle:
:return:
"""
dataset_path = pathlib.Path(dataset_path)
images = list(dataset_path.glob('*.jpg'))
records = []
for i, image_path in tqdm(enumerate(images)):
file_name = image_path.name
image = dataset_path / (image_path.stem + '.jpg')
im = Image.open(image)
        im_cols, im_rows = im.size  # PIL's Image.size is (width, height)
annotation = dataset_path / (image_path.stem + '.json')
annotation_dict = json.loads(open(str(annotation)).read())
boxes = annotation_dict['boxes']
for box in boxes.keys():
current_box = boxes[box]
record = {
'fname': file_name,
'class_name': current_box['illness'],
'xmin': int(current_box['xmin']),
'ymin': int(current_box['ymin']),
'xmax': int(current_box['xmax']),
'ymax': int(current_box['ymax']),
'im_rows': im_rows,
'im_cols': im_cols,
'image_id': i
}
records.append(record)
df = | pd.DataFrame(records) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from itertools import product
import calendar
class Features():
def __init__(self):
self.df = []
self.readFile()
def execute(self):
self.remove_ex_value()
self.remove_same_data()
self.augumentation()
self.create_test_col()
self.locate_feature()
self.encodeing()
self.time_feature()
self.history_saled_feature()
self.slide_window_feature()
self.three_month_buying_feature()
self.history_sum_feature()
self.another_feature()
self.save()
def readFile(self):
self.test = pd.read_csv('./data/test.csv')
self.sales = pd.read_csv('./data/sales_train.csv')
self.shops = pd.read_csv('./data/shops.csv')
self.items = pd.read_csv('./data/items.csv')
self.item_cats = pd.read_csv('./data/item_categories.csv')
def remove_ex_value(self):
self.train = self.sales[(self.sales.item_price < 100000) & (self.sales.item_price > 0)]
        self.train = self.train[self.train.item_cnt_day < 1001]
def remove_same_data(self):
self.train.loc[self.train.shop_id == 0, 'shop_id'] = 57
self.test.loc[self.test.shop_id == 0, 'shop_id'] = 57
self.train.loc[self.train.shop_id == 1, 'shop_id'] = 58
self.test.loc[self.test.shop_id == 1, 'shop_id'] = 58
self.train.loc[self.train.shop_id == 40, 'shop_id'] = 39
self.test.loc[self.test.shop_id == 40, 'shop_id'] = 39
def augumentation(self):
self.index_cols = ['shop_id', 'item_id', 'date_block_num']
for block_num in self.train['date_block_num'].unique():
            cur_shops = self.train.loc[self.train['date_block_num'] == block_num, 'shop_id'].unique()
            cur_items = self.train.loc[self.train['date_block_num'] == block_num, 'item_id'].unique()
self.df.append(np.array(list(product(*[cur_shops, cur_items, [block_num]])),dtype='int32'))
self.df = pd.DataFrame(np.vstack(self.df), columns = self.index_cols,dtype=np.int32)
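        # self.df is now, month by month, the cartesian product of the shops
        # and items that appeared in that month; pairs with no sales get
        # item_cnt_month = 0 after the merge below.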
self.group = self.train.groupby(['date_block_num','shop_id','item_id']).agg({'item_cnt_day': ['sum']})
self.group.columns = ['item_cnt_month']
self.group.reset_index(inplace=True)
self.df = pd.merge(self.df, self.group, on=self.index_cols, how='left')
self.df['item_cnt_month'] = (self.df['item_cnt_month']
.fillna(0)
.clip(0,20)
.astype(np.float16))
def create_test_col(self):
self.test['date_block_num'] = 34
self.test['date_block_num'] = self.test['date_block_num'].astype(np.int8)
self.test['shop_id'] = self.test['shop_id'].astype(np.int8)
self.test['item_id'] = self.test['item_id'].astype(np.int16)
        self.df = pd.concat([self.df, self.test], ignore_index=True, sort=False, keys=self.index_cols)
        self.df.fillna(0, inplace=True)
def locate_feature(self):
self.shops['city'] = self.shops['shop_name'].apply(lambda x: x.split()[0].lower())
self.shops.loc[self.shops.city == '!якутск', 'city'] = 'якутск'
self.shops['city_code'] = LabelEncoder().fit_transform(self.shops['city'])
coords = dict()
coords['якутск'] = (62.028098, 129.732555, 4)
coords['адыгея'] = (44.609764, 40.100516, 3)
coords['балашиха'] = (55.8094500, 37.9580600, 1)
coords['волжский'] = (53.4305800, 50.1190000, 3)
coords['вологда'] = (59.2239000, 39.8839800, 2)
coords['воронеж'] = (51.6720400, 39.1843000, 3)
coords['выездная'] = (0, 0, 0)
coords['жуковский'] = (55.5952800, 38.1202800, 1)
coords['интернет-магазин'] = (0, 0, 0)
coords['казань'] = (55.7887400, 49.1221400, 4)
coords['калуга'] = (54.5293000, 36.2754200, 4)
coords['коломна'] = (55.0794400, 38.7783300, 4)
coords['красноярск'] = (56.0183900, 92.8671700, 4)
coords['курск'] = (51.7373300, 36.1873500, 3)
coords['москва'] = (55.7522200, 37.6155600, 1)
coords['мытищи'] = (55.9116300, 37.7307600, 1)
coords['н.новгород'] = (56.3286700, 44.0020500, 4)
coords['новосибирск'] = (55.0415000, 82.9346000, 4)
coords['омск'] = (54.9924400, 73.3685900, 4)
coords['ростовнадону'] = (47.2313500, 39.7232800, 3)
coords['спб'] = (59.9386300, 30.3141300, 2)
coords['самара'] = (53.2000700, 50.1500000, 4)
coords['сергиев'] = (56.3000000, 38.1333300, 4)
coords['сургут'] = (61.2500000, 73.4166700, 4)
coords['томск'] = (56.4977100, 84.9743700, 4)
coords['тюмень'] = (57.1522200, 65.5272200, 4)
coords['уфа'] = (54.7430600, 55.9677900, 4)
coords['химки'] = (55.8970400, 37.4296900, 1)
coords['цифровой'] = (0, 0, 0)
coords['чехов'] = (55.1477000, 37.4772800, 4)
coords['ярославль'] = (57.6298700, 39.8736800, 2)
self.shops['city_coord_1'] = self.shops['city'].apply(lambda x: coords[x][0])
self.shops['city_coord_2'] = self.shops['city'].apply(lambda x: coords[x][1])
self.shops['country_part'] = self.shops['city'].apply(lambda x: coords[x][2])
self.shops = self.shops[['shop_id', 'city_code', 'city_coord_1', 'city_coord_2', 'country_part']]
self.df = pd.merge(self.df, self.shops, on=['shop_id'], how='left')
def encodeing(self):
map_dict = {
'Чистые носители (штучные)': 'Чистые носители',
'Чистые носители (шпиль)' : 'Чистые носители',
'PC ': 'Аксессуары',
'Служебные': 'Служебные '
}
self.items = pd.merge(self.items, self.item_cats, on='item_category_id')
self.items['item_category'] = self.items['item_category_name'].apply(lambda x: x.split('-')[0])
self.items['item_category'] = self.items['item_category'].apply(lambda x: map_dict[x] if x in map_dict.keys() else x)
self.items['item_category_common'] = LabelEncoder().fit_transform(self.items['item_category'])
self.items['item_category_code'] = LabelEncoder().fit_transform(self.items['item_category_name'])
self.items = self.items[['item_id', 'item_category_common', 'item_category_code']]
self.df = pd.merge(self.df, self.items, on=['item_id'], how='left')
def time_feature(self):
def count_days(date_block_num):
year = 2013 + date_block_num // 12
month = 1 + date_block_num % 12
weeknd_count = len([1 for i in calendar.monthcalendar(year, month) if i[6] != 0])
days_in_month = calendar.monthrange(year, month)[1]
return weeknd_count, days_in_month, month
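        # Example: count_days(0) -> (4, 31, 1), i.e. January 2013 has four
        # Sundays (rows of monthcalendar with i[6] != 0) and 31 days.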
map_dict = {i: count_days(i) for i in range(35)}
self.df['weeknd_count'] = self.df['date_block_num'].apply(lambda x: map_dict[x][0])
self.df['days_in_month'] = self.df['date_block_num'].apply(lambda x: map_dict[x][1])
def history_saled_feature(self):
first_item_block = self.df.groupby(['item_id'])['date_block_num'].min().reset_index()
first_item_block['item_first_interaction'] = 1
first_shop_item_buy_block = self.df[self.df['date_block_num'] > 0].groupby(['shop_id', 'item_id'])['date_block_num'].min().reset_index()
first_shop_item_buy_block['first_date_block_num'] = first_shop_item_buy_block['date_block_num']
self.df = | pd.merge(self.df, first_item_block[['item_id', 'date_block_num', 'item_first_interaction']], on=['item_id', 'date_block_num'], how='left') | pandas.merge |
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
tests.append(( | Week(-2, weekday=1) | pandas.core.datetools.Week |
import argparse
import torch
from src.data import processors
import os
import json
import numpy as np
from transformers import AutoTokenizer
from src import setup, train
from torch.utils.data import Subset, SequentialSampler, DataLoader
from tqdm import tqdm
from torch.distributions.categorical import Categorical
from pathlib import Path
import pandas as pd
def compute_entropy(sampled, dataset, model, train_args):
"""Compute average entropy in label distribution for examples in [sampled]."""
all_entropy = None
data = Subset(dataset, sampled)
sampler = SequentialSampler(data)
dataloader = DataLoader(data, sampler=sampler, batch_size=train_args.per_gpu_eval_batch_size)
for batch in tqdm(dataloader, desc="Computing entropy"):
batch = tuple(t.to(train_args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1]}
if train_args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if train_args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
logits = outputs[0]
categorical = Categorical(logits = logits)
entropy = categorical.entropy()
if all_entropy is None:
all_entropy = entropy.detach().cpu().numpy()
else:
all_entropy = np.append(all_entropy, entropy.detach().cpu().numpy(), axis=0)
avg_entropy = all_entropy.mean()
return avg_entropy
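# token_set: walks a dataset in batches and returns the set of unique input
# token ids it contains; used below to measure vocabulary overlap between the
# sampled and unsampled pools.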
def token_set(data, train_args):
all_tokens = set()
sampler = SequentialSampler(data)
dataloader = DataLoader(data, sampler=sampler, batch_size=32)
for batch in tqdm(dataloader, desc="Getting tokens"):
with torch.no_grad():
tokens = batch[0].unique().tolist()
all_tokens = all_tokens.union(tokens)
return all_tokens
def jaccard(a, b):
ji = len(a.intersection(b))/len(a.union(b))
return ji
def compute_diversity(sampled, data, train_args):
# compare jaccard similarity between sampled and unsampled points
data_sampled = Subset(data, sampled)
unsampled = np.delete(torch.arange(len(data)), sampled)
data_unsampled = Subset(data, unsampled)
tokens_sampled = token_set(data_sampled, train_args)
tokens_unsampled = token_set(data_unsampled, train_args)
ji = jaccard(tokens_sampled, tokens_unsampled)
return ji
def get_stats(model_path, base_model, dataset):
sampling, sample_size = model_path.name.split('_')
sampled = torch.load(model_path / 'sampled.pt')
diversity = compute_diversity(sampled, dataset, train_args)
entropy = compute_entropy(sampled, dataset, base_model, train_args)
stats = {
"sampling":sampling,
"iteration":int(sample_size)/100,
"task":model_path.parent.name,
"diversity":diversity,
"uncertainty":entropy
}
return stats
TASKNAME = {'sst-2':'SST-2'}
parser = argparse.ArgumentParser()
parser.add_argument(
"--task_models",
default=None,
type=Path,
help="Directory of models for task"
)
parser.add_argument(
"--output",
default=None,
type=Path,
help="Path to output file"
)
parser.add_argument(
"--clear",
action="store_true",
help="Flag for clearing csv"
)
args = parser.parse_args()
# get base model trained on full data
train_args = torch.load(args.task_models / 'base' / 'training_args.bin' )
base_model, tokenizer, model_class, tokenizer_class = setup.load_model(train_args)
# load training data
task = train_args.task_name
processor = processors[task]()
if task in TASKNAME:
task = TASKNAME[task]
dataset = train.load_and_cache_examples(train_args, train_args.task_name, tokenizer, evaluate=False)
# analyze samples from different active learning iterations
all_stats = []
for model_path in args.task_models.glob('**/') :
print(model_path.name)
if (model_path / 'sampled.pt').exists():
stats = get_stats(model_path, base_model, dataset)
all_stats.append(stats)
# output results
df = | pd.DataFrame(all_stats) | pandas.DataFrame |
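# A possible output step implied by the --output/--clear arguments (sketch,
# assumption: append to an existing csv unless --clear is passed):
# if args.clear or not args.output.exists():
#     df.to_csv(args.output, index=False)
# else:
#     df.to_csv(args.output, mode='a', header=False, index=False)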
from numpy import loadtxt
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score, f1_score
from sklearn import metrics
import smote_variants as sv
from sklearn.linear_model import LogisticRegression
from sklearn import svm
import heapq
# prepare the smote data --> X_samp, y_samp
df = | pd.read_csv("original_data.csv") | pandas.read_csv |
"""Thread for running an rdkafka_performance producer from librdkafka 1.1.0."""
import os
import tempfile
import threading
import time
import fabric
import numpy
import pandas
class LibrdkafkaProducer(threading.Thread):
def __init__(
self,
name,
server,
log,
bootstrap_servers,
topic,
msg_size,
msg_count,
output_path,
**kwargs
):
super().__init__(name=name)
self.server = server
self.log = log
self.kwargs = kwargs
self.bootstrap_servers = bootstrap_servers
self.topic = topic
self.msg_size = msg_size
self.msg_count = msg_count
self.output_path = output_path
self.kwargs = kwargs
self.result = None
self.start_time = None
self.end_time = None
self.cmd = self._build_cmd()
def run(self):
self.start_time = round(time.time())
self._add_to_log(
"{} running command: '{}'".format(self.name, self.cmd)
)
with fabric.Connection(self.server) as c:
self.result = c.run(self.cmd, hide=True, pty=True)
self._add_to_log("{} finished running command".format(self.name))
self.end_time = round(time.time())
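    # _build_cmd assembles the remote command line: it removes any previous
    # output/error files, recreates the output directory, runs rdkafka_performance
    # in producer mode, appends optional -X config overrides, rate (-r),
    # partition (-p) and acks (-a) flags, and redirects stdout/stderr to files
    # under output_path.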
def _build_cmd(self):
cmd = "rm -f {} {}".format(
os.path.join(self.output_path, "output"),
os.path.join(self.output_path, "error"),
)
cmd = cmd + " && mkdir -p {}".format(self.output_path)
cmd = cmd + "&& /opt/dm_group/librdkafka/librdkafka-1.1.0/examples/"
cmd = cmd + "rdkafka_performance -P -b {} -t {} -s {} -c {} -u".format(
self.bootstrap_servers, self.topic, self.msg_size, self.msg_count
)
if "configs" in self.kwargs:
for config in self.kwargs["configs"]:
cmd = cmd + " -X {}".format(config)
if "rate" in self.kwargs:
cmd = cmd + " -r {}".format(self.kwargs["rate"])
if "partition" in self.kwargs:
cmd = cmd + " -p {}".format(self.kwargs["partition"])
if "acks" in self.kwargs:
cmd = cmd + " -a {}".format(self.kwargs["acks"])
stdout_path = os.path.join(self.output_path, "output")
stderr_path = os.path.join(self.output_path, "error")
cmd = cmd + " 1>{} 2>{}".format(stdout_path, stderr_path)
return cmd
def _add_to_log(self, message):
t = round(time.time())
self.log.put("{}: {}".format(t, message))
def get_producer_metrics_and_errors(producers):
"""Get the results for a list of producers from remote servers."""
producer_metrics = {}
producer_errors = {}
for producer in producers:
with tempfile.TemporaryDirectory() as tmpdir:
_copy_producer_files(producer.server, producer.output_path, tmpdir)
producer_metrics[producer.name] = _read_producer_output(
os.path.join(tmpdir, "output")
)
producer_errors[producer.name] = _read_producer_errors(
os.path.join(tmpdir, "error")
)
return producer_metrics, producer_errors
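# The helpers below copy the producer's stdout/stderr files from the remote host
# via fabric and parse the rdkafka_performance statistics table into a DataFrame.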
def _copy_producer_files(server, remote_src, local_dst):
with fabric.Connection(server) as c:
c.get(
os.path.join(remote_src, "output"),
os.path.join(local_dst, "output"),
)
c.get(
os.path.join(remote_src, "error"), os.path.join(local_dst, "error")
)
def _read_producer_output(path):
# _parse_producer_output would work with a file path, but we pass the file
# to simplify testing.
with open(path, "r") as f:
data_frame = _parse_producer_output(f)
return data_frame
def _parse_producer_output(output):
data = numpy.genfromtxt(
output,
delimiter="|",
comments="%",
skip_header=2,
usecols=range(1, 12),
dtype=numpy.dtype(
[
("time_ms", "u8"),
("msgs", "u8"),
("bytes", "u8"),
("rtt", "u8"),
("dr", "u8"),
("dr_msgs_per_s", "u8"),
("dr_MB_per_s", "f8"),
("dr_err", "u8"),
("tx_err", "u8"),
("queue", "u8"),
("offset", "u8"),
]
),
)
return | pandas.DataFrame(data) | pandas.DataFrame |
"""Aggregate or condense tract-level energy burden into county-level data.
The output table stores the values of the minimum and maximum tract-level
energy burden within each county.
Usage: python <script> <input_csv> <output_csv>
"""
import pandas as pd
from sys import argv
df = pd.read_csv(argv[1], encoding='latin-1', dtype=str)
df['energy_burden_percent_income'] = df['energy_burden_percent_income'].astype(int)
gb = df.groupby(['state_code', 'county_code'])
out_df = | pd.DataFrame() | pandas.DataFrame |
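# A minimal sketch of the aggregation described in the module docstring
# (assumption: the snippet above is truncated; names follow the docstring, not
# the original source):
# out_df = gb['energy_burden_percent_income'].agg(['min', 'max']).reset_index()
# out_df.to_csv(argv[2], index=False)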
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
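# Illustrative sketch of the PCA workflow described above (helper name is a
# placeholder and this function is not wired into the Dash app): standardise the
# numeric columns, project onto the principal components, and report the
# cumulative explained variance used by the Scree plot.
def _pca_sketch(numeric_df):
    x = StandardScaler().fit_transform(numeric_df.values)  # mean 0, variance 1
    pca = PCA(n_components=numeric_df.shape[1])
    scores = pca.fit_transform(x)  # coordinates of each row in PC space
    cum_var = np.cumsum(pca.explained_variance_ratio_) * 100  # % variance kept
    return scores, cum_var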
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tool runs PCA for the user and '
                                 'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
                                 'for their dataset and whether they can accept an X% drop in explained variance '
                                 'in exchange for fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
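# Illustrative sketch (placeholder name, not used by the app): with standardised
# inputs, the loadings shown in the heatmap are the correlations between each
# original variable and each principal component.
def _loadings_sketch(numeric_df):
    x = StandardScaler().fit_transform(numeric_df.values)
    pca = PCA(n_components=numeric_df.shape[1]).fit(x)
    loadings = pca.components_.T * np.sqrt(pca.explained_variance_)
    return pd.DataFrame(loadings, index=numeric_df.columns,
                        columns=['PC' + str(i + 1) for i in range(loadings.shape[1])])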
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
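# Illustrative sketch using the usual definitions (assumed, not taken from the
# app code below): cos2 is the squared loading, and a variable's contribution to
# a component is its cos2 as a percentage of that component's total cos2.
def _cos2_and_contrib_sketch(loadings_df):
    cos2 = loadings_df ** 2
    contrib = cos2 * 100 / cos2.sum(axis=0)
    return cos2, contrib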
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
                                                 html.P(
                                                     "In any case, it depends on the machine learning algorithm you may apply later. For correlation-robust algorithms,"
                                                     " such as Random Forest, correlation between features will not be a concern. For algorithms that are not robust to correlation, such as Linear Discriminant Analysis, "
                                                     "highly correlated variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
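        # Loadings: correlations between the standardised original variables and
        # the principal components (components scaled by sqrt of their variance).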
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
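        # Outlier rule: keep only rows whose numeric columns all lie within
        # 3 standard deviations of their column means (|z-score| < 3).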
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principle components and target
finalDf_outlier = | pd.concat([outlier_names, principalDf_outlier], axis=1) | pandas.concat |
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq="A")
start = Period("2007", freq="A-JUN")
end = Period("2010", freq="A-DEC")
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = (
"Of the three parameters: start, end, and periods, exactly two"
" must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range("2007-01", periods=10.5, freq="M")
exp = period_range("2007-01", periods=10, freq="M")
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range("2007-01", periods=20, freq="M")
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx._ndarray_values)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx._ndarray_values))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period("2007", freq="A"))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq="M")
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == "M"
result = PeriodIndex(idx, freq="2M")
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq="D")
exp = idx.asfreq("D", "e")
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype("M8[us]"))
msg = r"Wrong dtype: datetime64\[us\]"
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals, freq="D")
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = pd.date_range("2017", periods=4, freq="M")
if box is None:
data = data._values
elif box == "series":
data = pd.Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D"
)
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-03"], freq="M")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[M]"
idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]")
exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[3D]"
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D")
res = PeriodIndex(idx, dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-01"], freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
res = PeriodIndex(idx, freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
msg = "specified freq and dtype are different"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(["2011-01"], freq="M", dtype="period[D]")
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq="M")
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == "M"
with pytest.raises(ValueError, match="freq not specified"):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")]
)
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array([Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")])
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
[pd.NaT, pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array(
[
pd.NaT,
pd.NaT,
Period("2011-01", freq="M"),
Period("2011-01", freq="M"),
]
)
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([pd.NaT, pd.NaT])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(["NaT", "NaT"])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array(["NaT", "NaT"]))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
)
# first element is pd.NaT
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
)
def test_constructor_mixed(self):
idx = PeriodIndex(["2011-01", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(["NaT", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["NaT", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period("2011-01-01", freq="D"), pd.NaT, "2012-01-01"])
exp = PeriodIndex(["2011-01-01", "NaT", "2012-01-01"], freq="D")
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range("2007-01", name="p", periods=2, freq="M")
result = idx._simple_new(idx, name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype("i8"), name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq="M", name="p")
result = idx._simple_new(idx, name="p", freq="M")
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
msg = r"PeriodIndex\._simple_new does not accept floats"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex._simple_new(floats, freq="M")
msg = "PeriodIndex does not allow floating point in construction"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex(floats, freq="M")
def test_constructor_nat(self):
msg = "start and end must not be NaT"
with pytest.raises(ValueError, match=msg):
period_range(start="NaT", end="2011-01-01", freq="M")
with pytest.raises(ValueError, match=msg):
period_range(start="2011-01-01", end="NaT", freq="M")
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ["%dQ%d" % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
@pytest.mark.parametrize(
"func, warning", [(PeriodIndex, FutureWarning), (period_range, None)]
)
def test_constructor_freq_mult(self, func, warning):
# GH #7811
with tm.assert_produces_warning(warning):
# must be the same, but for sure...
pidx = func(start="2014-01", freq="2M", periods=4)
expected = PeriodIndex(["2014-01", "2014-03", "2014-05", "2014-07"], freq="2M")
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(start="2014-01-02", end="2014-01-15", freq="3D")
expected = PeriodIndex(
["2014-01-02", "2014-01-05", "2014-01-08", "2014-01-11", "2014-01-14"],
freq="3D",
)
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(end="2014-01-01 17:00", freq="4H", periods=3)
expected = PeriodIndex(
["2014-01-01 09:00", "2014-01-01 13:00", "2014-01-01 17:00"], freq="4H"
)
tm.assert_index_equal(pidx, expected)
msg = "Frequency must be positive, because it" " represents span: -1M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="-1M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="0M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
period_range("2011-01", periods=3, freq="0M")
@pytest.mark.parametrize("freq", ["A", "M", "D", "T", "S"])
@pytest.mark.parametrize("mult", [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat(self, mult, freq):
freqstr = str(mult) + freq
pidx = period_range(start="2014-04-01", freq=freqstr, periods=10)
expected = date_range(start="2014-04-01", freq=freqstr, periods=10).to_period(
freqstr
)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ["1D1H", "1H1D"]:
pidx = PeriodIndex(["2016-01-01", "2016-01-02"], freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 00:00"], freq="25H")
for freq in ["1D1H", "1H1D"]:
pidx = period_range(start="2016-01-01", periods=2, freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 01:00"], freq="25H")
tm.assert_index_equal(pidx, expected)
def test_constructor_range_based_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
pi = PeriodIndex(freq="A", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
def test_constructor_range_based_deprecated_different_freq(self):
with tm.assert_produces_warning(FutureWarning) as m:
PeriodIndex(start="2000", periods=2)
warning, = m
assert 'freq="A-DEC"' in str(warning.message)
def test_constructor(self):
pi = period_range(freq="A", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
pi = period_range(freq="Q", start="1/1/2001", end="12/1/2009")
assert len(pi) == 4 * 9
pi = period_range(freq="M", start="1/1/2001", end="12/1/2009")
assert len(pi) == 12 * 9
pi = period_range(freq="D", start="1/1/2001", end="12/31/2009")
assert len(pi) == 365 * 9 + 2
pi = period_range(freq="B", start="1/1/2001", end="12/31/2009")
assert len(pi) == 261 * 9
pi = period_range(freq="H", start="1/1/2001", end="12/31/2001 23:00")
assert len(pi) == 365 * 24
pi = period_range(freq="Min", start="1/1/2001", end="1/1/2001 23:59")
assert len(pi) == 24 * 60
pi = period_range(freq="S", start="1/1/2001", end="1/1/2001 23:59:59")
assert len(pi) == 24 * 60 * 60
start = Period("02-Apr-2005", "B")
i1 = period_range(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period("2006-12-31", "W")
i1 = period_range(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period("2006-12-31", "1w")
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period("2006-12-31", ("w", 1))
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period("2005-05-01", "B")
i1 = period_range(start=start, end=end_intv)
# infer freq from first element
i2 = PeriodIndex([end_intv, Period("2005-05-05", "B")])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period("2005-05-05", "B")]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period("2006-12-31", "w")]
msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)"
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(vals)
vals = np.array(vals)
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(vals)
def test_constructor_error(self):
start = Period("02-Apr-2005", "B")
end_intv = Period("2006-12-31", ("w", 1))
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end_intv)
msg = (
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
@pytest.mark.parametrize(
"freq", ["M", "Q", "A", "D", "B", "T", "S", "L", "U", "N", "H"]
)
def test_recreate_from_data(self, freq):
org = period_range(start="2001/04/01", freq=freq, periods=1)
idx = PeriodIndex(org.values, freq=freq)
tm.assert_index_equal(idx, org)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq="A")
expected = Index([str(num) for num in raw])
res = index.map(str)
# should return an Index
assert isinstance(res, Index)
# preserve element types
assert all(isinstance(resi, str) for resi in res)
# lastly, values should compare equal
tm.assert_index_equal(res, expected)
class TestSeriesPeriod:
def setup_method(self, method):
self.series = Series(period_range("2000-01-01", periods=10, freq="D"))
def test_constructor_cant_cast_period(self):
msg = "Cannot cast PeriodArray to dtype float64"
with pytest.raises(TypeError, match=msg):
Series(period_range("2000-01-01", periods=10, freq="D"), dtype=float)
def test_constructor_cast_object(self):
s = Series(period_range("1/1/2000", periods=10), dtype= | PeriodDtype("D") | pandas.core.dtypes.dtypes.PeriodDtype |
# imports
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, roc_auc_score
# read in data - part-1: heros information file
heros_info = pd.read_csv('./data/heroes_information.csv')
# read in data - part-2: heros superpowers file
heros_power = pd.read_csv('./data/super_hero_powers.csv')
# since only binary classification is required, create a feature that defines whether the hero is human or not
heros_info['isHuman'] = heros_info['Race'].apply(lambda i: 1 if i == 'Human' else 0)
heros_info.drop(['Unnamed: 0'], inplace=True, axis=1)
# create model matrix (aka dummies in pyworld) for superpowers data
power_bool_cols = heros_power.columns.drop("hero_names")
heros_power_dummies = pd.get_dummies(heros_power, columns=power_bool_cols)
# model-matrix for info dataframe
info_bool_cols = ['Gender', 'Eye color', 'Hair color', 'Skin color', 'Alignment']
heros_info_dummies = | pd.get_dummies(heros_info, columns=info_bool_cols) | pandas.get_dummies |
# -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
def test_trend_bridge():
from ddf_utils.transformer import trend_bridge
from numpy.testing import assert_almost_equal
tr1 = pd.Index(range(0, 5))
tr2 = pd.Index(range(3, 8))
s1 = | pd.Series([1, 2, 3, 4, 5], index=tr1) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 21 15:28:02 2019
Ing.,Mgr. (MSc.) <NAME>
Biomedical engineering
International Clinical Research Center
St. Anne's University Hospital in Brno
Czech Republic
&
Mayo systems electrophysiology lab
Mayo Clinic
200 1st St SW
Rochester, MN
United States
"""
# Standard library imports
# Third party imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
# Local imports
# %% Load data
path_to_data = 'data.pkl'
svm_df = | pd.read_pickle(path_to_data) | pandas.read_pickle |
#Script generated by SupremolecularAnalyser
import sys
sys.path.insert(0, "/net/archive/groups/plggkatksdh/pyplotTest")
import matplotlib.pyplot as plt
from simpleFilters import *
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.colors as mcolors
from os.path import isdir, join, basename
from os import makedirs, remove
from glob import glob
import json
from collections import defaultdict
from itertools import combinations
from math import pi
mpl.rcParams['mathtext.default'] = 'regular'
plt.rcParams.update({'font.size': 16})
logDir = "logs/"
postprocessingDir = "postprocessing/"
resUniqueDir = join(postprocessingDir, "general")
if not isdir(resUniqueDir):
makedirs(resUniqueDir)
# cases2run = { "preprocessing" : True, "a-g" : True, "UniqueSeq" : True, "histogram2d" : True,
# "histogram2d-planar" : True, "histograms-linear" : True , "barplots": True , "resolutionplot":False,
# "occurencesTable" : True, "occurencesPairs" : True,
# "chainNeoghbors" : True }
cases2run = { "preprocessing" : False, "a-g" : False, "UniqueSeq" : False, "histogram2d" : True, "hydrogenBondAnalysis" : False , "ligandsAnalysis" : False ,
"histogram2d-planar" : False, "histograms-linear" : False , "barplots": False , "resolutionplot":False,
"occurencesTable" : False, "occurencesPairs" : False,
"chainNeoghbors" : False }
def saveUniqueRecordsSeq( logPath, logOut, chain1Key, chain2Key, headersSubset ):
print("Aktualnie przetwarzam: ", logPath)
uniqueSeqDf = pd.read_csv("chainClusters.csv", sep = "\t")
uniqueSeqDf1 = uniqueSeqDf.rename( columns = { "Chain" : chain1Key, "Sequence ID" : "Sequence ID 1" } )
uniqueSeqDf2 = uniqueSeqDf.rename( columns = { "Chain" : chain2Key, "Sequence ID" : "Sequence ID 2" } )
df = | pd.read_table(logPath) | pandas.read_table |
##
import os,pandas, pickle, tqdm
##
def convert(item):
## Order
order = item[['$' in key for key in item.keys()]]
order = order[order>0].sort_values()
order = [key.split('$')[1] for key in order.keys()]
## Count
count = item[['#' in key for key in item.keys()]]
count = count[[key.split('#')[1] in order for key in count.keys()]]
count = round(count).astype('int').astype('str')
count.index = [key.split("#")[1] for key in count.keys()]
##
text = ""
for key, value in count[order].to_dict().items():
text += key + value
pass
return(text)
##
with open("SOURCE/LOG/test.pickle", "rb") as paper:
paper = pickle.load(paper)
pass
##
likelihood = | pandas.DataFrame(paper['test']['likelihood']) | pandas.DataFrame |
'''
Factor risk premium
'''
# %% set system path
import sys,os
sys.path.append(os.path.abspath(".."))
# %% import data
import pandas as pd
month_return = pd.read_hdf(r'.\data\month_return.h5', key='month_return')
company_data = pd.read_hdf(r'.\data\last_filter_pe.h5', key='data')
trade_data = pd.read_hdf(r'.\data\mean_filter_trade.h5', key='data')
beta = pd.read_hdf(r'.\data\beta.h5', key='data')
# %% data preprocessing
# forward the monthly return for each stock
# emrwd is the return including dividend
month_return['emrwd'] = month_return.groupby(['Stkcd'])['Mretwd'].shift(-1)
# emrnd is the return excluding dividends
month_return['emrnd'] = month_return.groupby(['Stkcd'])['Mretnd'].shift(-1)
# select the A share stock
month_return = month_return[month_return['Markettype'].isin([1, 4, 16])]
# % distinguish the stocks whose size is among the up 30% stocks in each month
def percentile(stocks) :
return stocks >= stocks.quantile(q=.3)
month_return['cap'] = month_return.groupby(['Trdmnt'])['Msmvttl'].apply(percentile)
# %% Construct proxy variable
import numpy as np
# SMB
# log(Size)
month_return['Size'] = np.log(month_return['Msmvttl'])
# HML
company_data['BM'] = 1 / company_data['PBV1A']
# RMW
# in this demo, the ROE(TTM) are used
# ROE(TTM) = PBV1B/PE(TTM)
company_data['ROE(TTM)'] = company_data['PBV1B']/company_data['PE1TTM']
# CMA
# % calculate the total asset
# asset = debt + equity
# debt = company_value - market_value
# equity = market_value / PB
company_data['debt'] = company_data['EV1'] - company_data['MarketValue']
company_data['equity'] = company_data['MarketValue']/company_data['PBV1A']
company_data['asset'] = company_data['debt'] + company_data['equity']
# asset growth rate
company_data['asset_growth_rate'] = company_data['asset'].groupby(['Symbol']).diff(12)/company_data['asset']
# Momentum
month_return['rolling_12'] = np.array(month_return.groupby(['Stkcd'])['Mretwd'].rolling(12).sum())
month_return['momentum'] = month_return['rolling_12'] - month_return['Mretwd']
# Turnover
trade_data['rolling_Turnover'] = np.array(trade_data['Turnover'].groupby('Symbol').rolling(12).mean())
trade_data['specific_Turnover'] = trade_data['Turnover'] / trade_data['rolling_Turnover']
# %% merge data
from pandas.tseries.offsets import *
month_return['Stkcd_merge'] = month_return['Stkcd'].astype(dtype='string')
month_return['Date_merge'] = | pd.to_datetime(month_return['Trdmnt']) | pandas.to_datetime |
import logging
import math
import re
from collections import Counter
import numpy as np
import pandas as pd
from certa.utils import diff
def get_original_prediction(r1, r2, predict_fn):
r1r2 = get_row(r1, r2)
return predict_fn(r1r2)[['nomatch_score', 'match_score']].values[0]
def get_row(r1, r2, lprefix='ltable_', rprefix='rtable_'):
r1_df = pd.DataFrame(data=[r1.values], columns=r1.index)
r2_df = pd.DataFrame(data=[r2.values], columns=r2.index)
r1_df.columns = list(map(lambda col: lprefix + col, r1_df.columns))
r2_df.columns = list(map(lambda col: rprefix + col, r2_df.columns))
r1r2 = pd.concat([r1_df, r2_df], axis=1)
return r1r2
def support_predictions(r1: pd.Series, r2: pd.Series, lsource: pd.DataFrame,
rsource: pd.DataFrame, predict_fn, lprefix, rprefix, num_triangles: int = 100,
class_to_explain: int = None, max_predict: int = -1,
use_w: bool = True, use_q: bool = True):
'''
generate a pd.DataFrame of support predictions to be used to generate open triangles.
:param r1: the "left" record
:param r2: the "right" record
:param lsource: the "left" data source
:param rsource: the "right" data source
:param predict_fn: the ER model prediction function
:param lprefix: the prefix of attributes from the "left" table
:param rprefix: the prefix of attributes from the "right" table
:param num_triangles: number of open triangles to be used to generate the explanation
:param class_to_explain: the class to be explained
:param max_predict: the maximum number of predictions to be performed by the ER model to generate the requested
number of open triangles
:param use_w: whether to use left open triangles
:param use_q: whether to use right open triangles
:return: a pd.DataFrame of record pairs with one record from the original prediction and one record yielding an
opposite prediction by the ER model
'''
r1r2 = get_row(r1, r2)
original_prediction = predict_fn(r1r2)[['nomatch_score', 'match_score']].values[0]
r1r2['id'] = "0@" + str(r1r2[lprefix + 'id'].values[0]) + "#" + "1@" + str(r1r2[rprefix + 'id'].values[0])
copies, copies_left, copies_right = expand_copies(lprefix, lsource, r1, r2, rprefix, rsource)
find_positives, support = get_support(class_to_explain, pd.concat([lsource, copies_left]), max_predict,
original_prediction, predict_fn, r1, r2, pd.concat([rsource, copies_right]),
use_w, use_q, lprefix, rprefix, num_triangles)
if len(support) > 0:
if len(support) > num_triangles:
support = support.sample(n=num_triangles)
else:
logging.warning(f'could find {str(len(support))} triangles of the {str(num_triangles)} requested')
support['label'] = list(map(lambda predictions: int(round(predictions)),
support.match_score.values))
support = support.drop(['match_score', 'nomatch_score'], axis=1)
if class_to_explain == None:
r1r2['label'] = np.argmax(original_prediction)
else:
r1r2['label'] = class_to_explain
support_pairs = pd.concat([r1r2, support], ignore_index=True)
return support_pairs, copies_left, copies_right
else:
logging.warning('no triangles found')
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
def find_candidates_predict(record, source, find_positives, predict_fn, num_candidates, lj=True,
max=-1, lprefix='ltable_', rprefix='rtable_'):
if lj:
records = pd.DataFrame()
records = records.append([record] * len(source), ignore_index=True)
copy = source.copy()
records.columns = list(map(lambda col: lprefix + col, records.columns))
copy.columns = list(map(lambda col: rprefix + col, copy.columns))
records.index = copy.index
samples = pd.concat([records, copy], axis=1)
else:
copy = source.copy()
records = pd.DataFrame()
records = records.append([record] * len(source), ignore_index=True)
records.index = copy.index
copy.columns = list(map(lambda col: lprefix + col, copy.columns))
records.columns = list(map(lambda col: rprefix + col, records.columns))
samples = pd.concat([copy, records], axis=1)
if max > 0:
samples = samples.sample(frac=1)[:max]
record2text = " ".join([str(val) for k, val in record.to_dict().items() if k not in ['id']])
samples['score'] = samples.T.apply(lambda row: cs(record2text, " ".join(row.astype(str))))
samples = samples.sort_values(by='score', ascending=not find_positives)
samples = samples.drop(['score'], axis=1)
result = pd.DataFrame()
batch = num_candidates * 4
splits = min(10, int(len(samples) / batch))
i = 0
while len(result) < num_candidates and i < splits:
batch_samples = samples[batch * i:batch * (i + 1)]
predicted = predict_fn(batch_samples)
if find_positives:
out = predicted[predicted["match_score"] > 0.5]
else:
out = predicted[predicted["match_score"] < 0.5]
if len(out) > 0:
result = pd.concat([result, out], axis=0)
logging.info(f'{i}:{len(out)},{len(result)}')
i += 1
return result
def generate_subsequences(lsource, rsource, max=-1):
new_records_left_df = pd.DataFrame()
for i in np.arange(len(lsource[:max])):
r = lsource.iloc[i]
nr_df = pd.DataFrame(generate_modified(r, start_id=len(new_records_left_df) + len(lsource)))
if len(nr_df) > 0:
nr_df.columns = lsource.columns
new_records_left_df = pd.concat([new_records_left_df, nr_df])
new_records_right_df = pd.DataFrame()
for i in np.arange(len(rsource[:max])):
r = rsource.iloc[i]
nr_df = pd.DataFrame(generate_modified(r, start_id=len(new_records_right_df) + len(rsource)))
if len(nr_df) > 0:
nr_df.columns = rsource.columns
new_records_right_df = pd.concat([new_records_right_df, nr_df])
return new_records_left_df, new_records_right_df
def get_support(class_to_explain, lsource, max_predict, original_prediction, predict_fn, r1, r2,
rsource, use_w, use_q, lprefix, rprefix, num_triangles):
candidates4r1 = pd.DataFrame()
candidates4r2 = pd.DataFrame()
num_candidates = int(num_triangles / 2)
if class_to_explain == None:
findPositives = bool(original_prediction[0] > original_prediction[1])
else:
findPositives = bool(0 == int(class_to_explain))
if use_q:
candidates4r1 = find_candidates_predict(r1, rsource, findPositives, predict_fn, num_candidates,
lj=True, max=max_predict, lprefix=lprefix, rprefix=rprefix)
if use_w:
candidates4r2 = find_candidates_predict(r2, lsource, findPositives, predict_fn, num_candidates,
lj=False, max=max_predict, lprefix=lprefix, rprefix=rprefix)
neighborhood = pd.DataFrame()
candidates = pd.concat([candidates4r1, candidates4r2], ignore_index=True)
if len(candidates) > 0:
candidates['id'] = "0@" + candidates[lprefix + 'id'].astype(str) + "#" + "1@" + candidates[
rprefix + 'id'].astype(str)
if findPositives:
neighborhood = candidates[candidates.match_score >= 0.5].copy()
else:
neighborhood = candidates[candidates.match_score < 0.5].copy()
return findPositives, neighborhood
def generate_modified(record, start_id: int = 0):
new_copies = []
t_len = len(record)
copy = record.copy()
for t in range(t_len):
attr_value = str(copy.get(t))
values = attr_value.split()
for cut in range(1, len(values)):
for new_val in [" ".join(values[cut:]),
" ".join(values[:cut])]: # generate new values with prefix / suffix dropped
new_copy = record.copy()
new_copy[t] = new_val # substitute the new value with missing prefix / suffix on the target attribute
if start_id > 0:
new_copy['id'] = len(new_copies) + start_id
new_copies.append(new_copy)
return new_copies
WORD = re.compile(r'\w+')
def cs(text1, text2):
vec1 = Counter(WORD.findall(text1))
vec2 = Counter(WORD.findall(text2))
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x] ** 2 for x in vec1.keys()])
sum2 = sum([vec2[x] ** 2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def expand_copies(lprefix, lsource, r1, r2, rprefix, rsource):
generated_df = pd.DataFrame()
new_copies_left = []
new_copies_right = []
left = True
for record in [r1, r2]:
r1_df = pd.DataFrame(data=[record.values], columns=record.index)
r2_df = pd.DataFrame(data=[record.values], columns=record.index)
r1_df.columns = list(map(lambda col: 'ltable_' + col, r1_df.columns))
r2_df.columns = list(map(lambda col: 'rtable_' + col, r2_df.columns))
r1r2c = | pd.concat([r1_df, r2_df], axis=1) | pandas.concat |
import os
import pandas as pd
import json
import argparse
def main(args):
align_heavy = args.heavy.split(',')
print(align_heavy)
align_light = args.light.split(',')
print(align_light)
align_combined = list()
for heavy in align_heavy:
sample = os.path.basename(heavy).replace('_corr_align.txt', '')
for light in align_light:
if sample == os.path.basename(light).replace('_corr_align.txt', ''):
align_combined.append({
"sample": sample,
"heavy": heavy,
"light": light
})
break
else:
raise Exception("Couldn't find corresponding light results for heavy object")
print("aligncombined", align_combined)
for results in align_combined:
print("result:", results)
light = pd.read_csv(results["light"], sep='\t', header=0)
heavy = pd.read_csv(results["heavy"], sep='\t', header=0)
combined = | pd.concat([light, heavy]) | pandas.concat |
from . import wrapper_double, wrapper_float
import numpy as np, pandas as pd
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix, issparse, isspmatrix_coo, isspmatrix_csr, isspmatrix_csc
import multiprocessing
import ctypes
import warnings
__all__ = ["CMF", "CMF_implicit",
"OMF_explicit", "OMF_implicit",
"MostPopular", "ContentBased",
"CMF_imputer"]
### TODO: this module should move from doing operations in Python to
### using the new designated C functions for each type of prediction.
### TODO: eliminate the hard dependency on pandas.
class _CMF:
def __repr__(self):
return self.__str__()
def set_params(self, **params):
"""
Set the parameters of this estimator.
Kept for compatibility with scikit-learn.
Note
----
Setting any parameter that is related to model hyperparameters (i.e. anything not
related to verbosity or number of threads) will reset the model - that is,
it will no longer be possible to use it for predictions without a new refit.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
"""
if not params:
return self
valid_params = self.get_params()
for k,v in params.items():
if k not in valid_params.keys():
raise ValueError("Invalid parameter %s" % k)
else:
if v not in ["verbose", "nthreads", "n_jobs", "print_every", "handle_interrupt", "random_state"]:
self.is_fitted_ = False
setattr(self, k, v)
return self
def _take_params(self, implicit=False, alpha=40., downweight=False,
apply_log_transf=False,
nonneg=False, nonneg_C=False, nonneg_D=False,
max_cd_steps=100,
k=50, lambda_=1e2, method="als", add_implicit_features=False,
scale_lam=False, scale_lam_sideinfo=False, scale_bias_const=False,
use_cg=False, max_cg_steps=3, finalize_chol=False,
user_bias=True, item_bias=True, center=False,
k_user=0, k_item=0, k_main=0,
w_main=1., w_user=1., w_item=1., w_implicit=0.5,
l1_lambda=0., center_U=True, center_I=True,
maxiter=400, niter=10, parallelize="separate", corr_pairs=4,
NA_as_zero=False, NA_as_zero_user=False, NA_as_zero_item=False,
precompute_for_predictions=True, use_float=False,
random_state=1, verbose=True,
print_every=10, handle_interrupt=True,
produce_dicts=False, nthreads=-1, n_jobs=None):
assert method in ["als", "lbfgs"]
assert parallelize in ["separate", "single"]
k = int(k) if isinstance(k, float) else k
k_user = int(k_user) if isinstance(k_user, float) else k_user
k_item = int(k_item) if isinstance(k_item, float) else k_item
k_main = int(k_main) if isinstance(k_main, float) else k_main
if not isinstance(self, OMF_explicit):
assert isinstance(k, int) and k > 0
else:
assert isinstance(k, int) and k >= 0
assert isinstance(k_user, int) and k_user >= 0
assert isinstance(k_item, int) and k_item >= 0
assert isinstance(k_main, int) and k_main >= 0
if ((max(k_user, k_item) + k + k_main + max(user_bias, item_bias))**2) > np.iinfo(ctypes.c_int).max:
raise ValueError("Number of factors is too large.")
lambda_ = float(lambda_) if isinstance(lambda_, int) else lambda_
if (isinstance(lambda_, list) or isinstance(lambda_, tuple) or isinstance(lambda_, pd.Series)):
lambda_ = np.array(lambda_)
if isinstance(lambda_, np.ndarray):
lambda_ = lambda_.reshape(-1)
assert lambda_.shape[0] == 6
assert np.all(lambda_ >= 0.)
else:
assert isinstance(lambda_, float) and lambda_ >= 0.
l1_lambda = float(l1_lambda) if isinstance(l1_lambda, int) else l1_lambda
if (isinstance(l1_lambda, list) or isinstance(l1_lambda, tuple) or isinstance(l1_lambda, pd.Series)):
l1_lambda = np.array(l1_lambda)
if isinstance(l1_lambda, np.ndarray):
l1_lambda = l1_lambda.reshape(-1)
assert l1_lambda.shape[0] == 6
assert np.all(l1_lambda >= 0.)
else:
assert isinstance(l1_lambda, float) and l1_lambda >= 0.
niter = int(niter) if isinstance(niter, float) else niter
assert isinstance(niter, int) and niter >= 0
if not implicit and method == "lbfgs":
maxiter = int(maxiter) if isinstance(maxiter, float) else maxiter
assert isinstance(maxiter, int) and maxiter >= 0
if n_jobs is not None:
nthreads = n_jobs
        if nthreads is None:
            nthreads = 1
        if nthreads < 1:
            nthreads = multiprocessing.cpu_count() + 1 + nthreads
assert isinstance(nthreads, int) and nthreads > 0
if (nthreads > 1) and (not wrapper_double._get_has_openmp()):
msg_omp = "Attempting to use more than 1 thread, but "
msg_omp += "package was built without multi-threading "
msg_omp += "support - see the project's GitHub page for "
msg_omp += "more information."
warnings.warn(msg_omp)
if not implicit and method == "lbfgs":
print_every = int(print_every) if isinstance(print_every, float) else print_every
assert isinstance(print_every, int) and print_every >= 0
if not implicit and method == "lbfgs":
corr_pairs = int(corr_pairs) if isinstance(corr_pairs, float) else corr_pairs
assert isinstance(corr_pairs, int) and corr_pairs >= 2
if random_state is None:
            random_state = np.random.default_rng()
if isinstance(random_state, np.random.RandomState):
random_state = random_state.randint(np.iinfo(np.int32).max)
elif isinstance(random_state, np.random.Generator):
random_state = random_state.integers(np.iinfo(np.int32).max)
if (method == "lbfgs"):
if (NA_as_zero or NA_as_zero_user or NA_as_zero_item):
raise ValueError("Option 'NA_as_zero' not supported with method='lbfgs'.")
if add_implicit_features:
raise ValueError("Option 'add_implicit_features' not supported with method='lbfgs'.")
if (nonneg) or (nonneg_C) or (nonneg_D):
raise ValueError("non-negativity constraints not supported with method='lbfgs'.")
if (scale_lam) or (scale_lam_sideinfo):
raise ValueError("'scale_lam' not supported with method='lbfgs'.")
if l1_lambda != 0.:
raise ValueError("L1 regularization not supported with method='lbfgs'.")
if method == "als":
assert max_cg_steps > 0
if max_cd_steps is None:
max_cd_steps = 0
if isinstance(max_cd_steps, float):
max_cd_steps = int(max_cd_steps)
assert max_cd_steps >= 0
assert isinstance(max_cd_steps, int)
w_main = float(w_main) if isinstance(w_main, int) else w_main
w_user = float(w_user) if isinstance(w_user, int) else w_user
w_item = float(w_item) if isinstance(w_item, int) else w_item
w_implicit = float(w_implicit) if isinstance(w_implicit, int) else w_implicit
assert isinstance(w_main, float) and w_main > 0
assert isinstance(w_user, float) and w_user > 0
assert isinstance(w_item, float) and w_item > 0
assert isinstance(w_implicit, float) and w_implicit > 0
if implicit:
alpha = float(alpha) if isinstance(alpha, int) else alpha
assert isinstance(alpha, float) and alpha > 0.
if (center and nonneg):
warnings.warn("Warning: will fit a model with centering and non-negativity constraints.")
if (center_U and nonneg_C):
warnings.warn("Warning: will fit a model with centering in 'U' and non-negativity constraints in 'C'.")
if (center_I and nonneg_D):
warnings.warn("Warning: will fit a model with centering in 'I' and non-negativity constraints in 'D'.")
if (NA_as_zero and add_implicit_features):
warnings.warn("Warning: will add implicit features while having 'NA_as_zero'.")
self.k = k
self.k_user = k_user
self.k_item = k_item
self.k_main = k_main
self.lambda_ = lambda_
self.l1_lambda = l1_lambda
self.scale_lam = bool(scale_lam)
self.scale_lam_sideinfo = bool(scale_lam_sideinfo) or self.scale_lam
self.scale_bias_const = bool(scale_bias_const)
self.alpha = alpha
self.w_main = w_main
self.w_user = w_user
self.w_item = w_item
self.w_implicit = w_implicit
self.downweight = bool(downweight)
self.user_bias = bool(user_bias)
self.item_bias = bool(item_bias)
self.center = bool(center) and not bool(implicit)
self.center_U = bool(center_U)
self.center_I = bool(center_I)
self.method = method
self.add_implicit_features = bool(add_implicit_features)
self.apply_log_transf = bool(apply_log_transf)
self.use_cg = bool(use_cg)
self.max_cg_steps = int(max_cg_steps)
self.max_cd_steps = int(max_cd_steps)
self.finalize_chol = bool(finalize_chol)
self.maxiter = maxiter
self.niter = niter
self.parallelize = parallelize
self.NA_as_zero = bool(NA_as_zero)
self.NA_as_zero_user = bool(NA_as_zero_user)
self.NA_as_zero_item = bool(NA_as_zero_item)
self.nonneg = bool(nonneg)
self.nonneg_C = bool(nonneg_C)
self.nonneg_D = bool(nonneg_D)
self.precompute_for_predictions = bool(precompute_for_predictions)
self.include_all_X = True
self.use_float = bool(use_float)
self.verbose = bool(verbose)
self.print_every = print_every
self.corr_pairs = corr_pairs
self.random_state = int(random_state)
self.produce_dicts = bool(produce_dicts)
self.handle_interrupt = bool(handle_interrupt)
self.nthreads = nthreads
self._implicit = bool(implicit)
self.dtype_ = ctypes.c_float if use_float else ctypes.c_double
self._k_pred = k
self._k_main_col = self.k_main
if isinstance(self.lambda_, np.ndarray):
if self.lambda_.dtype != self.dtype_:
self.lambda_ = self.lambda_.astype(self.dtype_)
if isinstance(self.l1_lambda, np.ndarray):
if self.l1_lambda.dtype != self.dtype_:
self.l1_lambda = self.l1_lambda.astype(self.dtype_)
self._reset()
def _reset(self):
self.A_ = np.empty((0,0), dtype=self.dtype_)
self.B_ = np.empty((0,0), dtype=self.dtype_)
self.C_ = np.empty((0,0), dtype=self.dtype_)
self.D_ = np.empty((0,0), dtype=self.dtype_)
self.Cbin_ = np.empty((0,0), dtype=self.dtype_)
self.Dbin_ = np.empty((0,0), dtype=self.dtype_)
self.Ai_ = np.empty((0,0), dtype=self.dtype_)
self.Bi_ = np.empty((0,0), dtype=self.dtype_)
self.user_bias_ = np.empty(0, dtype=self.dtype_)
self.item_bias_ = np.empty(0, dtype=self.dtype_)
self.scaling_biasA_ = 0.
self.scaling_biasB_ = 0.
self.C_bias_ = np.empty(0, dtype=self.dtype_)
self.D_bias_ = np.empty(0, dtype=self.dtype_)
self.glob_mean_ = 0.
self._TransBtBinvBt = np.empty((0,0), dtype=self.dtype_)
## will have lambda added for implicit but not for explicit, dim is k+k_main
self._BtB = np.empty((0,0), dtype=self.dtype_)
self._BtXbias = np.empty(0, dtype=self.dtype_)
self._TransCtCinvCt = np.empty((0,0), dtype=self.dtype_)
## will be multiplied by w_user already
self._CtC = np.empty((0,0), dtype=self.dtype_)
self._BeTBe = np.empty((0,0), dtype=self.dtype_)
self._BeTBeChol = np.empty((0,0), dtype=self.dtype_)
self._BiTBi = np.empty((0,0), dtype=self.dtype_)
self._CtUbias = np.empty(0, dtype=self.dtype_)
self._A_pred = np.empty((0,0), dtype=self.dtype_)
self._B_pred = np.empty((0,0), dtype=self.dtype_)
self._B_plus_bias = np.empty((0,0), dtype=self.dtype_)
self._U_cols = np.empty(0, dtype=object)
self._I_cols = np.empty(0, dtype=object)
self._Ub_cols = np.empty(0, dtype=object)
self._Ib_cols = np.empty(0, dtype=object)
self._U_colmeans = np.empty(0, dtype=self.dtype_)
self._I_colmeans = np.empty(0, dtype=self.dtype_)
self._w_main_multiplier = 1.
self.is_fitted_ = False
self._only_prediction_info = False
self.nfev_ = None
self.nupd_ = None
self.user_mapping_ = np.array([], dtype=object)
self.item_mapping_ = np.array([], dtype=object)
self.reindex_ = False
self.user_dict_ = dict()
self.item_dict_ = dict()
def _take_params_offsets(self, k_sec=0, k_main=0, add_intercepts=True):
k_sec = int(k_sec) if isinstance(k_sec, float) else k_sec
k_main = int(k_main) if isinstance(k_main, float) else k_main
assert isinstance(k_sec, int) and k_sec >= 0
assert isinstance(k_main, int) and k_main >= 0
if ((max(k_sec, k_main) + self.k)**2 + 1) > np.iinfo(ctypes.c_int).max:
raise ValueError("Number of factors is too large.")
if self.method == "als":
if self._implicit:
msg = " not supported for implicit-feedback."
else:
msg = " not supported with method='als'."
if k_sec > 0 or k_main > 0:
raise ValueError("'k_sec' and 'k_main'" + msg)
if isinstance(self.lambda_, np.ndarray):
raise ValueError("Different regularization for each parameter is" + msg)
if self.w_user != 1. or self.w_item != 1.:
raise ValueError("'w_user' and 'w_main' are" + msg)
self.k_sec = k_sec
self.k_main = k_main
self._k_pred = self.k_sec + self.k + self.k_main
self._k_main_col = 0
self.add_intercepts = bool(add_intercepts)
def _append_NAs(self, U, m_u, p, append_U):
U_new = np.repeat(np.nan, m_u*p).reshape((m_u, p))
if U_new.dtype != self.dtype_:
            U_new = U_new.astype(self.dtype_)
if not U_new.flags["C_CONTIGUOUS"]:
U_new = np.ascontiguousarray(U_new)
U_new[np.setdiff1d(np.arange(m_u), append_U), :] = U
if U_new.dtype != self.dtype_:
            U_new = U_new.astype(self.dtype_)
return U_new
def _decompose_coo(self, X):
row = X.row
col = X.col
val = X.data
if row.dtype != ctypes.c_int:
row = row.astype(ctypes.c_int)
if col.dtype != ctypes.c_int:
col = col.astype(ctypes.c_int)
if val.dtype != self.dtype_:
val = val.astype(self.dtype_)
return row, col, val
def _process_U_arr(self, U):
Urow = np.empty(0, dtype=ctypes.c_int)
Ucol = np.empty(0, dtype=ctypes.c_int)
Uval = np.empty(0, dtype=self.dtype_)
Uarr = np.empty((0,0), dtype=self.dtype_)
Ucols = np.empty(0, dtype=object)
m = 0
p = 0
if issparse(U) and not isspmatrix_coo(U):
U = U.tocoo()
if isspmatrix_coo(U):
Urow, Ucol, Uval = self._decompose_coo(U)
m, p = U.shape
elif U is not None:
if isinstance(U, pd.DataFrame):
Ucols = U.columns.to_numpy()
U = U.to_numpy()
if not U.flags["C_CONTIGUOUS"]:
U = np.ascontiguousarray(U)
if U.dtype != self.dtype_:
U = U.astype(self.dtype_)
Uarr = U
m, p = Uarr.shape
return Urow, Ucol, Uval, Uarr, Ucols, m, p
def _convert_ids(self, X, U, U_bin, col="UserId"):
### Note: if one 'UserId' column is a Pandas Categorical, then all
### of them in the other DataFrames have to be too.
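        # Sketch of the constraint above (hypothetical frames, not from this file):
        # either leave every 'UserId' column as plain values, or convert them all, e.g.
        #     X["UserId"] = X["UserId"].astype("category")
        #     U["UserId"] = U["UserId"].astype("category")
        # mixing categorical and plain IDs may make the factorize/Categorical lookups
        # below disagree on codes.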
swapped = False
append_U = np.empty(0, dtype=object)
append_Ub = np.empty(0, dtype=object)
msg = "'X' and side info have no IDs in common."
if (U is not None) and (U_bin is not None):
user_ids1 = np.intersect1d(U[col].to_numpy(), X[col].to_numpy())
user_ids2 = np.intersect1d(U_bin[col].to_numpy(), X[col].to_numpy())
user_ids3 = np.intersect1d(U_bin[col].to_numpy(), U[col].to_numpy())
if (user_ids1.shape[0] == 0) and (user_ids2.shape[0] == 0):
raise ValueError(msg)
user_ids = np.intersect1d(user_ids1, user_ids2)
u_not_x = np.setdiff1d(U[col].to_numpy(), X[col].to_numpy())
x_not_u = np.setdiff1d(X[col].to_numpy(), U[col].to_numpy())
b_not_x = np.setdiff1d(U_bin[col].to_numpy(), X[col].to_numpy())
x_not_b = np.setdiff1d(X[col].to_numpy(), U_bin[col].to_numpy())
b_not_u = np.setdiff1d(U_bin[col].to_numpy(), U[col].to_numpy())
u_not_b = np.setdiff1d(U[col].to_numpy(), U_bin[col].to_numpy())
### There can be cases in which the sets are disjoint,
### and will need to add NAs to one of the inputs.
if (u_not_x.shape[0] == 0 and
x_not_u.shape[0] == 0 and
b_not_x.shape[0] == 0 and
x_not_b.shape[0] == 0 and
b_not_u.shape[0] == 0 and
u_not_b.shape[0] == 0):
user_ids = user_ids
else:
if u_not_b.shape[0] >= b_not_u.shape[0]:
user_ids = np.r_[user_ids, user_ids1, X[col].to_numpy(), user_ids3, U[col].to_numpy(), U_bin[col].to_numpy()]
append_U = x_not_u
append_Ub = np.r_[x_not_b, u_not_b]
else:
user_ids = np.r_[user_ids, user_ids2, X[col].to_numpy(), user_ids3, U_bin[col].to_numpy(), U[col].to_numpy()]
append_U = np.r_[x_not_u, b_not_u]
append_Ub = x_not_b
_, user_mapping_ = pd.factorize(user_ids)
X = X.assign(**{col : pd.Categorical(X[col], user_mapping_).codes})
if X[col].dtype != ctypes.c_int:
X = X.assign(**{col : X[col].astype(ctypes.c_int)})
U = U.assign(**{col : pd.Categorical(U[col], user_mapping_).codes})
if U[col].dtype != ctypes.c_int:
                U = U.assign(**{col : U[col].astype(ctypes.c_int)})
U_bin = U_bin.assign(**{col : pd.Categorical(U_bin[col], user_mapping_).codes})
if U_bin[col].dtype != ctypes.c_int:
U_bin = U_bin.assign(**{col : U_bin[col].astype(ctypes.c_int)})
if append_U.shape[0]:
append_U = pd.Categorical(np.unique(append_U), user_mapping_).codes.astype(ctypes.c_int)
append_U = np.sort(append_U)
if append_Ub.shape[0]:
append_Ub = pd.Categorical(np.unique(append_Ub), user_mapping_).codes.astype(ctypes.c_int)
append_Ub = np.sort(append_Ub)
else:
if (U is None) and (U_bin is not None):
U, U_bin = U_bin, U
swapped = True
if (U is not None):
user_ids = np.intersect1d(U[col].to_numpy(), X[col].to_numpy())
if user_ids.shape[0] == 0:
raise ValueError(msg)
u_not_x = np.setdiff1d(U[col].to_numpy(), X[col].to_numpy())
x_not_u = np.setdiff1d(X[col].to_numpy(), U[col].to_numpy())
if (u_not_x.shape[0]) or (x_not_u.shape[0]):
### Case0: both have the same entries
### This is the ideal situation
if (x_not_u.shape[0] == 0) and (u_not_x.shape[0] == 0):
user_ids = user_ids
### Case1: X has IDs that U doesn't, but not the other way around
### Here there's no need to do anything special afterwards
if (x_not_u.shape[0] > 0) and (u_not_x.shape[0] == 0):
user_ids = np.r_[user_ids, x_not_u]
### Case2: U has IDs that X doesn't, but not the other way around
### Don't need to do anything special afterwards either
elif (u_not_x.shape[0] > 0) and (x_not_u.shape[0] == 0):
user_ids = np.r_[user_ids, u_not_x]
### Case3: both have IDs that the others don't
else:
user_ids = np.r_[user_ids, X[col].to_numpy(), U[col].to_numpy()]
append_U = x_not_u
_, user_mapping_ = pd.factorize(user_ids)
if not isinstance(user_mapping_, np.ndarray):
user_mapping_ = user_mapping_.to_numpy()
X = X.assign(**{col : pd.Categorical(X[col], user_mapping_).codes})
if X[col].dtype != ctypes.c_int:
X = X.assign(**{col : X[col].astype(ctypes.c_int)})
U = U.assign(**{col : pd.Categorical(U[col], user_mapping_).codes})
if U[col].dtype != ctypes.c_int:
U = U.assign(**{col : U[col].astype(ctypes.c_int)})
if append_U.shape[0]:
append_U = pd.Categorical(append_U, user_mapping_).codes.astype(ctypes.c_int)
append_U = np.sort(append_U)
else:
X_col, user_mapping_ = pd.factorize(X[col].to_numpy())
X = X.assign(**{col : X_col})
if X[col].dtype != ctypes.c_int:
X = X.assign(**{col : X[col].astype(ctypes.c_int)})
if not isinstance(user_mapping_, np.ndarray):
user_mapping_ = user_mapping_.to_numpy()
if swapped:
U, U_bin = U_bin, U
append_U, append_Ub = append_Ub, append_U
return X, U, U_bin, user_mapping_, append_U, append_Ub
def _process_U_df(self, U, is_I=False, df_name="U"):
Urow = np.empty(0, dtype=ctypes.c_int)
Ucol = np.empty(0, dtype=ctypes.c_int)
Uval = np.empty(0, dtype=self.dtype_)
Uarr = np.empty((0,0), dtype=self.dtype_)
Ucols = np.empty(0, dtype=object)
cl_take = "ItemId" if is_I else "UserId"
m = 0
p = 0
if U is not None:
if "ColumnId" in U.columns.values:
Urow = U[cl_take].astype(ctypes.c_int).to_numpy()
Ucol = U.ColumnId.astype(ctypes.c_int).to_numpy()
if "Value" not in U.columns.values:
msg = "If passing sparse '%s', must have column 'Value'."
msg = msg % df_name
raise ValueError(msg)
Uval = U.Value.astype(self.dtype_).to_numpy()
m = int(Urow.max() + 1)
p = int(Ucol.max() + 1)
else:
U = U.sort_values(cl_take)
Uarr = U[[cl for cl in U.columns.values if cl != cl_take]]
Ucols = Uarr.columns.to_numpy()
Uarr = Uarr.to_numpy()
if not Uarr.flags["C_CONTIGUOUS"]:
Uarr = np.ascontiguousarray(Uarr)
if Uarr.dtype != self.dtype_:
Uarr = Uarr.astype(self.dtype_)
m, p = Uarr.shape
return Urow, Ucol, Uval, Uarr, Ucols, m, p
def _process_new_U(self, U, U_col, U_val, U_bin, is_I=False):
letter = "U" if not is_I else "I"
name = "user" if not is_I else "item"
Mat = self.C_ if not is_I else self.D_
MatBin = self.Cbin_ if not is_I else self.Dbin_
Cols = self._U_cols if not is_I else self._I_cols
ColsBin = self._Ub_cols if not is_I else self._Ib_cols
dct = self.user_dict_ if not is_I else self.item_dict_
mapping = self.user_mapping_ if not is_I else self.item_mapping_
if ((U_col is not None) and (U_val is None)) or ((U_col is None) and (U_val is not None)):
raise ValueError("Must pass '%s_col' and '%s_val' together."
% (letter, letter))
if (U_col is not None) and (U is not None):
raise ValueError("Can only pass %s info in one format."
% name)
if (U is None) and (U_col is None) and (U_bin is None):
raise ValueError("Must pass %s side information in some format."
% name)
###
if U is not None:
if Mat.shape[0] == 0:
raise ValueError("Model was not fit to %s data." % name)
if isinstance(U, pd.DataFrame) and Cols.shape[0]:
U = U[Cols]
U = np.array(U).reshape(-1).astype(self.dtype_)
if U.shape[0] != Mat.shape[0]:
raise ValueError("Dimensions of %s don't match with earlier data."
% letter)
else:
U = np.empty(0, dtype=self.dtype_)
###
if U_bin is not None:
if MatBin.shape[0] == 0:
raise ValueError("Model was not fit to %s binary data." % name)
if isinstance(U_bin, pd.DataFrame) and (ColsBin.shape[0]):
U_bin = U_bin[ColsBin]
U_bin = np.array(U_bin).reshape(-1).astype(self.dtype_)
if U_bin.shape[0] != MatBin.shape[0]:
raise ValueError("Dimensions of %s_bin don't match with earlier data."
% letter)
else:
U_bin = np.empty(0, dtype=self.dtype_)
###
if U_col is not None:
if Mat.shape[0] == 0:
raise ValueError("Model was not fit to %s data." % name)
U_val = np.array(U_val).reshape(-1).astype(self.dtype_)
if U_val.shape[0] == 0:
if np.array(U_col).shape[0] > 0:
raise ValueError("'%s_col' and '%s_val' must have the same number of entries." % (letter, letter))
U_col = np.empty(0, dtype=ctypes.c_int)
U_val = np.empty(0, dtype=self.dtype_)
else:
if self.reindex_:
if len(dct):
try:
U_col = np.array([dct[u] for u in U_col])
except:
raise ValueError("Sparse inputs cannot contain missing values.")
else:
U_col = pd.Categorical(U_col, mapping).codes.astype(ctypes.c_int)
if np.any(U_col < 0):
raise ValueError("Sparse inputs cannot contain missing values.")
U_col = U_col.astype(ctypes.c_int)
else:
U_col = np.array(U_col).reshape(-1).astype(ctypes.c_int)
imin, imax = U_col.min(), U_col.max()
if np.isnan(imin) or np.isnan(imax):
raise ValueError("Sparse inputs cannot contain missing values.")
if (imin < 0) or (imax >= Mat.shape[0]):
msg = "Column indices for user info must be within the range"
msg += " of the data that was pased to 'fit'."
raise ValueError(msg)
if U_val.shape[0] != U_col.shape[0]:
raise ValueError("'%s_col' and '%s_val' must have the same number of entries." % (letter, letter))
else:
U_col = np.empty(0, dtype=ctypes.c_int)
U_val = np.empty(0, dtype=self.dtype_)
###
return U, U_col, U_val, U_bin
def _process_new_U_2d(self, U, is_I=False, allow_csr=False):
letter = "U" if not is_I else "I"
col_id = "UserId" if not is_I else "ItemId"
Cols = self._U_cols if not is_I else self._I_cols
Mat = self.C_ if not is_I else self.D_
Uarr = np.empty((0,0), dtype=self.dtype_)
Urow = np.empty(0, dtype=ctypes.c_int)
Ucol = np.empty(0, dtype=ctypes.c_int)
Uval = np.empty(0, dtype=self.dtype_)
Ucsr_p = np.empty(0, dtype=ctypes.c_size_t)
Ucsr_i = np.empty(0, dtype=ctypes.c_int)
Ucsr = np.empty(0, dtype=self.dtype_)
m, p = U.shape if U is not None else (0,0)
if (p != Mat.shape[0]) and (Mat.shape[0] > 0) and (p > 0):
msg = "'%s' must have the same columns "
msg += "as the data passed to 'fit'."
raise ValueError(msg % letter)
if issparse(U) and (not isspmatrix_coo(U)) and (not isspmatrix_csr(U)):
U = U.tocoo()
elif isspmatrix_csr(U) and not allow_csr:
U = U.tocoo()
if isinstance(U, pd.DataFrame):
if col_id in U.columns.values:
warnings.warn("'%s' not meaningful for new inputs." % col_id)
if Cols.shape[0]:
U = U[Cols]
Uarr = U.to_numpy()
Uarr = np.ascontiguousarray(Uarr)
if Uarr.dtype != self.dtype_:
Uarr = Uarr.astype(self.dtype_)
elif isspmatrix_coo(U):
Urow = U.row.astype(ctypes.c_int)
Ucol = U.col.astype(ctypes.c_int)
Uval = U.data.astype(self.dtype_)
elif isspmatrix_csr(U):
if not allow_csr:
raise ValueError("Unexpected error.")
Ucsr_p = U.indptr.astype(ctypes.c_size_t)
Ucsr_i = U.indices.astype(ctypes.c_int)
Ucsr = U.data.astype(self.dtype_)
elif isinstance(U, np.ndarray):
if not U.flags["C_CONTIGUOUS"]:
U = np.ascontiguousarray(U)
if U.dtype != self.dtype_:
U = U.astype(self.dtype_)
Uarr = U
elif U is None:
pass
else:
if not allow_csr:
msg = "'%s' must be a Pandas DataFrame, SciPy sparse COO, or NumPy array."
else:
msg = "'%s' must be a Pandas DataFrame, SciPy sparse CSR or COO, or NumPy array."
raise ValueError(msg % letter)
return Uarr, Urow, Ucol, Uval, Ucsr_p, Ucsr_i, Ucsr, m, p
def _process_new_Ub_2d(self, U_bin, is_I=False):
letter = "U" if not is_I else "I"
col_id = "UserId" if not is_I else "ItemId"
Cols = self._Ub_cols if not is_I else self._Ib_cols
Mat = self.Cbin_ if not is_I else self.Dbin_
Ub_arr = np.empty((0,0), dtype=self.dtype_)
m_ub, pbin = U_bin.shape if U_bin is not None else (0,0)
if max(m_ub, pbin) and (not Mat.shape[0] or not Mat.shape[1]):
raise ValueError("Cannot pass binary data if model was not fit to binary side info.")
if (pbin != Mat.shape[0]) and (Mat.shape[0] > 0) and (pbin > 0):
msg = "'%s_bin' must have the same columns "
msg += "as the data passed to 'fit'."
raise ValueError(msg % letter)
if isinstance(U_bin, pd.DataFrame):
if col_id in U_bin.columns.values:
warnings.warn("'%s' not meaningful for new inputs." % col_id)
if Cols.shape[0]:
U_bin = U_bin[Cols]
Ub_arr = U_bin.to_numpy()
Ub_arr = np.ascontiguousarray(Ub_arr)
if Ub_arr.dtype != self.dtype_:
Ub_arr = Ub_arr.astype(self.dtype_)
        elif isinstance(U_bin, np.ndarray):
            if not U_bin.flags["C_CONTIGUOUS"]:
                U_bin = np.ascontiguousarray(U_bin)
            if U_bin.dtype != self.dtype_:
                U_bin = U_bin.astype(self.dtype_)
            Ub_arr = U_bin
        elif U_bin is None:
            pass
else:
raise ValueError("'%s_bin' must be a Pandas DataFrame or NumPy array."
% letter)
return Ub_arr, m_ub, pbin
def _process_new_X_2d(self, X, W=None):
if len(X.shape) != 2:
raise ValueError("'X' must be 2-dimensional.")
Xarr = np.empty((0,0), dtype=self.dtype_)
Xrow = np.empty(0, dtype=ctypes.c_int)
Xcol = np.empty(0, dtype=ctypes.c_int)
Xval = np.empty(0, dtype=self.dtype_)
Xcsr_p = np.empty(0, dtype=ctypes.c_size_t)
Xcsr_i = np.empty(0, dtype=ctypes.c_int)
Xcsr = np.empty(0, dtype=self.dtype_)
W_dense = np.empty((0,0), dtype=self.dtype_)
W_sp = np.empty(0, dtype=self.dtype_)
m, n = X.shape
if issparse(X) and (not isspmatrix_coo(X)) and (not isspmatrix_csr(X)):
if (W is not None) and (not issparse(W)):
if not isinstance(W, np.ndarray):
W = np.array(W).reshape(-1)
if W.shape[0] != X.nnz:
raise ValueError("'X' and 'W' have different number of entries.")
if isspmatrix_csc(X):
W = csc_matrix((W, X.indices, X.indptr), shape=(X.shape[0], X.shape[1]))
W = W.tocoo()
else:
raise ValueError("Must pass 'X' as SciPy sparse COO if there are weights.")
X = X.tocoo()
if issparse(W) and (not isspmatrix_coo(W)) and (not isspmatrix_csr(W)):
W = W.tocoo()
if (isspmatrix_coo(X) != isspmatrix_coo(W)):
if not isspmatrix_coo(X):
X = X.tocoo()
if not isspmatrix_coo(W):
W = W.tocoo()
if issparse(W):
W = W.data
if isspmatrix_coo(X):
Xrow = X.row.astype(ctypes.c_int)
Xcol = X.col.astype(ctypes.c_int)
Xval = X.data.astype(self.dtype_)
if W is not None:
W_sp = np.array(W).reshape(-1).astype(self.dtype_)
if W_sp.shape[0] != Xval.shape[0]:
msg = "'W' must have the same number of non-zero entries "
msg += "as 'X'."
raise ValueError(msg)
elif isspmatrix_csr(X):
Xcsr_p = X.indptr.astype(ctypes.c_size_t)
Xcsr_i = X.indices.astype(ctypes.c_int)
Xcsr = X.data.astype(self.dtype_)
if W is not None:
W_sp = np.array(W).reshape(-1).astype(self.dtype_)
if W_sp.shape[0] != Xcsr.shape[0]:
msg = "'W' must have the same number of non-zero entries "
msg += "as 'X'."
raise ValueError(msg)
elif isinstance(X, np.ndarray):
if not X.flags["C_CONTIGUOUS"]:
X = np.ascontiguousarray(X)
if X.dtype != self.dtype_:
X = X.astype(self.dtype_)
Xarr = X
if W is not None:
assert W.shape[0] == X.shape[0]
assert W.shape[1] == X.shape[1]
if not W.flags["C_CONTIGUOUS"]:
W = np.ascontiguousarray(W)
if W.dtype != self.dtype_:
W = W.astype(self.dtype_)
W_dense = W
else:
raise ValueError("'X' must be a SciPy CSR or COO matrix, or NumPy array.")
if n > self._n_orig:
raise ValueError("'X' has more columns than what was passed to 'fit'.")
if self.apply_log_transf:
if Xval.min() < 1:
raise ValueError("Cannot pass values below 1 with 'apply_log_transf=True'.")
return Xarr, Xrow, Xcol, Xval, Xcsr_p, Xcsr_i, Xcsr, m, n, W_dense, W_sp
def _process_users_items(self, user, item, include, exclude, allows_no_item=True):
if (include is not None and np.any(pd.isnull(include))) \
or (exclude is not None and np.any(pd.isnull(exclude))):
raise ValueError("'include' and 'exclude' should not contain missing values.")
if include is not None and exclude is not None:
raise ValueError("Cannot pass 'include' and 'exclude' together.")
include = np.array(include).reshape(-1) if include is not None \
else np.empty(0, dtype=ctypes.c_int)
exclude = np.array(exclude).reshape(-1) if exclude is not None \
else np.empty(0, dtype=ctypes.c_int)
if isinstance(user, list) or isinstance(user, tuple):
user = np.array(user)
if isinstance(item, list) or isinstance(item, tuple):
item = np.array(item)
if isinstance(user, pd.Series):
user = user.to_numpy()
if isinstance(item, pd.Series):
item = item.to_numpy()
if user is not None:
if isinstance(user, np.ndarray):
if len(user.shape) > 1:
user = user.reshape(-1)
assert user.shape[0] > 0
if self.reindex_:
if user.shape[0] > 1:
user = pd.Categorical(user, self.user_mapping_).codes
if user.dtype != ctypes.c_int:
user = user.astype(ctypes.c_int)
else:
if len(self.user_dict_):
try:
user = self.user_dict_[user]
except:
user = -1
else:
user = pd.Categorical(user, self.user_mapping_).codes[0]
else:
if self.reindex_:
if len(self.user_dict_):
try:
user = self.user_dict_[user]
except:
user = -1
else:
user = pd.Categorical(np.array([user]), self.user_mapping_).codes[0]
user = np.array([user])
if item is not None:
if isinstance(item, np.ndarray):
if len(item.shape) > 1:
item = item.reshape(-1)
assert item.shape[0] > 0
if self.reindex_:
if item.shape[0] > 1:
item = pd.Categorical(item, self.item_mapping_).codes
if item.dtype != ctypes.c_int:
item = item.astype(ctypes.c_int)
else:
if len(self.item_dict_):
try:
item = self.item_dict_[item[0]]
except:
item = -1
else:
item = pd.Categorical(item, self.item_mapping_).codes[0]
else:
if self.reindex_:
if len(self.item_dict_):
try:
item = self.item_dict_[item]
except:
item = -1
else:
item = pd.Categorical(np.array([item]), self.item_mapping_).codes[0]
item = np.array([item])
else:
if not allows_no_item:
raise ValueError("Must pass IDs for 'item'.")
if self.reindex_:
msg = "'%s' should contain only items that were passed to 'fit'."
if include.shape[0]:
if len(self.item_dict_):
try:
include = np.array([self.item_dict_[i] for i in include])
except:
raise ValueError(msg % "include")
else:
include = | pd.Categorical(include, self.item_mapping_) | pandas.Categorical |
from bs4 import BeautifulSoup as Bs4
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pandas as pd
def scroll(driver, timeout):
scroll_pause_time = timeout
# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to bottom
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
sleep(scroll_pause_time)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
# If heights are the same it will exit the function
break
last_height = new_height
driver = webdriver.Chrome(executable_path='C:/WebDrivers/chromedriver.exe')
url = 'https://www.facebook.com/login'
driver.get(url)
driver.implicitly_wait(10)
email = 'Your Email'
email_xpath = """//*[@id="email"]"""
find_email_element = driver.find_element_by_xpath(email_xpath)
find_email_element.send_keys(email)
driver.implicitly_wait(10)
password = '<PASSWORD>'
password_xpath = """//*[@id="pass"]"""
find_password_element = driver.find_element_by_xpath(password_xpath)
find_password_element.send_keys(password)
find_password_element.send_keys(Keys.ENTER)
sleep(6)
group_url = "https://www.facebook.com/groups/group-name/members"
driver.get(group_url)
driver.implicitly_wait(10)
scroll(driver, 2)
names = []
final_names = []
src = driver.page_source
html_soup = Bs4(src, 'lxml')
html_soup.prettify()
for name in html_soup.find_all('a', {'class': "oajrlxb2 g5ia77u1 qu0x051f esr5mh6w e9989ue4 r7d6kgcz rq0escxv nhd2j8a9 nc684nl6 p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x jb3vyjys rz4wbd8a qt6c0cv9 a8nywdso i1ao9s8h esuyzwwr f1sip0of lzcic4wl oo9gr5id gpro0wi8 lrazzd5p"}):
text = name.get_text()
    names.append(text)  # list.append returns None, so there is no result to keep
for final_name in names[1:]:
final_names.append(final_name)
df = | pd.DataFrame(final_names) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
from pandas import Index, MultiIndex, Series
import pandas._testing as tm
class TestSeriesAlterAxes:
def test_setindex(self, string_series):
# wrong type
msg = (
r"Index\(\.\.\.\) must be called with a collection of some "
r"kind, None was passed"
)
with pytest.raises(TypeError, match=msg):
string_series.index = None
# wrong length
msg = (
"Length mismatch: Expected axis has 30 elements, "
"new values have 29 elements"
)
with pytest.raises(ValueError, match=msg):
string_series.index = np.arange(len(string_series) - 1)
# works
string_series.index = np.arange(len(string_series))
assert isinstance(string_series.index, Index)
# Renaming
def test_set_name_attribute(self):
s = Series([1, 2, 3])
s2 = Series([1, 2, 3], name="bar")
for name in [7, 7.0, "name", datetime(2001, 1, 1), (1,), "\u05D0"]:
s.name = name
assert s.name == name
s2.name = name
assert s2.name == name
def test_set_name(self):
s = Series([1, 2, 3])
s2 = s._set_name("foo")
assert s2.name == "foo"
assert s.name is None
assert s is not s2
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(range(10))
s.index = idx
assert s.index.is_all_dates
def test_reorder_levels(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
tm.assert_series_equal(s, result)
# no change, labels
result = s.reorder_levels(["L0", "L1", "L2"])
tm.assert_series_equal(s, result)
# rotate, position
result = s.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = Series(np.arange(6), index=e_idx)
tm.assert_series_equal(result, expected)
def test_rename_axis_mapper(self):
# GH 19978
mi = | MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"]) | pandas.MultiIndex.from_product |
from collections import OrderedDict
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, date_range
import pandas.util.testing as tm
def test_constructor_single_level():
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
codes=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels():
msg = "non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=[], codes=[])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=[])
with pytest.raises(TypeError, match=msg):
MultiIndex(codes=[])
def test_constructor_nonhashable_names():
# GH 20527
levels = [[1, 2], ['one', 'two']]
codes = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = (['foo'], ['bar'])
msg = r"MultiIndex\.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=levels, codes=codes, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
with pytest.raises(TypeError, match=msg):
mi.rename(names=renamed)
# With .set_names()
with pytest.raises(TypeError, match=msg):
mi.set_names(names=renamed)
def test_constructor_mismatched_codes_levels(idx):
codes = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
msg = "Length of levels and codes must be the same"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=levels, codes=codes)
length_error = (r"On level 0, code max \(3\) >= length of level \(1\)\."
" NOTE: this index is in an inconsistent state")
label_error = r"Unequal code lengths: \[4, 2\]"
code_value_error = r"On level 0, code value \(-2\) < -1"
# important to check that it's looking at the right thing.
with pytest.raises(ValueError, match=length_error):
MultiIndex(levels=[['a'], ['b']],
codes=[[0, 1, 2, 3], [0, 3, 4, 1]])
with pytest.raises(ValueError, match=label_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, 0, 0, 0], [0, 0]])
# external API
with pytest.raises(ValueError, match=length_error):
idx.copy().set_levels([['a'], ['b']])
with pytest.raises(ValueError, match=label_error):
idx.copy().set_codes([[0, 0, 0, 0], [0, 0]])
# test set_codes with verify_integrity=False
# the setting should not raise any value error
idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]],
verify_integrity=False)
# code value smaller than -1
with pytest.raises(ValueError, match=code_value_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, -2], [0, 0]])
def test_na_levels():
# GH26408
# test if codes are re-assigned value -1 for levels
    # with missing values (NaN, NaT, None)
result = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[-1, -1, -1, -1, 3, 4]])
tm.assert_index_equal(result, expected)
result = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[-1, -1, 1, -1, 3, -1]])
tm.assert_index_equal(result, expected)
# verify set_levels and set_codes
result = MultiIndex(
levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]).set_levels(
[[np.nan, 's', pd.NaT, 128, None]])
tm.assert_index_equal(result, expected)
result = MultiIndex(
levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[1, 2, 2, 2, 2, 2]]).set_codes(
[[0, -1, 1, 2, 3, 4]])
tm.assert_index_equal(result, expected)
def test_labels_deprecated(idx):
# GH23752
with tm.assert_produces_warning(FutureWarning):
MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
with tm.assert_produces_warning(FutureWarning):
idx.labels
def test_copy_in_constructor():
levels = np.array(["a", "b", "c"])
codes = np.array([1, 1, 2, 0, 0, 1, 1])
val = codes[0]
mi = MultiIndex(levels=[levels, levels], codes=[codes, codes],
copy=True)
assert mi.codes[0][0] == val
codes[0] = 15
assert mi.codes[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
# ----------------------------------------------------------------------------
# from_arrays
# ----------------------------------------------------------------------------
def test_from_arrays(idx):
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(idx):
# GH 18434
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=idx.names)
tm.assert_index_equal(result, idx)
# invalid iterator input
msg = "Input must be a list / sequence of array-likes."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(0)
def test_from_arrays_tuples(idx):
arrays = tuple(tuple(np.asarray(lev).take(level_codes))
for lev, level_codes in zip(idx.levels, idx.codes))
# tuple of tuples as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
def test_from_arrays_index_series_datetimetz():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta():
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period():
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical():
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, codes=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_sequence_of_arrays', [
1, [1], [1, 2], [[1], 2], [1, [2]], 'a', ['a'], ['a', 'b'], [['a'], 'b'],
(1,), (1, 2), ([1], 2), (1, [2]), 'a', ('a',), ('a', 'b'), (['a'], 'b'),
[(1,), 2], [1, (2,)], [('a',), 'b'],
((1,), 2), (1, (2,)), (('a',), 'b')
])
def test_from_arrays_invalid_input(invalid_sequence_of_arrays):
msg = "Input must be a list / sequence of array-likes"
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays)
@pytest.mark.parametrize('idx1, idx2', [
([1, 2, 3], ['a', 'b']),
([], ['a', 'b']),
([1, 2, 3], [])
])
def test_from_arrays_different_lengths(idx1, idx2):
# see gh-13599
msg = '^all arrays must be same length$'
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays([idx1, idx2])
# ----------------------------------------------------------------------------
# from_tuples
# ----------------------------------------------------------------------------
def test_from_tuples():
msg = 'Cannot infer number of levels from empty list'
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples([])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
codes=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator():
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
codes=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
msg = 'Input must be a list / sequence of tuple-likes.'
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples(0)
def test_from_tuples_empty():
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_index_values(idx):
result = MultiIndex.from_tuples(idx)
assert (result.values == idx.values).all()
def test_tuples_with_name_string():
# GH 15110 and GH 14848
li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
pd.Index(li, name='abc')
with pytest.raises(ValueError, match=msg):
pd.Index(li, name='a')
def test_from_tuples_with_tuple_label():
# GH 15457
expected = pd.DataFrame([[2, 1, 2], [4, (1, 2), 3]],
columns=['a', 'b', 'c']).set_index(['a', 'b'])
idx = pd.MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=('a', 'b'))
result = pd.DataFrame([2, 3], columns=['c'], index=idx)
tm.assert_frame_equal(expected, result)
# ----------------------------------------------------------------------------
# from_product
# ----------------------------------------------------------------------------
def test_from_product_empty_zero_levels():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_product([])
def test_from_product_empty_one_level():
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
@pytest.mark.parametrize('first, second', [
([], []),
(['foo', 'bar', 'baz'], []),
([], ['a', 'b', 'c']),
])
def test_from_product_empty_two_levels(first, second):
names = ['A', 'B']
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
codes=[[], []], names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('N', list(range(4)))
def test_from_product_empty_three_levels(N):
# GH12258
names = ['A', 'B', 'C']
lvl2 = list(range(N))
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
codes=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_input', [
1,
[1],
[1, 2],
[[1], 2],
'a',
['a'],
['a', 'b'],
[['a'], 'b'],
])
def test_from_product_invalid_input(invalid_input):
msg = (r"Input must be a list / sequence of iterables|"
"Input must be list-like")
with pytest.raises(TypeError, match=msg):
MultiIndex.from_product(iterables=invalid_input)
def test_from_product_datetimeindex():
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([
(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02')),
])
tm.assert_numpy_array_equal(mi.values, etalon)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('f', [
lambda x: x,
lambda x: pd.Series(x),
lambda x: x.values
])
def test_from_product_index_series_categorical(ordered, f):
# GH13743
first = ['foo', 'bar']
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
result = pd.MultiIndex.from_product([first, f(idx)])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_from_product():
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator():
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
    expected = MultiIndex.from_tuples(tuples, names=names)
    result = MultiIndex.from_product(iter([first, second]), names=names)
    tm.assert_index_equal(result, expected)
#######################
# Header.R
from datetime import time
from operator import index
from os import path, times
import numpy as np
import pandas as pd
import os
import logging
from pathlib import Path
from pandas.core.reshape.merge import merge
from powergenome.util import regions_to_keep
from powergenome.us_state_abbrev import (state2abbr, abbr2state)
path_in = r"..\data\load_profiles_data\input" # fix
#read in state proportions
#how much state load should be distributed to GenXRegion
# pop = pd.read_parquet(path_in + "\GenX_State_Pop_Weight.parquet")
pop = pd.read_parquet(path_in + "\ipm_state_pop_weight_20210517.parquet")
states = pop.drop_duplicates(subset=["State"])["State"]
states_abb = list(map(state2abbr, states))
pop["State"] = list(map(state2abbr, pop["State"]))
states_eastern_abbr = ["ME","VT","NH","MA","RI","CT","NY","PA","NJ","DE","MD","DC","MI","IN","OH","KY","WV","VA","NC","SC","GA","FL"]
states_central_abbr = ["IL","MO","TN","AL","MS","WI","AR","LA","TX","OK","KS","NE","SD","ND","IA","MN"]
states_mountain_abbr = ["MT","WY","CO","NM","AZ","UT","ID"]
states_pacific_abbr = ["CA","NV","OR","WA"]
states_eastern = list(map(abbr2state, states_eastern_abbr))
states_central = list(map(abbr2state, states_central_abbr))
states_mountain = list(map(abbr2state, states_mountain_abbr))
states_pacific = list(map(abbr2state, states_pacific_abbr))
# some parameters
stated_states = ["New Jersey", "New York", "Virginia"]
# Date Jan 29, 2021
# (2) PA, NJ, VA, NY, MI all have EV and heat pump stocks from NZA DD case
# consistent with their economywide decarbonization goals.
# https://www.c2es.org/content/state-climate-policy/
# Date Feb 10, 2021
# Remove high electrification growth in PA and MI in stated policies;
# they don't have clean energy goals, so it would be confusing/inconsistent to require high electrification in these states.
# So our new "Stated Policies" definition for electrification is states
# with BOTH economywide emissions goals + 100% carbon-free electricity standards
# = NY, NJ, VA.
stated_states_abbr = list(map(state2abbr, stated_states))
#years = ["2022", "2025", "2030", "2040", "2050"]
cases = ["current_policy", "stated_policy", "deep_decarbonization"]
running_sector = ['Residential','Residential', 'Commercial', 'Commercial','Transportation','Transportation','Transportation', 'Transportation']
running_subsector = ['space heating and cooling','water heating', 'space heating and cooling', 'water heating','light-duty vehicles','medium-duty trucks','heavy-duty trucks','transit buses']
Nsubsector = len(running_subsector)
logger = logging.getLogger(__name__)
#Define function for adjusting time-difference
def addhour(x):
x += 1
x = x.replace(8761, 1)
return x
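# Hedged usage sketch (illustration only, not called by the pipeline): addhour shifts an
# hourly index forward by one step with wraparound on an 8760-hour year, so hour 8760
# maps back to hour 1.
def _example_addhour():
    shifted = addhour(pd.Series([1, 2, 8760]))
    # expected result: [2, 3, 1]
    return shifted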
def SolveThreeUnknowns(a1, b1, c1, d1, a2, b2, c2, d2, a3, b3, c3, d3):
D = a1*b2*c3 + b1*c2*a3 + c1*a2*b3 - a1*c2*b3 - b1*a2*c3 - c1*b2*a3
Dx = d1*b2*c3 + b1*c2*d3 + c1*d2*b3 - d1*c2*b3 - b1*d2*c3 - c1*b2*d3
Dy = a1*d2*c3 + d1*c2*a3 + c1*a2*d3 - a1*c2*d3 - d1*a2*c3 - c1*d2*a3
Dz = a1*b2*d3 + b1*d2*a3 + d1*a2*b3 - a1*d2*b3 - b1*a2*d3 - d1*b2*a3
Sx = Dx/D
Sy = Dy/D
Sz = Dz/D
d = {'Sx':Sx, 'Sy':Sy, 'Sz':Sz}
return pd.DataFrame(d)
def SolveTwoUnknowns(a1, b1, c1, a2, b2, c2):
D = a1*b2 - a2*b1
Dx = c1*b2 - c2*b1
Dy = a1*c2 - a2*c1
Sx = Dx/D
Sy = Dy/D
d = {'Sx':Sx, 'Sy':Sy}
return pd.DataFrame(d)
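# Hedged usage sketch (illustration only, not called by the pipeline): both solvers apply
# Cramer's rule element-wise over pandas Series, so a batch of linear systems can be solved
# in one call. The 2x2 system {x + 2y = 5, 3x + 4y = 6} has x = -4, y = 4.5; a zero
# determinant yields inf/NaN in the output rather than raising.
def _example_solve_two_unknowns():
    out = SolveTwoUnknowns(pd.Series([1.0]), pd.Series([2.0]), pd.Series([5.0]),
                           pd.Series([3.0]), pd.Series([4.0]), pd.Series([6.0]))
    # expected: out["Sx"] == -4.0 and out["Sy"] == 4.5
    return out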
def CreateOutputFolder(case_folder):
path = case_folder / "extra_outputs"
if not os.path.exists(path):
os.makedirs(path)
######################################
# CreatingBaseLoad.R
def CreateBaseLoad(years, regions, output_folder, path_growthrate):
path_processed = path_in
path_result = output_folder.__str__()
years = years
regions = regions
path_growthrate = path_growthrate
## Method 3: annually
EFS_2020_LoadProf = pd.read_parquet(path_in + "\EFS_REF_load_2020.parquet")
EFS_2020_LoadProf = pd.merge(EFS_2020_LoadProf, pop, on = ["State"])
EFS_2020_LoadProf = EFS_2020_LoadProf.assign(weighted = EFS_2020_LoadProf["LoadMW"]*EFS_2020_LoadProf["State Prop"])
EFS_2020_LoadProf = EFS_2020_LoadProf.groupby(["Year", "GenX.Region", "LocalHourID", "Sector", "Subsector"], as_index = False).agg({"weighted" : "sum"})
# Read in 2019 Demand
Original_Load_2019 = pd.read_parquet(path_in + "\ipm_load_curves_2019_EST.parquet")
# Reorganize Demand
Original_Load_2019 = Original_Load_2019.melt(id_vars="LocalHourID").rename(columns={"variable" : "GenX.Region", "value": "LoadMW_original"})
Original_Load_2019 = Original_Load_2019.groupby(["LocalHourID"], as_index = False).agg({"LoadMW_original" : "sum"})
ratio_A = Original_Load_2019["LoadMW_original"].sum() / EFS_2020_LoadProf["weighted"].sum()
EFS_2020_LoadProf = EFS_2020_LoadProf.assign(weighted = EFS_2020_LoadProf["weighted"]*ratio_A)
Base_Load_2019 = EFS_2020_LoadProf.rename(columns ={"weighted" : "LoadMW"})
# Read in the Growth Rate
GrowthRate = pd.read_parquet(path_in + "\ipm_growthrate_2019.parquet")
try:
GrowthRate = pd.read_parquet(path_growthrate)
except:
pass
# Create Base loads
Base_Load_2019 = Base_Load_2019[Base_Load_2019["GenX.Region"].isin(regions)]
Base_Load_2019.loc[(Base_Load_2019["Sector"] == "Industrial") & (Base_Load_2019["Subsector"].isin(["process heat", "machine drives"])), "Subsector"] = "other"
Base_Load_2019 = Base_Load_2019[Base_Load_2019["Subsector"] == "other"]
Base_Load_2019 = Base_Load_2019.groupby(["Year", "LocalHourID", "GenX.Region", "Sector"], as_index= False).agg({'LoadMW' : 'sum'})
Base_Load = Base_Load_2019
for y in years:
ScaleFactor = GrowthRate.assign(ScaleFactor = (1+GrowthRate["growth_rate"])**(int(y) - 2019)) \
.drop(columns = "growth_rate")
Base_Load_temp = pd.merge(Base_Load_2019, ScaleFactor, on = ["GenX.Region"])
Base_Load_temp = Base_Load_temp.assign(Year = y, LoadMW = Base_Load_temp["LoadMW"]*Base_Load_temp["ScaleFactor"])\
.drop(columns = "ScaleFactor")
Base_Load = Base_Load.append(Base_Load_temp, ignore_index=True)
Base_Load.to_parquet(path_result + "\Base_Load.parquet", index = False)
del Base_Load, Base_Load_2019, Base_Load_temp, ScaleFactor,GrowthRate, Original_Load_2019
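# Worked scale-factor example (illustration only, not called by the pipeline): with a
# growth_rate of 0.02 and a target year of 2030, the 2019 base load is multiplied by
# (1 + 0.02) ** (2030 - 2019) ≈ 1.243, i.e. scaled up by roughly 24%.
def _example_scale_factor(growth_rate=0.02, year=2030):
    return (1 + growth_rate) ** (int(year) - 2019)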
#####################################
# Add_Electrification.R
def AddElectrification(years, regions, electrification, output_folder, path_stock):
path_processed = path_in
path_result = output_folder.__str__()
path_stock = path_stock
years = years
electrification = electrification
regions = regions
#Creating Time-series
SCENARIO_STOCK = pd.read_parquet(path_processed + "\SCENARIO_STOCK.parquet")
SCENARIO_STOCK = SCENARIO_STOCK[(SCENARIO_STOCK["YEAR"].isin(years)) & (SCENARIO_STOCK["SCENARIO"].isin(electrification))]
SCENARIO_STOCK_temp = pd.DataFrame()
for year, case in zip(years, electrification):
SCENARIO_STOCK_temp = SCENARIO_STOCK_temp.append(SCENARIO_STOCK[(SCENARIO_STOCK["YEAR"] == year) & (SCENARIO_STOCK["SCENARIO"] == case)])
SCENARIO_STOCK = SCENARIO_STOCK_temp
del SCENARIO_STOCK_temp
try:
CUSTOM_STOCK = pd.read_parquet(path_stock)
CUSTOM_STOCK = CUSTOM_STOCK[(CUSTOM_STOCK["YEAR"].isin(years)) & (CUSTOM_STOCK["SCENARIO"].isin(electrification))]
SCENARIO_STOCK = SCENARIO_STOCK.append(CUSTOM_STOCK)
except:
pass
#Method 1 Calculate from Type1 and Type 2
for i in range(0, Nsubsector):
timeseries = pd.read_parquet(path_processed + "\\" + running_sector[i] + "_" + running_subsector[i] + "_Incremental_Factor.parquet")
timeseries = timeseries[["State", "Year", "LocalHourID", "Unit", "Factor_Type1", "Factor_Type2" ]]
stock_temp = SCENARIO_STOCK[(SCENARIO_STOCK["SECTOR"] == running_sector[i]) & (SCENARIO_STOCK["SUBSECTOR"] == running_subsector[i])]
stock_temp = stock_temp[["SCENARIO", "STATE", "YEAR", "AGG_STOCK_TYPE1", "AGG_STOCK_TYPE2"]].rename(columns={"STATE" : "State", "YEAR" : "Year"})
years_pd = pd.Series(years)
IF_years = pd.Series(timeseries["Year"].unique())
for year in years_pd:
exists = year in IF_years.values
if not exists:
diff = np.array(IF_years - year)
index = diff[np.where(diff <= 0)].argmax()
year_approx = IF_years[index]
timeseries_temp = timeseries[timeseries["Year"] == year_approx]
timeseries_temp["Year"] = year
logger.warning("No incremental factor available for year " + str(year) + ": using factors from year " + str(year_approx) + ".")
timeseries = timeseries.append(timeseries_temp)
timeseries = pd.merge(timeseries, stock_temp, on = ["State", "Year"])
timeseries = timeseries.assign(LoadMW = timeseries["AGG_STOCK_TYPE1"]*timeseries["Factor_Type1"] + timeseries["AGG_STOCK_TYPE2"]*timeseries["Factor_Type2"])
timeseries = timeseries[["SCENARIO", "State", "Year", "LocalHourID", "LoadMW"]].dropna()
timeseries.to_parquet(path_result + "\\" + running_sector[i] + "_" + running_subsector[i] + "_Scenario_Timeseries_Method1.parquet", index = False)
del timeseries, stock_temp
##########################
# Read in time series and combine them
Method = "Method1"
Res_SPH = pd.read_parquet(path_result + "\Residential_space heating and cooling_Scenario_Timeseries_" + Method + ".parquet")
Res_SPH = Res_SPH.rename(columns={"LoadMW" : "Res_SPH_LoadMW"})
Res_SPH_sum = Res_SPH
Res_SPH_sum = Res_SPH.groupby(["SCENARIO", "State", "Year"], as_index = False)["Res_SPH_LoadMW"].agg({"Total_Res_SPH_TWh" : "sum"})
Res_SPH_sum["Total_Res_SPH_TWh"] = 10**-6*Res_SPH_sum["Total_Res_SPH_TWh"]
Res_WH = pd.read_parquet(path_result + "\Residential_water heating_Scenario_Timeseries_" + Method +".parquet")
Res_WH = Res_WH.rename(columns ={"LoadMW" : "Res_WH_LoadMW"})
Res_WH_sum = Res_WH
Res_WH_sum = Res_WH.groupby(["SCENARIO", "State", "Year"], as_index = False)["Res_WH_LoadMW"].agg({"Total_Res_WH_TWh" : "sum"})
Res_WH_sum["Total_Res_WH_TWh"] = 10**-6*Res_WH_sum["Total_Res_WH_TWh"]
Com_SPH = pd.read_parquet(path_result + "\Commercial_space heating and cooling_Scenario_Timeseries_" + Method +".parquet")
Com_SPH = Com_SPH.rename(columns={"LoadMW" : "Com_SPH_LoadMW"})
Com_SPH_sum = Com_SPH
Com_SPH_sum = Com_SPH.groupby(["SCENARIO", "State", "Year"], as_index = False)["Com_SPH_LoadMW"].agg({"Total_Com_SPH_TWh" : "sum"})
Com_SPH_sum["Total_Com_SPH_TWh"] = 10**-6*Com_SPH_sum["Total_Com_SPH_TWh"]
Com_WH = pd.read_parquet(path_result + "\Commercial_water heating_Scenario_Timeseries_" + Method +".parquet")
Com_WH = Com_WH.rename(columns ={"LoadMW" : "Com_WH_LoadMW"})
Com_WH_sum = Com_WH
Com_WH_sum = Com_WH.groupby(["SCENARIO", "State", "Year"], as_index = False)["Com_WH_LoadMW"].agg({"Total_Com_WH_TWh" : "sum"})
Com_WH_sum["Total_Com_WH_TWh"] = 10**-6*Com_WH_sum["Total_Com_WH_TWh"]
    Trans_LDV = pd.read_parquet(path_result + "\Transportation_light-duty vehicles_Scenario_Timeseries_" + Method + ".parquet")
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import numpy as np, tensorflow as tf
from sklearn.preprocessing import OneHotEncoder
import os
import csv
import gc
from sklearn.metrics import mean_squared_error
import math
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF
from sklearn.gaussian_process.kernels import RationalQuadratic
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedKFold
from sklearn import linear_model
from xgboost.sklearn import XGBRegressor
from sklearn.decomposition import PCA
import copy
import pyflux as pf
import datetime
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
PRICED_BITCOIN_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/pricedBitcoin2009-2018.csv"
DAILY_OCCURRENCE_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/dailyOccmatrices/"
betti0_input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_0(100).csv"
betti1_input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_1(100).csv"
DAILY_FILTERED_OCCURRENCE_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/filteredDailyOccMatrices/"
ROW = -1
COLUMN = -1
TEST_SPLIT = 0.01
ALL_YEAR_INPUT_ALLOWED = False
YEAR = 2017
# Baseline
from sklearn.metrics import mean_squared_error
from sklearn import metrics
import matplotlib.pyplot as plt
def exclude_days(train, test):
row, column = train.shape
train_days = np.asarray(train[:, -1]).reshape(-1, 1)
x_train = train[:, 0:column - 1]
test_days = np.asarray(test[:, -1]).reshape(-1, 1)
x_test = test[:, 0:column - 1]
return x_train, x_test, train_days, test_days
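# Hedged shape sketch (illustration only, never called): the day index is carried in the
# last column of each block, so exclude_days splits it off from the model features for
# both the train and the test block.
def _example_exclude_days():
    demo_train = np.hstack([np.zeros((4, 3)), np.arange(4).reshape(-1, 1)])  # last col = day
    demo_test = np.hstack([np.ones((2, 3)), np.arange(4, 6).reshape(-1, 1)])
    x_train, x_test, train_days, test_days = exclude_days(demo_train, demo_test)
    # expected shapes: x_train (4, 3), x_test (2, 3), train_days (4, 1), test_days (2, 1)
    return x_train.shape, x_test.shape, train_days.shape, test_days.shape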
def merge_data(occurrence_data, daily_occurrence_normalized_matrix, aggregation_of_previous_days_allowed):
if(aggregation_of_previous_days_allowed):
if(occurrence_data.size==0):
occurrence_data = daily_occurrence_normalized_matrix
else:
occurrence_data = np.add(occurrence_data, daily_occurrence_normalized_matrix)
else:
if(occurrence_data.size == 0):
occurrence_data = daily_occurrence_normalized_matrix
else:
occurrence_data = np.concatenate((occurrence_data, daily_occurrence_normalized_matrix), axis=0)
#print("merge_data shape: {} occurrence_data: {} ".format(occurrence_data.shape, occurrence_data))
return occurrence_data
def get_normalized_matrix_from_file(day, year, totaltx):
daily_occurrence_matrix_path_name = DAILY_OCCURRENCE_FILE_PATH + "occ" + str(year) + '{:03}'.format(day) + '.csv'
daily_occurence_matrix = pd.read_csv(daily_occurrence_matrix_path_name, sep=",", header=None).values
return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)/totaltx
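# Hedged shape sketch (assumption: each occYYYYDDD.csv holds a square n x n occurrence
# matrix; not called by the pipeline). The matrix is flattened to a single row vector of
# shape (1, n*n) and every entry is divided by that day's total transaction count.
def _example_flattened_occurrence_shape():
    demo = np.arange(9).reshape(3, 3)                     # stand-in for a 3x3 daily occurrence matrix
    flat = np.asarray(demo).reshape(1, demo.size) / 10.0  # 10 = assumed totaltx
    # expected: flat.shape == (1, 9)
    return flat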
def fl_get_normalized_matrix_from_file(day, year, totaltx, n_components):
daily_occurence_matrix = np.asarray([],dtype=np.float32)
for filter_number in range(0, 50, 10):
daily_occurrence_matrix_path_name = DAILY_FILTERED_OCCURRENCE_FILE_PATH + "occ" + str(year) + '{:03}'.format(day) + "_" + str(filter_number) +'.csv'
daily_occurence_matrix_read = pd.read_csv(daily_occurrence_matrix_path_name, sep=",", header=None).values
if(daily_occurence_matrix.size == 0):
daily_occurence_matrix = daily_occurence_matrix_read
else:
daily_occurence_matrix = np.concatenate((daily_occurence_matrix, daily_occurence_matrix_read), axis = 1)
pca = PCA(n_components = 20)
pca.fit(daily_occurence_matrix)
daily_occurence_matrix = pca.transform(daily_occurence_matrix)
#print("daily_occurence_matrix: ", daily_occurence_matrix, daily_occurence_matrix.shape)
#return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)/totaltx
return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)
def get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
#print("priced_bitcoin: ", priced_bitcoin, priced_bitcoin.shape)
#print("current_row: ", current_row, current_row.shape)
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
#print("previous_price_data: ", previous_price_data,row['day'], row['year'], row['totaltx'])
#print("occurrence_data: ", occurrence_data)
if(is_price_of_previous_days_allowed):
#print("previous_price_data: ", np.asarray(previous_price_data).reshape(1, -1), np.asarray(previous_price_data).reshape(1, -1).shape)
occurrence_data = np.asarray(previous_price_data).reshape(1, -1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
#print("current_row: ", current_row, current_row.shape)
#print(" price occurrence_input: ", np.asarray(current_row['price']).reshape(1,1), (np.asarray(current_row['price']).reshape(1,1)).shape)
#print("concatenate with price occurrence_input: ", occurrence_input, occurrence_input.shape)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
#print(" price occurrence_input: ", np.asarray(current_row['day']).reshape(1,1), (np.asarray(current_row['day']).reshape(1,1)).shape)
#print("concatenate with day occurrence_input: ", occurrence_input, occurrence_input.shape)
return occurrence_input
def betti_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.array([], dtype=np.float32)
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
betti0_50 = read_betti(betti0_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti0_50).reshape(1,-1))
betti1_50 = read_betti(betti1_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti1_50).reshape(1,-1))
if occurrence_data.size == 0:
occurrence_data = previous_price_data
else:
occurrence_data = np.row_stack((occurrence_data,previous_price_data))
#print(occurrence_data, occurrence_data.shape)
#print(previous_price_data, previous_price_data.shape)
occurrence_data = np.asarray(occurrence_data).reshape(1, -1)
#betti0_50 = read_betti(betti0_input_path, current_row['day'])
#occurrence_input = np.concatenate((occurrence_data, np.asarray(betti0_50).reshape(1,-1)), axis=1)
#betti1_50 = read_betti(betti1_input_path, current_row['day'])
#occurrence_input = np.concatenate((occurrence_input, np.asarray(betti1_50).reshape(1,-1)), axis=1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
return occurrence_input
def betti_der_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
#print("priced_bitcoin: ", priced_bitcoin, priced_bitcoin.shape)
#print("current_row: ", current_row, current_row.shape)
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
betti0_50 = read_betti(betti0_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti0_50).reshape(1,-1))
betti1_50 = read_betti(betti1_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti1_50).reshape(1,-1))
betti0_50_diff1 = betti0_50.diff(1).dropna()
previous_price_data = np.concatenate((previous_price_data.reshape(1,-1), np.asarray(betti0_50_diff1).reshape(1,-1)), axis=1)
betti1_50_diff1 = betti1_50.diff(1).dropna()
previous_price_data = np.concatenate((previous_price_data, np.asarray(betti1_50_diff1).reshape(1,-1)), axis=1)
if occurrence_data.size == 0:
occurrence_data = previous_price_data
else:
occurrence_data = np.concatenate((occurrence_data, previous_price_data.reshape(1,-1)), axis=1)
#print(occurrence_data, occurrence_data.shape)
#print("previous_price_data: ", previous_price_data,row['day'], row['year'], row['totaltx'])
occurrence_data = np.asarray(occurrence_data).reshape(1, -1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
return occurrence_input
def fl_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
daily_occurrence_normalized_matrix = fl_get_normalized_matrix_from_file(row['day'], row['year'], row['totaltx'], 20)
occurrence_data = merge_data(occurrence_data, daily_occurrence_normalized_matrix, aggregation_of_previous_days_allowed)
#print("occurrence_data: ",occurrence_data, occurrence_data.shape)
if(is_price_of_previous_days_allowed):
occurrence_data = np.concatenate((occurrence_data.reshape(1,-1), np.asarray(previous_price_data).reshape(1,-1)), axis=1)
occurrence_input = np.concatenate((occurrence_data.reshape(1,-1), np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
#print("occurrence_input: ",occurrence_input, occurrence_input.shape)
return occurrence_input
def read_betti(file_path, day):
day = day - 1
betti = pd.read_csv(file_path, index_col=0)
    try:
        betti_50 = betti.iloc[day, 0:50]
    except Exception:
        print("day:", day)
        raise
    return betti_50
def rf_base_rmse_mode(train_input, train_target, test_input, test_target):
rf_regression = RandomForestRegressor(max_depth=2, random_state=0)
rf_regression.fit(train_input, train_target.ravel() )
predicted = rf_regression.predict(test_input)
rf_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
return rf_base_rmse
def gp_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'kernel': RationalQuadratic(alpha=0.01, length_scale=1),
'n_restarts_optimizer': 2
}
adj_params = {'kernel': [RationalQuadratic(alpha=0.01,length_scale=1)],
'n_restarts_optimizer': [2]}
gpr = GaussianProcessRegressor(**param)
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(gpr, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input,train_target)
#print("cv_results_:",cscv.cv_results_)
print("best_params_: ",cscv.best_params_)
gpr = GaussianProcessRegressor(**cscv.best_params_)
gpr.fit(train_input, train_target)
mu, cov = gpr.predict(test_input, return_cov=True)
test_y = mu.ravel()
#uncertainty = 1.96 * np.sqrt(np.diag(cov))
gp_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, test_y))
print(gp_base_rmse)
return gp_base_rmse
def enet_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'alpha': 10,
'l1_ratio': 1,
}
elastic = linear_model.ElasticNet(**param)
adj_params = {'alpha': [10],
'l1_ratio': [ 1]}
#'max_iter': [100000]}
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(elastic, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input, train_target)
print("best_params_: ",cscv.best_params_)
elastic= linear_model.ElasticNet(**cscv.best_params_)
elastic.fit(train_input,train_target.ravel())
predicted = elastic.predict(test_input)
enet_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
print("enet_base_rmse: ", enet_base_rmse)
#print ("RMSE:", np.sqrt(metrics.mean_squared_error(test_target, predicted)))
return enet_base_rmse
def xgbt_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'n_estimators':1000,
'learning_rate': 0.01,
}
adj_params = {
'n_estimators':[1000],
'learning_rate': [0.01]
}
xgbt = XGBRegressor(**param)
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(xgbt, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input, train_target)
print("best_params_: ", cscv.best_params_)
xgbt= XGBRegressor(**cscv.best_params_)
xgbt.fit(train_input,train_target.ravel())
predicted = xgbt.predict(test_input)
xgbt_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
print("xgbt_base_rmse: ", xgbt_base_rmse)
#print ("RMSE:", np.sqrt(metrics.mean_squared_error(test_target, predicted)))
return xgbt_base_rmse
def arimax_initialize_setting(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
data = preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
train = data[0:100, :]
test = data[100:100+prediction_horizon, :]
x_train, x_test, train_days, test_days = exclude_days(train, test)
row, column = x_train.shape
train_target = np.asarray(x_train[:, -1]).reshape(-1)
train_input = x_train[:, 0:column - 1]
test_target = x_test[: , -1]
test_input = x_test[ : , 0:column - 1]
return train_input, train_target, test_input, test_target, train_days, test_days
def arimax_base_rmse_mode(train_input, train_target, test_input, test_target):
train_input_diff_arr = np.array([])
train_columns_name = []
train_input_column = int(train_input.shape[1])
for i in range(train_input_column):
if(i%2==0):
train_columns_name.append('price_' + str(i))
else:
train_columns_name.append('totaltx_' + str(i))
train_input_diff = np.diff(train_input[:,i] )
if i == 0:
train_input_diff_arr = train_input_diff
else:
train_input_diff_arr = np.dstack((train_input_diff_arr, train_input_diff))
columns_name = copy.deepcopy(train_columns_name)
columns_name.append('current_price')
train_target_diff = np.diff(train_target )
train_input_diff_arr = np.dstack((train_input_diff_arr, train_target_diff))
train_input_diff_arr = pd.DataFrame(train_input_diff_arr[0], columns = columns_name)
model = pf.ARIMAX(data=train_input_diff_arr,formula="current_price~totaltx_5",ar=1,ma=2,integ=0)
model_1 = model.fit("MLE")
model_1.summary()
test_input_pd = pd.DataFrame(test_input, columns = train_columns_name)
test_target_pd = pd.DataFrame(test_target, columns = ['current_price'])
test_input_target = pd.concat([test_input_pd, test_target_pd], axis=1)
pred = model.predict(h=test_input_target.shape[0],
oos_data=test_input_target,
intervals=True, )
    arimax_base_rmse = np.sqrt(mean_squared_error([test_input_target.iloc[0, 6]], [(train_target[99]) + pred.current_price[99]]))
print("arimax_base_rmse:",arimax_base_rmse)
return arimax_base_rmse
def run_print_model(train_input, train_target, test_input, test_target, train_days, test_days):
rf_base_rmse = rf_base_rmse_mode(train_input, train_target, test_input, test_target)
xgbt_base_rmse = xgbt_base_rmse_mode(train_input, train_target, test_input, test_target)
gp_base_rmse = gp_base_rmse_mode(train_input, train_target, test_input, test_target)
enet_base_rmse = enet_base_rmse_mode(train_input, train_target, test_input, test_target)
return rf_base_rmse, xgbt_base_rmse, gp_base_rmse, enet_base_rmse
#print_results(predicted, test_target, original_log_return, predicted_log_return, cost, test_days, rmse)
#return rf_base_rmse
def preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
priced_bitcoin = pd.read_csv(PRICED_BITCOIN_FILE_PATH, sep=",")
if(ALL_YEAR_INPUT_ALLOWED):
pass
else:
priced_bitcoin = priced_bitcoin[priced_bitcoin['year']==YEAR].reset_index(drop=True)
# get normalized occurence matrix in a flat format and merge with totaltx
daily_occurrence_input = np.array([],dtype=np.float32)
temp = np.array([], dtype=np.float32)
for current_index, current_row in priced_bitcoin.iterrows():
if(current_index<(window_size+prediction_horizon-1)):
pass
else:
start_index = current_index - (window_size + prediction_horizon) + 1
end_index = current_index - prediction_horizon
if(dataset_model=="base"):
temp = get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
elif(dataset_model=="betti"):
temp = betti_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
elif(dataset_model=="fl"):
temp = fl_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
elif(dataset_model=="betti_der"):
temp = betti_der_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
else:
sys.exit("Dataset model support only baseline, betti, fl and betti_der!")
if(daily_occurrence_input.size == 0):
daily_occurrence_input = temp
else:
daily_occurrence_input = np.concatenate((daily_occurrence_input, temp), axis=0)
return daily_occurrence_input
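# Worked indexing sketch (illustration only, not called): with window_size=3 and
# prediction_horizon=7, the row at current_index=9 uses start_index = 9 - (3 + 7) + 1 = 0
# and end_index = 9 - 7 = 2, i.e. rows 0..2 provide the input window while the target
# price comes from row 9 itself.
def _example_window_indices(current_index=9, window_size=3, prediction_horizon=7):
    start_index = current_index - (window_size + prediction_horizon) + 1
    end_index = current_index - prediction_horizon
    return start_index, end_index  # (0, 2) for the defaults above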
def initialize_setting(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
data = preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
train = data[0:100, :]
test = data[100, :].reshape(1, -1)
x_train, x_test, train_days, test_days = exclude_days(train, test)
#print("x_train:", x_train)
row, column = x_train.shape
train_target = np.asarray(x_train[:, -1]).reshape(-1)
train_input = x_train[:, 0:column - 1]
#x_test = x_test.reshape(-1,1)
test_target = x_test[: , -1]
test_input = x_test[ : , 0:column - 1]
return train_input, train_target, test_input, test_target, train_days, test_days
parameter_dict = {#0: dict({'is_price_of_previous_days_allowed':True, 'aggregation_of_previous_days_allowed':True})}
1: dict({'is_price_of_previous_days_allowed':True, 'aggregation_of_previous_days_allowed':False})}
for step in parameter_dict:
names = locals()
gc.collect()
evalParameter = parameter_dict.get(step)
is_price_of_previous_days_allowed = evalParameter.get('is_price_of_previous_days_allowed')
aggregation_of_previous_days_allowed = evalParameter.get('aggregation_of_previous_days_allowed')
print("IS_PRICE_OF_PREVIOUS_DAYS_ALLOWED: ", is_price_of_previous_days_allowed)
print("AGGREGATION_OF_PREVIOUS_DAYS_ALLOWED: ", aggregation_of_previous_days_allowed)
window_size_array = [3, 5, 7]
horizon_size_array = [1, 2, 5, 7, 10, 15, 20, 25, 30]
dataset_model_array = ["base", "betti", "fl","betti_der"]
for dataset_model in dataset_model_array:
print('dataset_model: ', dataset_model)
for window_size in window_size_array:
print('WINDOW_SIZE: ', window_size)
for prediction_horizon in horizon_size_array:
print("PREDICTION_HORIZON: ", prediction_horizon)
train_input, train_target, test_input, test_target, train_days, test_days = initialize_setting(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
rf_base_rmse, xgbt_base_rmse, gp_base_rmse, enet_base_rmse = run_print_model(train_input, train_target, test_input, test_target, train_days, test_days)
rmse = pd.DataFrame({'rf_' + dataset_model + '_rmse_'+str(window_size): [rf_base_rmse], 'xgbt_' + dataset_model + '_rmse_'+str(window_size): [xgbt_base_rmse], 'gp_' + dataset_model + '_rmse_'+str(window_size): [gp_base_rmse], 'enet_' + dataset_model + '_rmse_'+str(window_size): [enet_base_rmse]})
if(prediction_horizon==1):
rmse_total = rmse
else:
rmse_total = [rmse_total, rmse]
rmse_total = pd.concat(rmse_total)
if(window_size==3):
names['rmse_' + dataset_model + '_total'] = rmse_total
else:
names['rmse_' + dataset_model + '_total'] = pd.concat([names.get('rmse_' + dataset_model + '_total') , rmse_total], axis=1)
names['rmse_' + dataset_model + '_total'].index = pd.Series(horizon_size_array)
print('rmse_{}_total = {}'.format(dataset_model, names.get('rmse_' + dataset_model + '_total')))
t = datetime.datetime.now()
dir_name = t.strftime('%m_%d___%H_%M')
if not os.path.exists(dir_name):
os.makedirs(dir_name)
betti_gain = 100 * (1 -rmse_betti_total.div(rmse_base_total.values))
fl_gain = 100 * (1 -rmse_fl_total.div(rmse_base_total.values))
betti_der_gain = 100 * (1 -rmse_betti_der_total.div(rmse_base_total.values))
for i in range(12):
path = dir_name + "/"
    perf = pd.concat([betti_gain.iloc[:, i], betti_der_gain.iloc[:, i], fl_gain.iloc[:, i]], axis=1)
import itertools
import numba as nb
import numpy as np
import pandas as pd
import pytest
from sid.contacts import _consolidate_reason_of_infection
from sid.contacts import _numpy_replace
from sid.contacts import calculate_infections_by_contacts
from sid.contacts import create_group_indexer
@pytest.mark.unit
@pytest.mark.parametrize(
"states, group_code_name, expected",
[
(
pd.DataFrame({"a": [1] * 7 + [0] * 8}),
"a",
[list(range(7, 15)), list(range(7))],
),
(
pd.DataFrame({"a": pd.Series([0, 1, 2, 3, 0, 1, 2, 3]).astype("category")}),
"a",
[[0, 4], [1, 5], [2, 6], [3, 7]],
),
(
pd.DataFrame({"a": pd.Series([0, 1, 2, 3, 0, 1, 2, -1])}),
"a",
[[0, 4], [1, 5], [2, 6], [3]],
),
],
)
def test_create_group_indexer(states, group_code_name, expected):
result = create_group_indexer(states, group_code_name)
result = [r.tolist() for r in result]
assert result == expected
@pytest.fixture()
def households_w_one_infected():
states = pd.DataFrame(
{
"infectious": [True] + [False] * 7,
"cd_infectious_true": [-1] * 8,
"immunity": [1.0] + [0.0] * 7,
"group_codes_households": [0] * 4 + [1] * 4,
"households": [0] * 4 + [1] * 4,
"group_codes_non_rec": [0] * 4 + [1] * 4,
"n_has_infected": 0,
"virus_strain": pd.Series(["base_strain"] + [pd.NA] * 7, dtype="category"),
}
)
params = pd.DataFrame(
columns=["value"],
data=1,
index=pd.MultiIndex.from_tuples(
[("infection_prob", "households", "households")]
),
)
indexers = {"recurrent": nb.typed.List()}
indexers["recurrent"].append(create_group_indexer(states, ["households"]))
assortative_matching_cum_probs = nb.typed.List()
assortative_matching_cum_probs.append(np.zeros((0, 0)))
group_codes_info = {"households": {"name": "group_codes_households"}}
virus_strains = {
"names": ["base_strain"],
"contagiousness_factor": np.ones(1),
"immunity_resistance_factor": np.zeros(1),
}
return {
"states": states,
"recurrent_contacts": np.ones((len(states), 1), dtype=bool),
"random_contacts": None,
"params": params,
"indexers": indexers,
"assortative_matching_cum_probs": assortative_matching_cum_probs,
"group_codes_info": group_codes_info,
"susceptibility_factor": np.ones(len(states)),
"virus_strains": virus_strains,
"seasonality_factor": | pd.Series([1], index=["households"]) | pandas.Series |
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not be necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
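# Hedged usage note (not part of the original source): for example,
# maybe_box_datetimelike(np.datetime64("2011-01-01")) returns Timestamp("2011-01-01"),
# while passing dtype=object leaves the raw np.datetime64 untouched.
def _example_maybe_box_datetimelike():
    boxed = maybe_box_datetimelike(np.datetime64("2011-01-01"))
    unboxed = maybe_box_datetimelike(np.datetime64("2011-01-01"), dtype=object)
    return boxed, unboxed  # (Timestamp("2011-01-01"), np.datetime64("2011-01-01"))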
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
# bytes-like object or a number, not 'Period
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if hasattr(dtype, "tz"):
# not a numpy dtype
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize("utc")
result = result.tz_convert(dtype.tz)
else:
result = result.astype(dtype)
return result
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
if isinstance(result, list):
# reached via groupby.agg._ohlc; really this should be handled earlier
result = np.array(result)
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and notna(result).all()
):
new_result = trans(result).astype(dtype)
if new_result.dtype.kind == "O" or result.dtype.kind == "O":
# np.allclose may raise TypeError on object-dtype
if (new_result == result).all():
return new_result
else:
if np.allclose(new_result, result, rtol=0):
return new_result
elif (
issubclass(dtype.type, np.floating)
and not is_bool_dtype(result.dtype)
and not is_string_dtype(result.dtype)
):
return result.astype(dtype)
return result
def maybe_cast_result(
result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = ""
) -> ArrayLike:
"""
Try casting result to a different type if appropriate
Parameters
----------
result : array-like
Result to cast.
obj : Series
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
how : str, default ""
How the result was computed.
Returns
-------
result : array-like
result maybe casted to the dtype.
"""
dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
if (
is_extension_array_dtype(dtype)
and not is_categorical_dtype(dtype)
and dtype.kind != "M"
):
# We have to special case categorical so as not to upcast
# things like counts back to categorical
cls = dtype.construct_array_type()
result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
return dtype
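# Hedged usage note (not part of the original source): summing a plain boolean dtype is
# upcast to int64, e.g. maybe_cast_result_dtype(np.dtype(bool), "sum") gives
# np.dtype("int64"), while e.g. float dtypes fall through unchanged.
def _example_maybe_cast_result_dtype():
    return maybe_cast_result_dtype(np.dtype(bool), "sum")  # np.dtype("int64")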
def maybe_cast_to_extension_array(
cls: Type["ExtensionArray"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None
) -> ArrayLike:
"""
Call to `_from_sequence` that returns the object unchanged on Exception.
Parameters
----------
cls : class, subclass of ExtensionArray
obj : arraylike
Values to pass to cls._from_sequence
dtype : ExtensionDtype, optional
Returns
-------
ExtensionArray or obj
"""
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.string_arrow import ArrowStringArray
assert isinstance(cls, type), f"must pass a type: {cls}"
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
# Everything can be converted to StringArrays, but we may not want to convert
if (
issubclass(cls, (StringArray, ArrowStringArray))
        and lib.infer_dtype(obj) != "string"
import pandas as pd
import numpy as np
def fetch_total_population_df():
train_df = pd.read_csv('training_data.csv', encoding="ISO-8859-1")
matrix = train_df.groupby(['pickup_grid_number','dropoff_grid_number']).size().unstack().fillna(0)
matrixByHours = train_df.groupby(['hour','pickup_grid_number','dropoff_grid_number']).size().unstack().fillna(0)
pickup_matrix = train_df.groupby(['hour', 'pickup_grid_number']).size().unstack().fillna(0)
dropoff_matrix = train_df.groupby(['hour', 'dropoff_grid_number']).size().unstack().fillna(0)
pickup_matrix.fillna(0, inplace=True)
dropoff_matrix.fillna(0, inplace=True)
change_obj = {}
total_population_obj = {}
for i in range(1, 2500):
try:
if i in dropoff_matrix[:].columns and i in pickup_matrix[:].columns:
change_obj[str(i)] = dropoff_matrix[:][i] - pickup_matrix[:][i]
try:
total_population_obj[str(i)] = total_population_obj[str(i-1)] + change_obj[str(i)]
except KeyError:
total_population_obj[str(i)] = change_obj[str(i)]
pass
elif i in dropoff_matrix[:].columns:
change_obj[str(i)] = dropoff_matrix[:][i]
try:
total_population_obj[str(i)] = total_population_obj[str(i-1)] + change_obj[str(i)]
except KeyError:
total_population_obj[str(i)] = change_obj[str(i)]
pass
elif i in pickup_matrix[:].columns:
change_obj[str(i)] = -pickup_matrix[:][i]
try:
total_population_obj[str(i)] = total_population_obj[str(i-1)] + change_obj[str(i)]
except KeyError:
total_population_obj[str(i)] = change_obj[str(i)]
pass
except KeyError:
pass
#net_change_df is a 24x1010 (hour by zone) matrix containing the net change in a given zone
#a positive value implies more people entering the zone then leaving
#a negative value implies more people leaving the zone then entering
    net_change_df = pd.DataFrame(change_obj)
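# Hedged usage sketch (assumption, not in the original snippet): net_change_df is an
# hour-by-zone matrix, so simple questions can be answered directly from it, e.g. which
# zone sees the largest net inflow at a given hour. The hour value 18 below is purely
# illustrative and assumes that hour appears in the index.
def _example_busiest_zone_at_hour(net_change_df, hour=18):
    return net_change_df.loc[hour].idxmax()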
import numpy as np
import pandas as pd
import lib.utils as utils
import lib.aux_utils as aux
from collections import defaultdict
def transform_vacineja(df):
'''
Description.
Args:
return_:
Bool.
colnames:
List of strings or String.
nrows:
Integer or None.
cur_date:
datetime.date.
Return:
self.vacineja_df:
pandas.DataFrame. If return_=True.
'''
date_fmt = "%Y-%m-%d"
date_cols = ["data_nascimento", "created_at"]
for j in date_cols:
df[j] = pd.to_datetime(df[j], format=date_fmt, errors="coerce")
# More specific filters and transformations
df = df.dropna(subset=["cpf", "data_nascimento"], axis=0)
# Include only the individuals with male and female sex (to avoid the problem of before)
df = df[df["sexo"].isin(["M", "F"])]
df["cpf"] = df["cpf"].apply(lambda x:f"{x:11.0f}".replace(" ","0") if type(x)!=str else np.nan)
df = df.drop_duplicates(subset=["cpf"], keep="first")
df['cns'] = df['cns'].apply(lambda x: f'{x}')
#df = df[df["cidade"]=="FORTALEZA"]
#df = df.drop("cidade", axis=1)
# --> Create custom primary keys to complement the linkage by 'cpf' with the other databases
df["nome tratado"] = df["nome"].apply(lambda x: utils.replace_string(x))
df["nome mae tratado"] = df["nome_mae"].apply(lambda x: utils.replace_string(x) if not pd.isna(x) else np.nan)
df["nome hashcode"] = df["nome"].apply(lambda x: utils.replace_string_hash(x) if not pd.isna(x) else np.nan)
df["nome mae hashcode"] = df["nome_mae"].apply(lambda x: utils.replace_string_hash(x) if not pd.isna(x) else np.nan)
# ----> CUSTOM PRIMARY KEYS
df["NOMENASCIMENTOCHAVE"] = df["nome tratado"].astype(str)+df["data_nascimento"].astype(str)
df["NOMENOMEMAECHAVE"] = df["nome tratado"].astype(str)+df["nome mae tratado"].astype(str)
df["NOMEMAENASCIMENTOCHAVE"] = df["nome mae tratado"].astype(str) + df["data_nascimento"].astype(str)
df["NOMEHASHNASCIMENTOCHAVE"] = df["nome hashcode"].astype(str) + df["data_nascimento"].astype(str)
df["NOMEMAEHASHNASCIMENTOCHAVE"] = df["nome mae hashcode"].astype(str) + df["data_nascimento"].astype(str)
df = df.drop(["nome tratado", "nome mae tratado", "nome hashcode", "nome mae hashcode"], axis=1)
return df
def transform_vacinados(df):
    '''
        Clean the vaccination records, flag inconsistent D1/D2 application dates
        and build the custom primary keys (columns suffixed with "(VACINADOS)").
    '''
date_fmt = "%Y-%m-%d"
date_cols = ["data D1", "data D2", "data D3", "data D4", "data nascimento"]
for j in date_cols:
df[j] = pd.to_datetime(df[j], format=date_fmt, errors="coerce")
# Apply filters
# Remove records with the fields "cpf_usuario" and "data_nascimento" missing.
df = df.dropna(subset=["cpf","data nascimento","sexo","vacina"], how="any", axis=0)
# Format the field of "grupo prioritario" to reduce the rows with compound information.
df["grupo prioritario"] = df["grupo prioritario"].apply(lambda x: x.split("-")[0] if not pd.isna(x) else x)
# Process the CPF field
df["cpf"] = df["cpf"].astype(float, errors="ignore")
df["cpf"] = df["cpf"].apply(lambda x:f"{x:11.0f}".replace(" ","0") if type(x)!=str else np.nan)
df = df.drop_duplicates(subset=["cpf"], keep="first")
# Process the names of each person
# Find all persons having an inconsistent set of vaccination dates.
subset = ["data D1", "data D2"]
df["data aplicacao consistente"] = df[subset].apply(lambda x: aux.f_d1d2(x["data D1"], x["data D2"]), axis=1)
# --> Create custom primary keys to complement the linkage by 'cpf' with the other databases
df["nome tratado"] = df["nome"].apply(lambda x: utils.replace_string(x))
df["nome mae tratado"] = df["nome mae"].apply(lambda x: utils.replace_string(x) if not pd.isna(x) else np.nan)
df["nome hashcode"] = df["nome"].apply(lambda x: utils.replace_string_hash(x) if not pd.isna(x) else np.nan)
df["nome mae hashcode"] = df["nome mae"].apply(lambda x: utils.replace_string_hash(x) if not pd.isna(x) else np.nan)
df = df.add_suffix("(VACINADOS)")
# ----> CUSTOM PRIMARY KEYS
df["NOMENASCIMENTOCHAVE"] = df["nome tratado(VACINADOS)"].astype(str)+df["data nascimento(VACINADOS)"].astype(str)
df["NOMENOMEMAECHAVE"] = df["nome tratado(VACINADOS)"].astype(str)+df["nome mae tratado(VACINADOS)"].astype(str)
df["NOMEMAENASCIMENTOCHAVE"] = df["nome mae tratado(VACINADOS)"].astype(str) + df["data nascimento(VACINADOS)"].astype(str)
df["NOMEHASHNASCIMENTOCHAVE"] = df["nome hashcode(VACINADOS)"].astype(str) + df["data nascimento(VACINADOS)"].astype(str)
df["NOMEMAEHASHNASCIMENTOCHAVE"] = df["nome mae hashcode(VACINADOS)"].astype(str) + df["data nascimento(VACINADOS)"].astype(str)
df = df.drop(["nome tratado(VACINADOS)", "nome mae tratado(VACINADOS)", "nome hashcode(VACINADOS)", "nome mae hashcode(VACINADOS)"], axis=1)
return df
def transform_integrasus(df, init_cohort):
    '''
        Clean the integrasus records: standardize identifiers, parse the date
        fields and build the custom primary keys used for linkage.
    '''
df["cpf"] = df["cpf"].apply(lambda x: f"{x:11.0f}".replace(" ", "0") if not pd.isna(x) else np.nan)
    df['nome_mae'] = df['nome_mae'].apply(lambda x: x if not pd.isna(x) and len(x)>0 else np.nan)  # check isna first so len() is never called on NaN
df["nome tratado"] = df["nome_paciente"].apply(lambda x: utils.replace_string(x) if not pd.isna(x) else np.nan)
df["nome hashcode"] = df["nome_paciente"].apply(lambda x: utils.replace_string_hash(x) if not pd.isna(x) else np.nan)
df["nome mae tratado"] = df["nome_mae"].apply(lambda x: utils.replace_string(x) if not pd.isna(x) else np.nan)
df["nome mae hashcode"] = df["nome_mae"].apply(lambda x: utils.replace_string_hash(x) if not pd.isna(x) else np.nan)
df["cns"] = df["cns"].apply(lambda x: x if len(x)>0 or not pd.isna(x) else "INDETERMINADO")
# Transform date fields
df["data_nascimento"] = pd.to_datetime(df["data_nascimento"], errors="coerce")
df["data_coleta_exame"] = pd.to_datetime(df["data_coleta_exame"], errors="coerce")
df["data_inicio_sintomas_nova"] = pd.to_datetime(df["data_inicio_sintomas_nova"], errors="coerce")
df["data_internacao_sivep"] = pd.to_datetime(df["data_internacao_sivep"], errors="coerce")
df["data_entrada_uti_sivep"] = | pd.to_datetime(df["data_entrada_uti_sivep"], format="%Y/%m/%d", errors="coerce") | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 13:16:25 2018
@author: nooteboom
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 1 16:41:17 2018
@author: nooteboom
"""
import numpy as np
import matplotlib.pylab as plt
import matplotlib
import seaborn as sns
import pandas as pd
from matplotlib.font_manager import FontProperties
#%%
font = {'family' : 'Helvetica',
#'weight' : 'bold',
'size' : 60}
matplotlib.rc('font', **font)
ddeg = 2 # resolution of the binning
spl = [3, 6, 's1','s2', 11, 25, 50, 100, 200, 500]
dd = 10
res = 1
#Set for Pdensity plots:
scale = 'exponential'
k_depth = 'proportion'
outlier_prop = 0.5
whis = [5, 95]
meanpointprops = dict(marker='D', markeredgecolor='black',
markerfacecolor='firebrick', markersize=7)
#%% boxplot of different sinking velocities
toplot = np.load('process/toplot_TM-boxplot.npz')
d = {'$10^6\ km^2$':toplot['areas'],
'sinking speed (m day$^{-1}$)':toplot['sinkingspeed'],
' ':toplot['locations'] }
df = | pd.DataFrame(data=d) | pandas.DataFrame |
import warnings
warnings.filterwarnings("ignore")
import os
import json
import argparse
import time
import datetime
import pickle
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from scipy.stats import spearmanr, mannwhitneyu
import scipy.cluster.hierarchy as shc
from skbio.stats.composition import clr
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from scipy.cluster.hierarchy import cut_tree
from src.models.MiMeNet import MiMeNet, tune_MiMeNet
###################################################
# Read in command line arguments
###################################################
parser = argparse.ArgumentParser(description='Perform MiMeNet')
parser.add_argument('-micro', '--micro', help='Comma delimited file representing matrix of samples by microbial features', required=True)
parser.add_argument('-metab', '--metab', help= 'Comma delimited file representing matrix of samples by metabolomic features', required=True)
parser.add_argument('-external_micro', '--external_micro', help='Comma delimited file representing matrix of samples by microbial features')
parser.add_argument('-external_metab', '--external_metab', help= 'Comma delimited file representing matrix of samples by metabolomic features')
parser.add_argument('-annotation', '--annotation', help='Comma delimited file annotating subset of metabolite features')
parser.add_argument('-labels', '--labels', help="Comma delimited file for sample labels to associate clusters with")
parser.add_argument('-output', '--output', help='Output directory', required=True)
parser.add_argument('-net_params', '--net_params', help='JSON file of network hyperparameters', default=None)
parser.add_argument('-background', '--background', help='Directory with previously generated background', default=None)
parser.add_argument('-num_background', '--num_background', help='Number of background CV Iterations', default=100, type=int)
parser.add_argument('-micro_norm', '--micro_norm', help='Microbiome normalization (RA, CLR, or None)', default='CLR')
parser.add_argument('-metab_norm', '--metab_norm', help='Metabolome normalization (RA, CLR, or None)', default='CLR')
parser.add_argument('-threshold', '--threshold', help='Define significant correlation threshold', default=None)
parser.add_argument('-num_run_cv', '--num_run_cv', help='Number of iterations for cross-validation', default=1, type=int)
parser.add_argument('-num_cv', '--num_cv', help='Number of cross-validated folds', default=10, type=int)
parser.add_argument('-num_run', '--num_run', help='Number of iterations for training full model', type=int, default=10)
args = parser.parse_args()
micro = args.micro
metab = args.metab
external_micro = args.external_micro
external_metab = args.external_metab
annotation = args.annotation
out = args.output
net_params = args.net_params
threshold = args.threshold
micro_norm = args.micro_norm
metab_norm = args.metab_norm
num_run_cv = args.num_run_cv
num_cv = args.num_cv
num_run = args.num_run
background_dir = args.background
labels = args.labels
num_bg = args.num_background
tuned = False
gen_background = True
if background_dir != None:
gen_background = False
start_time = time.time()
if external_metab != None and external_micro == None:
print("Warning: External metabolites found with no external microbiome...ignoring external set!")
external_metab = None
if net_params != None:
print("Loading network parameters...")
try:
with open(net_params, "r") as infile:
params = json.load(infile)
num_layer = params["num_layer"]
layer_nodes = params["layer_nodes"]
l1 = params["l1"]
l2 = params["l2"]
dropout = params["dropout"]
learning_rate = params["lr"]
tuned = True
print("Loaded network parameters...")
except:
print("Warning: Could not load network parameter file!")
###################################################
# Load Data
###################################################
metab_df = pd.read_csv(metab, index_col=0)
micro_df = pd.read_csv(micro, index_col=0)
if external_metab != None:
external_metab_df = pd.read_csv(external_metab, index_col=0)
if external_micro != None:
external_micro_df = | pd.read_csv(external_micro, index_col=0) | pandas.read_csv |
import operator
from shutil import get_terminal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
from pandas._typing import ArrayLike, Dtype, Ordered, Scalar
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_to_datetimelike,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing
from pandas.core.missing import interpolate_2d
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.io.formats import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and len(other) != len(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif self.ordered and not (self.categories == other.categories).all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = getattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
                # In other series, this leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
ret = getattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(len(self), dtype=bool)
elif opname == "__ne__":
return np.ones(len(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return getattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
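# Illustrative note (not part of the original excerpt): pandas wires this factory up roughly as
# ``__eq__ = _cat_compare_op(operator.eq)`` on Categorical, so an expression such as
# ``pd.Categorical(['a', 'b']) == 'a'`` is evaluated by the generated ``func`` above.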
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
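# Usage sketch (illustrative, not from the original source): with cat = Categorical(['a', 'b', 'a']),
# contains(cat, 'a', cat._codes) is True because the code for 'a' (0) occurs in cat._codes, while
# contains(cat, 'z', cat._codes) is False since 'z' is not among cat.categories.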
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
`Categoricals` can only take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = PandasObject._deprecations | frozenset(["tolist"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if len(values) == 0 else None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
except ValueError as err:
# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = (
values._values.codes if isinstance(values, ABCSeries) else values.codes
)
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""
The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~pandas.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None
def copy(self) -> "Categorical":
"""
Copy constructor.
"""
return self._constructor(
values=self._codes.copy(), dtype=self.dtype, fastpath=True
)
def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
"""
if is_categorical_dtype(dtype):
dtype = cast(Union[str, CategoricalDtype], dtype)
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
if is_extension_array_dtype(dtype):
return array(self, dtype=dtype, copy=copy) # type: ignore # GH 28770
if is_integer_dtype(dtype) and self.isna().any():
raise ValueError("Cannot convert float NaN to integer")
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def size(self) -> int:
"""
Return the len of myself.
"""
return self._codes.size
@cache_readonly
def itemsize(self) -> int:
"""
return the size of a single category
"""
return self.categories.itemsize
def tolist(self) -> List[Scalar]:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
to_list = tolist
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (
isinstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computation intensive)
factorization step, which is usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
.. versionadded:: 0.24.0
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.
Returns
-------
Categorical
Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if isna(codes).any():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if len(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
        There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = | CategoricalDtype._from_fastpath(categories, self.ordered) | pandas.core.dtypes.dtypes.CategoricalDtype._from_fastpath |
import pandas as pd
import datetime as dt
import numpy as np
from pandas import concat
class BackTest():
def __init__(self,
pending_sell_steps,
stop_loss,
initial_value = 100,
value = 100,
verbose = False,
sell_on_profit = True
):
self.initial_value = initial_value
self.value = value
self.verbose = verbose
self.pending_sell_steps = pending_sell_steps
self.pending = -1
self.sell_on_profit = sell_on_profit
self.stop_loss = stop_loss
self.reset()
def reset(self):
self.current = self.initial_value
self.holding = 0
self.buy_price = 0
self.positive_trades = []
self.negative_trades = []
def on_state(self):
self.pending -= 1
def is_sell_pending(self):
return (self.pending >= 1)
def is_bought(self):
return (self.holding > 0)
def is_valid_sell(self, bid):
is_pending = self.is_sell_pending()
has_profit = (bid > self.buy_price)
is_not_pending = (not is_pending)
profit_before_pending = ((is_pending and (has_profit)) and self.sell_on_profit)
is_valid = (self.is_bought() and (is_not_pending or profit_before_pending))
#self.log(f"{is_valid} Pending {self.pending} Bid ({bid}): is_bought: {self.is_bought()} - is_not_pending: {is_not_pending} - profit_before_pending: {profit_before_pending}")
return is_valid
def is_obove_loss_limit(self, loss):
return loss < self.stop_loss
def on_up(self, bid, ask):
self.on_state()
if (self.is_bought()):
positive, profit = self.is_profit(bid)
should_sell = False
if (positive):
self.log(f"Profit detected bid: {bid} ask: {ask}")
should_sell = True
elif (self.is_obove_loss_limit(profit)):
self.log(f"Loss limit dedected {self.stop_loss}: {bid} ask: {ask}")
should_sell = True
if (should_sell):
self.sell(bid)
else:
self.buy(ask)
def on_down(self, bid, ask):
self.on_state()
if (self.is_valid_sell(bid)):
self.sell(bid)
self.pending = -1
def report(self):
percentage = self.get_profit()
print(f'{percentage}% -> {self.current}')
negative = 0
if (len(self.negative_trades) > 0):
negative = np.average(self.negative_trades)
print(f'Positive: {len(self.positive_trades)}({np.average(self.positive_trades)}) Negative: {len(self.negative_trades)}({negative})')
def get_profit(self):
percentage = ((self.current*100)/self.initial_value) - 100
#return self.positive_trades - self.negative_trades
return round(percentage, 5)
def buy(self, ask):
self.current = self.current - self.value
self.holding = self.value / ask
self.buy_price = ask
self.pending = self.pending_sell_steps
self.log(f'Bought: {ask}')
def is_profit(self, bid):
profit = (bid - self.buy_price) * self.holding
profit = round(profit, 4)
positive = (profit > 0)
return positive, profit
def sell(self, bid):
self.current = self.current + (bid * self.holding)
positive, profit = self.is_profit(bid)
if (positive):
self.positive_trades.append(profit)
result = f"PROFIT {profit}%"
else:
self.negative_trades.append(profit)
result = f"LOSS {profit}%"
self.log(f'SOLD >>>> Result: {result} total: {self.current}')
self.holding = 0
self.buy_price = 0
def log(self, message):
if (self.verbose):
print(f'{dt.datetime.now()} BackTest: {message}')
def __str__(self) -> str:
return f"BackTest (pending_sell_steps={self.pending_sell_steps} sell_on_profit={self.sell_on_profit} value={self.value})"
class ModelAgent():
def __init__(self,
model = [],
on_down = lambda bid, ask: bid,
on_up = lambda bid, ask: ask,
verbose = False,
simulate_on_price = True,
save_history = False
):
self.model = model
self.on_up = on_up
self.on_down = on_down
self.best_sell = 0
self.best_buy = 0
self.timestamp = 0
self.price = 0
self.verbose = verbose
self.simulate_on_price = simulate_on_price
self.last_action = {}
self.last_action["time"] = ''
self.last_action["action"] = ''
self.history = []
self.save_history = save_history
self.transction_enabled = True
def on_x(self, x):
y = self.model.predict(np.array([x]))
return self.on_predicted(y[0])
def on_new_state(self, timestamp, price, bid, ask):
self.best_ask = float(ask[-1][0])
self.best_bid = float(bid[-1][0])
self.timestamp = timestamp
self.price = price
#print(self.best_buy)
def on_predicted(self, y):
if (self.transction_enabled):
is_up = y > 0.5
if (is_up):
self.up()
else:
self.down()
return is_up
def up(self):
if (self.simulate_on_price):
self.log_action(f'UP')
self.on_up(ask = self.price, bid = self.price)
else:
self.log_action(f'UP on ask: {self.best_ask}')
self.on_up(ask = self.best_ask, bid = self.best_bid)
def down(self):
if (self.simulate_on_price):
self.log_action(f'DOWN')
self.on_down(ask = self.price, bid = self.price)
else:
self.log_action(f'DOWN on bid: {self.best_bid}')
self.on_down(ask = self.best_ask, bid = self.best_bid)
def log_action(self, action):
self.last_action["time"] = dt.datetime.now()
self.last_action["action"] = action
stock_date = pd.to_datetime(self.timestamp, unit='s')
if (self.verbose):
print(f"{dt.datetime.now()} ModelAgent({self.price}): {stock_date}({self.timestamp}) {self.get_last_action()}")
def get_last_action(self):
return f"{self.last_action['action']}"
def series_to_supervised(data, n_in=2, n_out=1):
n_vars = 1
df = pd.DataFrame(data)
cols = list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(i))
# put it all together
agg = | concat(cols, axis=1) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 16:34:40 2020
@author: skyjones
"""
import os
import pandas as pd
import shutil
stats_file = '/Users/manusdonahue/Documents/Sky/sienax_segmentations/K012/bin/axT1_raw_sienax/report.sienax'
output_csv = '/Users/manusdonahue/Documents/Sky/parsing_testing.csv'
def parse_sienax_stats(stats_file, output_csv):
out_df = | pd.DataFrame() | pandas.DataFrame |
import sys
import os
from collections import defaultdict
import numpy as np
import pandas as pd
import dill
import argparse
from tqdm import tqdm
from pyquaternion import Quaternion
from kalman_filter import NonlinearKinematicBicycle
nu_path = './devkit/python-sdk/'
sys.path.append(nu_path)
sys.path.append("../../mats")
from nuscenes.nuscenes import NuScenes
from nuscenes.prediction import PredictHelper
from nuscenes.map_expansion.map_api import NuScenesMap
from nuscenes.eval.prediction.splits import get_prediction_challenge_split
from environment import Environment, Scene, Node, GeometricMap, derivative_of
FREQUENCY = 2
dt = 1 / FREQUENCY
data_columns_vehicle = pd.MultiIndex.from_product([['position', 'velocity', 'acceleration', 'heading'], ['x', 'y']])
data_columns_vehicle = data_columns_vehicle.append(pd.MultiIndex.from_tuples([('heading', '°'), ('heading', 'd°')]))
data_columns_vehicle = data_columns_vehicle.append( | pd.MultiIndex.from_product([['velocity', 'acceleration'], ['norm']]) | pandas.MultiIndex.from_product |
"""
Functions for returning optimization results in several forms.
Contributor:
<NAME> - <EMAIL>
"""
import logging
import pandas as pd
from oemof import solph
from matplotlib import pyplot as plt
import os
def xlsx(nodes_data: dict, optimization_model: solph.Model, filepath: str):
"""
Returns model results as xlsx-files.
Saves the in- and outgoing flows of every bus of a given,
optimized energy system as .xlsx file
:param nodes_data: dictionary containing data from excel
scenario file
:type nodes_data: dict
:param optimization_model: optimized energy system
:type optimization_model: oemof.solph.model
:param filepath: path, where the results will be stored
:type filepath: str
:return: - **results** (.xlsx) - xlsx files containing in and \
outgoing flows of the energy systems' buses.
<NAME> - <EMAIL>
"""
results = solph.processing.results(optimization_model)
# Writes a spreadsheet containing the input and output flows into
# every bus of the energy system for every timestep of the
# timesystem
for i, b in nodes_data['buses'].iterrows():
if b['active']:
file_path = \
os.path.join(filepath, 'results_' + b['label'] + '.xlsx')
node_results = solph.views.node(results, b['label'])
df = node_results['sequences']
            with pd.ExcelWriter(file_path) as writer:
df.to_excel(writer, sheet_name=b['label'])
# returns logging info
logging.info(' ' + 'Results saved as xlsx for ' + b['label'])
def charts(nodes_data: dict, optimization_model: solph.Model,
energy_system: solph.EnergySystem):
"""
Plots model results.
Plots the in- and outgoing flows of every bus of a given,
optimized energy system
:param nodes_data: dictionary containing data from excel
scenario file
:type nodes_data: dict
:param optimization_model: optimized energy system
:type optimization_model: oemof.solph.Model
:param energy_system: original (unoptimized) energy system
:type energy_system: oemof.solph.Energysystem
:return: - **plots** (matplotlib.plot) plots displaying in \
and outgoing flows of the energy systems' buses.
<NAME> - <EMAIL>
"""
# rename variables
esys = energy_system
results = solph.processing.results(optimization_model)
for i, b in nodes_data['buses'].iterrows():
if b['active']:
logging.info(' ' + "******************************************"
+ "***************")
logging.info(' ' + 'RESULTS: ' + b['label'])
bus = solph.views.node(results, b['label'])
            logging.info(' ' + str(bus['sequences'].sum()))
fig, ax = plt.subplots(figsize=(10, 5))
bus['sequences'].plot(ax=ax)
ax.legend(loc='upper center', prop={'size': 8},
bbox_to_anchor=(0.5, 1.4), ncol=2)
fig.subplots_adjust(top=0.7)
plt.show()
esys.results['main'] = solph.processing.results(optimization_model)
esys.results['meta'] = solph.processing.meta_results(optimization_model)
esys.dump(dpath=None, filename=None)
class Results:
"""
Class for preparing Plotly results and logging the results of
Cbc-Solver
"""
results = None
esys = None
comp_input1 = None
comp_input2 = None
comp_output1 = None
comp_output2 = None
comp_capacity = None
df_list_of_components = None
df_result_table = pd.DataFrame()
# columns_of_plotly_table
copt = ['ID', 'type', 'input 1/kWh', 'input 2/kWh', 'output 1/kWh',
'output 2/kWh', 'capacity/kW', 'variable costs/CU',
'periodical costs/CU', 'investment/kW', 'max. invest./kW', 'constraints/CU']
@staticmethod
def log_category(component: str):
"""
        Returns logging info for the type of the given components,
        which is the first step of the logging / results producing process.
:param component: component type
:type component: str
"""
# returns logging infos
logging.info(' ' + 56 * "*")
logging.info(' ' + "***" + component + (53 - len(component)) * "*")
logging.info(' ' + 56 * "*")
logging.info(' ' + 56 * "-")
def create_flow_dataframes(self, comp, component,
excess_or_shortage=None):
"""
        creates up to five pandas Series consisting of the flows of the
        given component
"""
# clearing all possibly used variables
self.comp_input1 = None
self.comp_input2 = None
self.comp_output1 = None
self.comp_output2 = None
self.comp_capacity = None
label = comp['label']
        # because not every sheet contains the same columns, these
        # checks are necessary
bus1 = comp['bus1'] if 'bus1' in comp else None
bus2 = comp['bus2'] if 'bus2' in comp else None
if 'input' in comp:
input1 = comp['input']
elif 'bus' in comp:
input1 = comp['bus']
else:
input1 = None
if 'output' in comp:
output = comp['output']
elif 'bus' in comp:
output = comp['bus']
else:
output = None
output2 = comp['output2'] if 'output2' in comp else None
for index, value in component['sequences'].sum().items():
# inflow 1
if index in [((input1, label), 'flow'),
((bus1, label), 'flow'),
((label, label + '_excess'), 'flow')]:
self.comp_input1 = component['sequences'][index]
# inflow2
elif index in [((label + '_low_temp' + '_bus', label), 'flow'),
((label + '_high_temp' + '_bus', label), 'flow'),
((bus2, label), 'flow'),
((label[:-10] + '_bus', label), 'flow')]:
self.comp_input2 = component['sequences'][index]
# outflow 1
elif index in [((label, output), 'flow'),
((label, bus2), 'flow'),
((label + '_shortage', label), 'flow')]:
self.comp_output1 = component['sequences'][index]
# outflow 2
elif index in [((label, output2), 'flow'),
((label, bus1), 'flow')]:
self.comp_output2 = component['sequences'][index]
# capacity
elif index == ((label, 'None'), 'storage_content'):
self.comp_capacity = component['sequences'][index]
if excess_or_shortage == "excess":
self.comp_output1 = None
elif excess_or_shortage == "shortage":
self.comp_input1 = None
def get_comp_investment(self, comp, comp_type):
component_investment = 0
if comp['max. investment capacity'] > 0:
component_node = self.esys.groups[comp['label']]
# defines bus_node for different components
if comp_type == 'source':
if comp['input'] in [0, 'None', 'none']:
bus_node = self.esys.groups[comp['output']]
else:
component_node = self.esys.groups[comp['label'][:-10]]
label = comp['label'][:-10] + '_bus'
bus_node = self.esys.groups[label]
elif comp_type == 'storage':
bus_node = None
elif comp_type == 'link':
bus_node = self.esys.groups[comp['bus2']]
elif comp_type == 'transformer':
bus_node = self.esys.groups[comp['output']]
else:
raise ValueError('comp_type not known')
# sets component investment
component_investment += (self.results[component_node, bus_node]
['scalars']['invest'])
if comp_type == 'source':
if comp['input'] not in [0, 'None', 'none']:
# considers area conversion factor on investment for
# solar heat sources
component_investment = \
component_investment / comp['Conversion Factor']
return component_investment
def calc_constraint_costs(self, comp, investment=None, comp_type=None):
# get flow values
inflow1 = 0 if self.comp_input1 is None else self.comp_input1.sum()
outflow1 = 0 if self.comp_output1 is None else self.comp_output1.sum()
outflow2 = 0 if self.comp_output2 is None else self.comp_output2.sum()
constraint_costs = 0
# calculating constraint costs for different components
if comp_type == 'source':
constraint_costs += outflow1 * comp['variable constraint costs']
elif comp_type == 'link':
constraint_costs += \
comp['variable constraint costs'] * (outflow1 + outflow2)
elif comp_type == 'excess':
constraint_costs += \
inflow1 * comp['excess constraint costs']
elif comp_type == 'shortage':
constraint_costs += \
outflow1 * comp['shortage constraint costs']
else:
constraint_costs += outflow1 \
* comp['variable output constraint costs']
if comp_type == 'transformer':
constraint_costs \
+= outflow2 * comp['variable output constraint costs 2']
constraint_costs \
+= inflow1 * comp['variable input constraint costs']
if comp_type == 'storage':
constraint_costs \
+= inflow1 * comp['variable input constraint costs']
if comp_type != 'excess' and comp_type != 'shortage':
constraint_costs += investment \
* comp['periodical constraint costs']
return constraint_costs
def calc_variable_costs(self, comp, comp_type):
# get flow values
inflow1 = 0 if self.comp_input1 is None else self.comp_input1.sum()
outflow1 = 0 if self.comp_output1 is None else self.comp_output1.sum()
outflow2 = 0 if self.comp_output2 is None else self.comp_output2.sum()
variable_costs = 0
if comp_type == 'sources':
variable_costs += comp['variable costs'] * outflow1
elif comp_type == 'storages' or comp_type == 'transformers':
variable_costs += comp['variable input costs'] * inflow1
variable_costs += comp['variable output costs'] * outflow1
if comp_type == 'transformers':
variable_costs += (comp['variable output costs 2'] * outflow2)
elif comp_type == 'links':
variable_costs += \
comp['variable output costs'] * (outflow1 + outflow2)
elif comp_type == 'excess':
variable_costs += comp['excess costs'] * inflow1
elif comp_type == 'shortage':
variable_costs += comp['shortage costs'] * outflow1
return variable_costs
@staticmethod
def calc_periodical_costs(comp, investment):
periodical_costs = 0
fix_invest = comp['fix investment costs'] \
if 'fix investment costs' in comp else 0
non_convex = comp['non-convex investment'] \
if 'non-convex investment' in comp else 0
periodical_costs += (fix_invest
if (non_convex == 1 and investment > 0) else 0)
periodical_costs += investment * comp['periodical costs']
return periodical_costs
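    # Worked example for calc_periodical_costs (hypothetical numbers): with
    # 'fix investment costs' = 1000, 'non-convex investment' = 1, investment = 20 kW and
    # 'periodical costs' = 5 CU/kW, the result is 1000 + 20 * 5 = 1100 CU p.a.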
def add_component_to_loc(self, label, comp_type,
capacity=None, variable_costs=None,
periodical_costs=None, investment=None, maxinvest='---',
constraints=None):
"""
        adds the given component with its parameters to the
        list of components (loc)
"""
# creating strings for dataframe
inflow1 = '---' if self.comp_input1 is None \
else str(round(self.comp_input1.sum(), 2))
inflow2 = '---' if self.comp_input2 is None \
else str(round(self.comp_input2.sum(), 2))
outflow1 = '---' if self.comp_output1 is None \
else str(round(self.comp_output1.sum(), 2))
outflow2 = '---' if self.comp_output2 is None \
else str(round(self.comp_output2.sum(), 2))
capacity = '---' if capacity is None \
else str(round(capacity, 2))
variable_costs = '---' if variable_costs is None \
else str(round(variable_costs, 2))
periodical_costs = '---' if periodical_costs is None \
else str(round(periodical_costs, 2))
investment = '---' if investment is None \
else str(round(investment, 2))
constraints = '---' if constraints is None \
else str(round(constraints, 2))
self.df_list_of_components = \
self.df_list_of_components.append(
pd.DataFrame(
[[label, comp_type, inflow1, inflow2, outflow1, outflow2,
capacity, variable_costs, periodical_costs, investment,maxinvest,
constraints]], columns=self.copt))
@staticmethod
def get_first_node_flow(flow):
""" returns begin of the flow, used to log where the flow comes from"""
flow_name = str(flow.name)
flow_name = flow_name[2:-10]
flow_name = flow_name.split(',')
return flow_name[0]
@staticmethod
def get_last_node_flow(flow):
""" returns end of the flow, used to log where the flow goes to"""
flow_name = str(flow.name)
flow_name = flow_name[2:-10]
flow_name = flow_name.split(',')
return flow_name[1]
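    # Illustrative example (hypothetical flow name): for a series named
    # "(('electricity_bus', 'heat_pump'), 'flow')", get_first_node_flow yields the
    # 'electricity_bus' part and get_last_node_flow the 'heat_pump' part; both are
    # only used to build log messages.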
def console_logging(self, comp_type, capacity=None, variable_costs=None,
periodical_costs=None, investment=None,
transformer_type=None):
"""
        holds the different console logging entries and logs
        the one matching the given component
"""
inflow1 = self.comp_input1
inflow2 = self.comp_input2
outflow1 = self.comp_output1
outflow2 = self.comp_output2
if comp_type == 'sink':
logging.info(' ' + 'Total Energy Demand: ' + str(inflow1.sum())
+ ' kWh')
else:
if comp_type == 'source':
if inflow1 is None or \
'shortage' in self.get_first_node_flow(outflow1):
logging.info(' ' + 'Total Energy Input: '
+ str(outflow1.sum()) + ' kWh')
logging.info(' ' + 'Max. Capacity: ' + str(capacity)
+ ' kW')
else:
logging.info(' ' + 'Input from '
+ self.get_first_node_flow(inflow1) + ': '
+ str(round(inflow1.sum(), 2)) + ' kWh')
logging.info(' ' + 'Ambient Energy Input to '
+ self.get_first_node_flow(inflow2) + ': '
+ str(round(inflow2.sum(), 2)) + ' kWh')
logging.info(' ' + 'Energy Output to '
+ self.get_last_node_flow(outflow1) + ': '
+ str(round(outflow1.sum(), 2)) + ' kWh')
if comp_type == 'transformer':
if inflow2 is None:
logging.info(' ' + 'Total Energy Output to'
+ self.get_last_node_flow(outflow1) + ': '
+ str(round(outflow1.sum(), 2)) + ' kWh')
if outflow2 is not None:
logging.info(' ' + 'Total Energy Output to'
+ self.get_last_node_flow(outflow2) + ': '
+ str(round(outflow2.sum(), 2)) + ' kWh')
else:
logging.info(' ' + 'Electricity Energy Input to '
+ self.get_first_node_flow(inflow1) + ': '
+ str(round(inflow1.sum(), 2)) + ' kWh')
if transformer_type == 'absorption_heat_transformer':
logging.info(' ' + 'Heat Input to'
+ self.get_last_node_flow(inflow2) + ': '
+ str(round(inflow2.sum(), 2)) + ' kWh')
elif transformer_type == 'compression_heat_transformer':
logging.info(' ' + 'Ambient Energy Input to'
+ self.get_last_node_flow(inflow2) + ': '
+ str(round(inflow2.sum(), 2)) + ' kWh')
logging.info(' ' + 'Total Energy Output to'
+ self.get_last_node_flow(outflow1) + ': '
+ str(round(outflow1.sum(), 2)) + ' kWh')
logging.info(' ' + 'Max. Capacity: ' + str(capacity) + ' kW')
if comp_type == 'storage':
logging.info(
' ' + 'Energy Output from '
+ self.get_first_node_flow(outflow1) + ': '
+ str(round(outflow1.sum(), 2)) + 'kWh')
logging.info(' ' + 'Energy Input to '
+ self.get_last_node_flow(outflow1) + ': '
+ str(round(inflow1.sum(), 2)) + ' kWh')
if comp_type == 'link':
if capacity is None:
logging.info(' ' + 'Total Energy Output to '
+ self.get_last_node_flow(outflow1) + ': '
+ str(round(outflow1.sum(), 2)) + ' kWh')
logging.info(' ' + 'Total Energy Output to '
+ self.get_last_node_flow(outflow2) + ': '
+ str(round(outflow2.sum(), 2)) + ' kWh')
logging.info(' ' + 'Max. Capacity to '
+ self.get_last_node_flow(outflow1) + ': '
+ str(round(outflow1.max(), 2)) + ' kW')
logging.info(' ' + 'Max. Capacity to '
+ self.get_last_node_flow(outflow2) + ': '
+ str(round(outflow2.max(), 2)) + ' kW')
else:
logging.info(' ' + 'Total Energy Output to '
+ self.get_last_node_flow(outflow1) + ': '
+ str(round(outflow1.sum(), 2)) + ' kWh')
logging.info(' ' + 'Max. Capacity to '
+ self.get_last_node_flow(outflow1) + ': '
+ str(round(capacity, 2)) + ' kW')
if investment is not None:
logging.info(' ' + 'Investment Capacity: '
+ str(round(investment, 2)) + ' kW')
if periodical_costs is not None:
logging.info(' ' + 'Periodical costs: '
+ str(round(periodical_costs, 2))
+ ' cost units p.a.')
logging.info(' ' + 'Variable Costs: '
+ str(round(variable_costs, 2)) + ' cost units')
@staticmethod
def insert_line_end_of_component():
logging.info(
' ' + '--------------------------------------------------------')
def __init__(self, nodes_data: dict, optimization_model: solph.Model,
energy_system: solph.EnergySystem, result_path: str,
console_log: bool):
"""
Returns a list of all defined components with the following
information:
+------------+----------------------------------------------+
|component | information |
+------------+----------------------------------------------+
|sinks | Total Energy Demand |
+------------+----------------------------------------------+
|sources | Total Energy Input, Max. Capacity, |
| | Variable Costs, Periodical Costs |
+------------+----------------------------------------------+
|transformers| Total Energy Output, Max. Capacity, |
| | Variable Costs, Investment Capacity, |
| | Periodical Costs |
+------------+----------------------------------------------+
|storages | Energy Output, Energy Input, Max. Capacity,|
| | Total variable costs, Investment Capacity, |
| | Periodical Costs |
+------------+----------------------------------------------+
|links | Total Energy Output |
+------------+----------------------------------------------+
Furthermore, a list of recommended investments is printed.
The algorithm uses the following steps:
1. logging the component type for example "sinks"
2. creating pandas dataframe out of the results of the
optimization consisting of every single flow in/out
a component
3. calculating the investment and the costs regarding
the flows
4. adding the component to the list of components (loc)
which is part of the plotly dash and is the content
of components.csv
5. logging the component specific text with its parameters
in the console
:param nodes_data: dictionary containing data from excel \
scenario file
:type nodes_data: dict
:param optimization_model: optimized energy system
:type optimization_model: oemof.solph.Model
:param energy_system: original (unoptimized) energy system
:type energy_system: oemof.solph.Energysystem
:param result_path: Path where the results are saved.
:type result_path: str
<NAME> - <EMAIL>
<NAME> - <EMAIL>
"""
components_dict = {
"sinks": nodes_data['sinks'],
"buses_e": nodes_data['buses'],
"sources": nodes_data['sources'],
"buses_s": nodes_data['buses'],
"transformers": nodes_data['transformers'],
"storages": nodes_data['storages'],
"links": nodes_data['links']
}
# create excess and shortage sheet
components_dict['buses_e'] = components_dict['buses_e'].drop(
components_dict['buses_e']
[components_dict['buses_e']['excess'] == 0].index)
components_dict['buses_s'] = components_dict['buses_s'].drop(
components_dict['buses_s']
[components_dict['buses_s']['shortage'] == 0].index)
investments_to_be_made = {}
total_periodical_costs = 0
total_usage = 0
total_variable_costs = 0
total_constraint_costs = 0
total_demand = 0
# define class variables
self.esys = energy_system
self.results = solph.processing.results(optimization_model)
self.df_list_of_components = pd.DataFrame(columns=self.copt)
self.df_result_table = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
"""Parse Postgres log to retrieve the dataset ids and the IP of the API users."""
import argparse
import glob
import json
import os
import pandas as pd
# Limit to these postgrest queries
QUERY_STRINGS = [""]
def parseLog(log_file: str, parsed_log_file: str):
"""Parse the original log file."""
header = [
"log_time",
"user_name",
"database_name",
"process_id",
"connection_from",
"session_id",
"session_line_num",
"command_tag",
"session_start_time",
"virtual_transaction_id",
"transaction_id",
"error_severity",
"sql_state_code",
"message",
"detail",
"hint",
"internal_query",
"internal_query_pos",
"context",
"query",
"query_pos",
# "location" , # verbose off: no location available
"application_name",
"backend_type",
]
try:
log = pd.read_csv(
log_file, header=None, error_bad_lines=False, low_memory=False
)
log.columns = header
except (pd.errors.EmptyDataError, pd.errors.ParserError):
print("Cannot decode the content of the log file")
log = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
import numpy as np
class ContentBased(object):
def __init__(self,stop_words=None,token_pattern=None,metric='cosine',n_neighbors=5):
self.tfidf_vectorizer=TfidfVectorizer( stop_words=stop_words, token_pattern=token_pattern)
self.nearest_neigbors=NearestNeighbors(metric=metric,n_neighbors=n_neighbors,algorithm='brute')
def fit(self,datos,columna_descripcion):
self.datos=datos
datos_por_tags = self.tfidf_vectorizer.fit_transform(datos[columna_descripcion])
self.nearest_neigbors.fit(datos_por_tags)
def predict(self,descripcion):
descripcion_tags = self.tfidf_vectorizer.transform(descripcion)
if descripcion_tags.sum() == 0:
return | pd.DataFrame(columns=datos.columns) | pandas.DataFrame |
import dateparser
from datetime import datetime
import re
from requests import Session
from retry_requests import retry
import pandas as pd
LCPS_API_URL = 'https://s3.eu-de.cloud-object-storage.appdomain.cloud/cloud-object-storage-lcps/news.json' # noqa
def get(url):
session = retry(Session(), retries=10, backoff_factor=0.2)
ret = session.get(url)
while ret.status_code != 200: # keep trying until we succeed
ret = session.get(url)
return ret
def titlenormalizer(title):
return ' '.join(title.split(' ')[0:2]).replace('.', '').lower().strip()
def titleclassifier(title):
split = title.split(' ')
if len(split) != 2:
return False
try:
if split[1].startswith('covid') or split[1].startswith('corona'):
return True
except ValueError:
pass
return False
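# Illustrative behaviour (hypothetical headline): titlenormalizer("123 Covid-patienten opgenomen")
# returns "123 covid-patienten", which titleclassifier accepts because the second token
# starts with "covid".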
if __name__ == '__main__':
news = get(LCPS_API_URL).json()
year = datetime.now().year
data = []
for item in news['updates']:
# print(item["content"])
title = titlenormalizer(item['title'])
# print(item['content'])
if titleclassifier(title):
patients = int(title.split(' ')[0])
date = str(dateparser.parse(item['date'], languages=["nl"]).date())
matches = re.search(
r"(\d+)\s+in\s+Duitsland",
item['content'],
re.MULTILINE
)
patients_in_de = 0
try:
patients_in_de = int(matches.group(1))
except AttributeError:
pass
data.append({'Date': date, 'Aantal': patients,
'AantalDuitsland': patients_in_de})
df_parsed_nums = pd.DataFrame(data).set_index('Date').sort_index()
df_lcps = pd.read_csv('data/lcps_ic.csv', index_col=0)
df_lcps = df_lcps.combine_first(df_parsed_nums)
df_lcps['Aantal'] = df_lcps['Aantal'].astype(pd.Int64Dtype())
df_lcps['AantalDuitsland'] = df_lcps['AantalDuitsland'].astype(pd.Int64Dtype())
df_lcps = df_lcps[~df_lcps.index.duplicated()]  # assign the result so duplicate dates are actually dropped
df_lcps[["Aantal"]].to_csv('data/lcps_ic.csv')
df_lcps_country = df_lcps.copy()
df_lcps_country['Nederland'] = df_lcps['Aantal'] - df_lcps['AantalDuitsland']
df_lcps_country['Duitsland'] = df_lcps['AantalDuitsland']
df_lcps_country = df_lcps_country[["Nederland"]].stack(dropna=False)
df_lcps_country.index.names = ['Datum', 'Land']
df_lcps_country.name = "Aantal"
df_lcps_country = df_lcps_country.to_frame()
df_lcps_country_from_file = pd.read_csv('data/lcps_ic_country.csv', index_col=[0, 1])
df_lcps_country = df_lcps_country_from_file.combine_first(df_lcps_country)
    df_lcps_country = df_lcps_country[~df_lcps_country.index.duplicated()]
df_lcps_country['Aantal'] = df_lcps_country['Aantal'].astype( | pd.Int64Dtype() | pandas.Int64Dtype |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 5 20:32:46 2019
@author: sbece
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
df = sm.datasets.macrodata.load_pandas().data
df.head()
print(sm.datasets.macrodata.NOTE)
index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1' , '2009Q3'))
df.index = index
df.head()
##PLOT
df['realgdp'].plot()
##TRENDS
gdp_cycle, gdp_trend = sm.tsa.filters.hpfilter(df['realgdp'])
df['trend'] = gdp_trend
df[['realgdp','trend']]["2000-03-31":].plot()
##EWMA (EXPONENTIALLY WEIGHTED MOVING AVERAGES) MODELS
##SIMPLE MOVING AVERAGES (SMA)
airline = pd.read_csv('airline-passengers.csv', index_col = "Month")
airline.head()
airline.dropna(inplace=True)
airline.index = pd.to_datetime(airline.index)
airline.head()
airline.index
airline['6-month-SMA'] = airline['#Passengers'].rolling(window=6).mean()
airline['12-month-SMA'] = airline['#Passengers'].rolling(window=12).mean()
airline.plot(figsize=(10,8))
airline['EWMA-12'] = airline['#Passengers'].ewm(span=12).mean()
airline[["#Passengers","EWMA-12"]].plot()
##ETS (Error-Trend-Seasonality) MODELS
##Exponential Smoothing
##Trend Methods Models
##ETS Decomposition
airline.plot()
from statsmodels.tsa.seasonal import seasonal_decompose
result = seasonal_decompose(airline['#Passengers'], model = 'multiplicative')
result.seasonal.plot()
result.trend.plot()
result.plot()
##ARIMA MODELS
##Step 1
df = pd.read_csv('monthly-milk-production.csv')
df.head()
df.columns = ["Month", "Milk in pounds per cow"]
df.head()
df.tail()
##to drop a row
##df.drop(168, axis=0, inplace=True)
df['Month'] = pd.to_datetime(df['Month'])
df.set_index('Month', inplace=True)
df.head()
df.describe()
df.describe().transpose()
##Step 2
df.plot()
time_series = df['Milk in pounds per cow']
type(time_series)
time_series.rolling(12).mean().plot(label = '12-Month Rolling Mean')
time_series.rolling(12).std().plot(label = '12-Month Rolling Std')
time_series.plot()
plt.legend()
from statsmodels.tsa.seasonal import seasonal_decompose
decomp = seasonal_decompose(time_series)
decomp.plot()
fig = decomp.plot()
fig.set_size_inches(15,8)
from statsmodels.tsa.stattools import adfuller
result = adfuller(df['Milk in pounds per cow'])
def adf_check(time_series):
result = adfuller(time_series)
print(" Augmented Dicky-Fuller Test")
labels = ['ADF Test Statistic', 'p-value', '# of lags', 'Num of Observations used']
for value, label in zip(result, labels):
print (label+ ' : ' + str(value))
if result[1] <= 0.05:
print("Strong evidence against null hypothesis")
print("Reject null hypothesis")
print("Data has no unit root and is stationary")
else:
print("Weak evidence against null hypothesis")
print("Failed to reject null hypothesis")
print("Data has a unit root and is non-stationary")
adf_check(df['Milk in pounds per cow'])
df['First Difference'] = df['Milk in pounds per cow'] -df['Milk in pounds per cow'].shift(1)
df['First Difference'].plot()
adf_check(df['First Difference'].dropna())
##If the first difference is NOT stationary, we have to keep differencing until it is
##As an example we will show the second difference
df['Second Difference'] = df['First Difference'] -df['First Difference'].shift(1)
df['Second Difference'].plot()
adf_check(df['Second Difference'].dropna())
#####Seasonal difference
df['Seasonal Difference'] = df['Milk in pounds per cow'] -df['Milk in pounds per cow'].shift(12)
df['Seasonal Difference'].plot()
adf_check(df['Seasonal Difference'].dropna())
#####Seasonal difference - 1st difference
df['Seasonal First Difference'] = df['First Difference'] - df['First Difference'].shift(12)
df['Seasonal First Difference'].plot()
adf_check(df['Seasonal First Difference'].dropna())
##ACF and PACF
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
fig_first = plot_acf(df['First Difference'].dropna())
fig_seasonal_first = plot_acf(df['Seasonal First Difference'].dropna())
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(df['Seasonal First Difference'].dropna())
result = plot_pacf(df['Seasonal First Difference'].dropna())
plot_acf(df['Seasonal First Difference'].dropna())
plot_pacf(df['Seasonal First Difference'].dropna())
##deploy an ARIMA model
from statsmodels.tsa.arima_model import ARIMA
model = sm.tsa.statespace.SARIMAX(df['Milk in pounds per cow'], order = (0,1,0), seasonal_order=(1,1,1,12))
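##order=(0,1,0) models the first difference; seasonal_order=(1,1,1,12) adds the
##12-month seasonal terms explored with the seasonal first difference above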
results = model.fit()
print(results.summary())
results.resid #residuals
results.resid.plot()
results.resid.plot(kind='kde') #kernel density estimator
df['forecast'] = results.predict(start=150, end=168)
df[['Milk in pounds per cow', 'forecast']].plot()
##Predict new values
from pandas.tseries.offsets import DateOffset
future_dates = [df.index[-1] + DateOffset(months=x) for x in range(1,24)]
future_dates
future_df = pd.DataFrame(index=future_dates, columns=df.columns)
future_df
final_df = | pd.concat([df, future_df]) | pandas.concat |
# coding: utf-8
# In[1]:
import numpy as np
import os
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import cvxopt
os.chdir('H:/')
# ### define functions
# In[2]:
#create xy for least square
def create_xy(target_crop,grande,malay_gdp,malay_pop):
y=grande['price'][target_crop]
x=pd.concat([malay_gdp['Value'],malay_pop['Value'],
grande['production'][target_crop]],axis=1)
x=sm.add_constant(x)
#set production negative
x[target_crop.lower()]=x[target_crop].apply(lambda x:-1*x)
del x[target_crop]
return x,y
# In[3]:
#linear regression
def lin_reg(crops,grande,malay_gdp,malay_pop,viz=False):
D={}
#run regression
for target_crop in crops:
#create xy
x,y=create_xy(target_crop,grande,malay_gdp,malay_pop)
m=sm.OLS(y,sm.add_constant(x)).fit()
D[target_crop]=(m.rsquared,m.params.tolist())
#viz
if viz:
fig=plt.figure(figsize=(10,5))
ax=fig.add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.ylabel('USD/Tonnes')
plt.xlabel('Year')
plt.plot(range(beginyear,endyear),m.predict(),label='Est',color='#C8C8A9')
plt.plot(range(beginyear,endyear),y,label='Act',color='#EDE574')
plt.title(target_crop+' Price')
plt.legend()
plt.show()
return D
# In[4]:
def constrained_ols(x,y):
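    # Non-negative least squares posed as a QP: minimize 0.5*b'(X'X)b - (X'y)'b
    # subject to -I*b <= 0, i.e. every coefficient is constrained to be >= 0.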
linear_coeff=cvxopt.matrix(-1*np.mat(y.tolist())*np.mat(x)).T
quadratic_coeff=cvxopt.matrix(np.mat(x).T*np.mat(x))
#inequality constraint
inequality_coeff=cvxopt.matrix(0.0,(len(x.columns),len(x.columns)))
#diagonal matrix
    #set the diagonal to -1 so that G*b <= 0 encodes b >= 0 ("greater than" constraints)
inequality_coeff[::len(x.columns)+1]=-1
inequality_value=cvxopt.matrix([0.0 for _ in range(len(x.columns))])
cvxopt.solvers.options['show_progress']=False
ans=cvxopt.solvers.qp(P=quadratic_coeff,q=linear_coeff,
G=inequality_coeff,h=inequality_value)['x']
return ans
# In[5]:
#create params by using constrained ols
def get_params(crops,grande,malay_gdp,malay_pop,viz=False):
D={}
for target_crop in crops:
#create xy
x,y=create_xy(target_crop,grande,malay_gdp,malay_pop)
#constrained ols
ans=constrained_ols(x,y)
#viz
if viz:
#get forecast and convert to list
forecast=np.mat(ans).T*np.mat(x).T
forecast=forecast.ravel().tolist()[0]
fig=plt.figure(figsize=(10,5))
ax=fig.add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.ylabel('USD/Tonnes')
plt.xlabel('Year')
plt.plot(range(beginyear,endyear),forecast,label='Est',color='#C8C8A9',)
plt.plot(range(beginyear,endyear),y,label='Act',color='#EDE574')
plt.legend()
plt.title(target_crop+' Price')
plt.show()
D[target_crop]=list(ans)
return D
# ### execution
# In[6]:
global beginyear,endyear
beginyear=2012
endyear=2019
# In[7]:
grand=pd.read_csv('grand.csv')
malay_pop=pd.read_csv('malay_pop.csv')
malay_gdp= | pd.read_csv('malay_gdp.csv') | pandas.read_csv |
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
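# combine_first patches null entries in the calling object with values from the
# other object at the same label, aligning on the union of indexes and columns.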
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
result = df.combine_first(other)
tm.assert_frame_equal(result, df)
df.loc[0, "A"] = np.nan
result = df.combine_first(other)
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = DataFrame(
{
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
}
)
result = df1.combine_first(df2)
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
def test_combine_first_return_obj_type_with_bools(self):
# GH3552
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
expected = Series([True, True, False], name=2, dtype=bool)
result_12 = df1.combine_first(df2)[2]
tm.assert_series_equal(result_12, expected)
result_21 = df2.combine_first(df1)[2]
tm.assert_series_equal(result_21, expected)
@pytest.mark.parametrize(
"data1, data2, data_expected",
(
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
),
)
def test_combine_first_convert_datatime_correctly(
self, data1, data2, data_expected
):
# GH 3593
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
result = df1.combine_first(df2)
expected = DataFrame({"a": data_expected})
tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
dfb = DataFrame([[4], [5]], columns=["b"])
assert dfa["a"].dtype == "datetime64[ns]"
assert dfa["b"].dtype == "int64"
res = dfa.combine_first(dfb)
exp = DataFrame(
{"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]},
columns=["a", "b"],
)
tm.assert_frame_equal(res, exp)
assert res["a"].dtype == "datetime64[ns]"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
res = dfa.iloc[:0].combine_first(dfb)
exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
tm.assert_frame_equal(res, exp)
# ToDo: this must be datetime64
assert res["a"].dtype == "float64"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
def test_combine_first_timezone(self):
# see gh-7630
data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
df1 = DataFrame(
columns=["UTCdatetime", "abc"],
data=data1,
index=pd.date_range("20140627", periods=1),
)
data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
df2 = DataFrame(
columns=["UTCdatetime", "xyz"],
data=data2,
index=pd.date_range("20140628", periods=1),
)
res = df2[["UTCdatetime"]].combine_first(df1)
exp = DataFrame(
{
"UTCdatetime": [
pd.Timestamp("2010-01-01 01:01", tz="UTC"),
pd.Timestamp("2012-12-12 12:12", tz="UTC"),
],
"abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
},
columns=["UTCdatetime", "abc"],
index=pd.date_range("20140627", periods=2, freq="D"),
)
assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
assert res["abc"].dtype == "datetime64[ns, UTC]"
tm.assert_frame_equal(res, exp)
# see gh-10567
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, UTC]"
dts1 = pd.DatetimeIndex(
["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
)
df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
dts2 = pd.DatetimeIndex(
["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
)
df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.DatetimeIndex(
[
"2011-01-01",
"2012-01-01",
"NaT",
"2012-01-02",
"2011-01-03",
"2011-01-04",
],
tz="US/Eastern",
)
exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
# different tz
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05")
df2 = DataFrame({"DATE": dts2})
# if df1 doesn't have NaN, keep its dtype
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-01", "2015-01-03")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
exp_dts = [
pd.Timestamp("2015-01-01", tz="US/Eastern"),
pd.Timestamp("2015-01-02", tz="US/Eastern"),
pd.Timestamp("2015-01-03"),
]
exp = DataFrame({"DATE": exp_dts})
tm.assert_frame_equal(res, exp)
assert res["DATE"].dtype == "object"
def test_combine_first_timedelta(self):
data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7])
data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
df2 = DataFrame({"TD": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.TimedeltaIndex(
["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
)
exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["TD"].dtype == "timedelta64[ns]"
def test_combine_first_period(self):
data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
df1 = DataFrame({"P": data1}, index=[1, 3, 5, 7])
data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
df2 = DataFrame({"P": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.PeriodIndex(
["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
)
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == data1.dtype
# different freq
dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
df2 = DataFrame({"P": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = [
pd.Period("2011-01", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.NaT,
pd.Period("2012-01-02", freq="D"),
pd.Period("2011-03", freq="M"),
pd.Period("2011-04", freq="M"),
]
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == "object"
def test_combine_first_int(self):
# GH14687 - integer series that do no align exactly
df1 = DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
df2 = DataFrame({"a": [1, 4]}, dtype="int64")
result_12 = df1.combine_first(df2)
expected_12 = DataFrame({"a": [0, 1, 3, 5]})
tm.assert_frame_equal(result_12, expected_12)
result_21 = df2.combine_first(df1)
expected_21 = DataFrame({"a": [1, 4, 3, 5]})
| tm.assert_frame_equal(result_21, expected_21) | pandas._testing.assert_frame_equal |
import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
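# Recursively compare round-tripped objects with the pandas assertion helper
# that matches each container type.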
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
| assert_index_equal(a, b) | pandas.util.testing.assert_index_equal |
######################################################################################88
import scanpy as sc
import random
import pandas as pd
import matplotlib.pyplot as plt
from os.path import exists
from pathlib import Path
from collections import Counter
from sklearn.metrics import pairwise_distances
from sklearn.utils import sparsefuncs
from sklearn.decomposition import KernelPCA
import numpy as np
import scipy
from scipy.cluster import hierarchy
from scipy.spatial.distance import squareform, cdist
from scipy.sparse import issparse, csr_matrix
from anndata import AnnData
import sys
import os
from sys import exit
from . import tcr_scoring
from . import util
from . import pmhc_scoring
from . import plotting
from .tcrdist.tcr_distances import TcrDistCalculator
from .util import tcrdist_cpp_available
# we also allow 'va' in place of 'va_gene' / 'ja' in place of 'ja_gene', etc:
CLONES_FILE_REQUIRED_COLUMNS = 'clone_id va_gene ja_gene cdr3a cdr3a_nucseq vb_gene jb_gene cdr3b cdr3b_nucseq'.split()
# silly hack
all_sexlinked_genes = frozenset('XIST DDX3Y EIF1AY KDM5D LINC00278 NLGN4Y RPS4Y1 TTTY14 TTTY15 USP9Y UTY ZFY'.split())
def check_if_raw_matrix_is_logged( adata ):
return adata.uns.get( 'raw_matrix_is_logged', False )
def set_raw_matrix_is_logged_to_true( adata ):
adata.uns[ 'raw_matrix_is_logged' ] = True
def add_mait_info_to_adata_obs( adata, key_added = 'is_invariant' ):
''' This sets up a boolean array reflecting the presence of
canonical MAIT or iNKT TCR chains. It uses pretty crude definitions!
So for example a MAIT or iNKT cluster might be expected to have
many clonotypes matching these but certainly not all.
Note that it doesn't use the GEX data at all
'''
if key_added in adata.obs:
#print('adata.obs already has mait info')
return
tcrs = retrieve_tcrs_from_adata(adata)
organism = 'human' if 'organism' not in adata.uns_keys() else \
adata.uns['organism']
if 'human' in organism:
is_mait = [ tcr_scoring.is_human_mait_alpha_chain(x[0]) for x in tcrs ]
is_inkt = [ tcr_scoring.is_human_inkt_tcr(x) for x in tcrs ]
else:
assert 'mouse' in organism
is_mait = [ tcr_scoring.is_mouse_mait_alpha_chain(x[0]) for x in tcrs ]
is_inkt = [ tcr_scoring.is_mouse_inkt_alpha_chain(x[0]) for x in tcrs ]
adata.obs[key_added] = (np.array(is_mait) | np.array(is_inkt))
def normalize_and_log_the_raw_matrix(
adata,
normalize_antibody_features_CLR=True, # centered log normalize
counts_per_cell_after = 1e4,
):
'''
The GEX features are normalized to sum to `counts_per_cell_after` then
log1p'ed
If present, antibody aka protein/citeseq/dextramer features will log1p'ed
and CLR log normalized (if `normalize_antibody_features_CLR` is True) or
just log1p'ed
'''
if check_if_raw_matrix_is_logged( adata ):
print('normalize_and_log_the_raw_matrix:: matrix is already logged')
return adata
print('Normalize and logging matrix...')
ft_varname = util.get_feature_types_varname(adata)
if ft_varname:
ftypes_counts = Counter(adata.raw.var[ft_varname]).most_common()
print('feature_types counter:', ftypes_counts)
ngenes = sum( adata.raw.var[ft_varname] == util.GENE_EXPRESSION_FEATURE_TYPE)
#ngenes = sum( adata.raw.var[ft_varname] != 'Antibody Capture' )
else:
ngenes = adata.raw.shape[1]
n_ab_features = adata.raw.shape[1] - ngenes
X_gex = adata.raw.X[:,:ngenes]
counts_per_cell = np.sum( X_gex, axis=1 ).A1 # A1 since X_gex is sparse
assert np.min( counts_per_cell ) > 0
##### PARANOID: we don't want to accidentally normalize and log again
if np.median( counts_per_cell ) < 100:
# this actually not that great a check...
print('WARNING normalize_and_log_the_raw_matrix:',
'low median counts_per_cell.', np.median(counts_per_cell),'\n',
'has the matrix already been log1p-ed???')
exit()
maxval = X_gex.max()
print('normalize_and_log_the_raw_matrix: adata.raw.X.max()= ', maxval)
if np.isnan(maxval):
print('ERROR X matrix has nans!')
exit()
if maxval < 12.:
print('WARNING!!!\n'*20)
print('adata.raw.X.max() seems too low', maxval)
print('Has the adata.raw.X array already been normalized and logged??')
print('normalize_and_log_the_raw_matrix:: problem in setup?')
print('confused? contact <EMAIL> (sorry for the trouble!)')
print('WARNING!!!\n'*20)
counts_per_cell /= counts_per_cell_after
sparsefuncs.inplace_row_scale(X_gex, 1/counts_per_cell)
new_counts_per_cell = np.sum( X_gex, axis=1 ).A1 # A1 since X_gex is sparse
assert min(new_counts_per_cell) > counts_per_cell_after-1 and max(new_counts_per_cell) < counts_per_cell_after+1
# now log1p transform
np.log1p( X_gex.data, out = X_gex.data )
if n_ab_features:
X_ab = adata.raw.X[:,ngenes:]
np.log1p( X_ab.data, out = X_ab.data )
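        # Optional CLR-style normalization below: subtract each cell's mean log1p
        # antibody count so antibody features are centered within the cell.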
if normalize_antibody_features_CLR:
X_ab_mn = X_ab.mean(axis=1)
X_ab = scipy.sparse.csr_matrix(X_ab - X_ab_mn)
new_X = scipy.sparse.hstack( [X_gex, X_ab], format="csr" )
else:
new_X = X_gex
adata_new = AnnData( X = new_X, obs = adata.obs, var = adata.raw.var )
adata.raw = adata_new
set_raw_matrix_is_logged_to_true( adata )
#print(adata)
return adata
tcr_keys = 'va ja cdr3a cdr3a_nucseq vb jb cdr3b cdr3b_nucseq'.split() # ugly
def store_tcrs_in_adata(adata, tcrs):
''' returns NOTHING, modifies adata
tcrs is a list of (atcr,btcr) tuples, where
atcr = (va,ja,cdr3a,cdr3a_nucseq) ...
'''
assert len(tcrs) == adata.shape[0]
global tcr_keys
tcr_indices = ((x,y) for x in range(2) for y in range(4))
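    # (chain, field) index pairs: chain 0/1 = alpha/beta; fields 0-3 = V gene, J gene,
    # CDR3 amino-acid sequence, CDR3 nucleotide sequence (see docstring above).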
for tag, (i,j) in zip( tcr_keys, tcr_indices):
adata.obs[tag] = [x[i][j] for x in tcrs ]
# ensure lower case
adata.obs['cdr3a_nucseq'] = adata.obs.cdr3a_nucseq.str.lower()
adata.obs['cdr3b_nucseq'] = adata.obs.cdr3b_nucseq.str.lower()
return
def retrieve_tcrs_from_adata(adata, include_subject_id_if_present=False):
''' include_subject_id_if_present = True means that we add to the tcr-tuples the subject_id
as given in the obs array. This will prevent clones from being condensed across individuals
'''
global tcr_keys
tcrs = []
if include_subject_id_if_present and util.SUBJECT_ID_OBS_KEY in adata.obs_keys():
print(f'retrieve_tcrs_from_adata: include_subject_id_if_present is True and {util.SUBJECT_ID_OBS_KEY} present')
arrays = [ adata.obs[x] for x in tcr_keys+[util.SUBJECT_ID_OBS_KEY] ]
for va, ja, cdr3a, cdr3a_nucseq, vb, jb, cdr3b, cdr3b_nucseq, subject_id in zip( *arrays):
tcrs.append(((va, ja, cdr3a, cdr3a_nucseq.lower(), subject_id),
(vb, jb, cdr3b, cdr3b_nucseq.lower(), subject_id)))
else:
arrays = [ adata.obs[x] for x in tcr_keys ]
for va,ja,cdr3a,cdr3a_nucseq,vb,jb,cdr3b,cdr3b_nucseq in zip(*arrays):
tcrs.append(((va, ja, cdr3a, cdr3a_nucseq.lower()),
(vb, jb, cdr3b, cdr3b_nucseq.lower()) ) )
return tcrs
def read_adata(
gex_data, # filename
gex_data_type, # string describing file type
gex_only = True
):
''' Split this out so that other code can use it. Read GEX data
'''
print('reading:', gex_data, 'of type', gex_data_type)
if gex_data_type == 'h5ad':
adata = sc.read_h5ad( gex_data )
elif gex_data_type == '10x_mtx':
adata = sc.read_10x_mtx( gex_data, gex_only=gex_only )
elif gex_data_type == '10x_h5':
adata = sc.read_10x_h5( gex_data, gex_only=gex_only )
adata.var_names_make_unique()
elif gex_data_type == 'loom':
adata = sc.read_loom( gex_data )
else:
print('unrecognized gex_data_type:', gex_data_type,
"should be one of ['h5ad', '10x_mtx', '10x_h5', 'loom']")
exit()
if adata.isview: # ran into trouble with AnnData views vs copies
adata = adata.copy()
return adata
def read_dataset(
gex_data,
gex_data_type,
clones_file,
make_var_names_unique = True,
keep_cells_without_tcrs = False,
kpca_file = None,
allow_missing_kpca_file = False, # only relevant if clones_file!=None
gex_only=True, #only applies to 10x-formatted data
suffix_for_non_gene_features=None, #None or a string
):
''' returns adata
stores the tcr-dist kPCA info in adata.obsm under the key 'X_pca_tcr'
stores the tcr info in adata.obs under multiple keys (see store_tcrs_in_adata(...) function)
if clones_file is None, gex_data_type must be 'h5ad' and the tcr info
must already be in the AnnData object (ie adata) when we load it
'''
include_tcr_nucseq = True
adata = read_adata(gex_data, gex_data_type, gex_only=gex_only)
util.setup_uns_dicts(adata) # for storing conga results
adata.uns['conga_stats']['num_cells_w_gex'] = adata.shape[0]
adata.uns['conga_stats']['num_features_start'] = adata.shape[1]
if suffix_for_non_gene_features is not None:
feature_types_colname = util.get_feature_types_varname( adata )
assert feature_types_colname, \
'cant identify non-gene features, no feature_types data column'
ab_mask = np.array(adata.var[feature_types_colname] !=
util.GENE_EXPRESSION_FEATURE_TYPE)
print(f'adding {suffix_for_non_gene_features} to {np.sum(ab_mask)}'
f' non-GEX features: {adata.var_names[ab_mask]}')
newnames = [ x+suffix_for_non_gene_features if y else x
for x,y in zip(adata.var_names, ab_mask)]
adata.var.index = newnames
if make_var_names_unique:
adata.var_names_make_unique() # added
if clones_file is None:
# adata should already contain the tcr information
colnames = 'va ja cdr3a cdr3a_nucseq vb jb cdr3b cdr3b_nucseq'.split()
for colname in colnames:
if colname not in adata.obs_keys():
print('ERROR read_dataset: clones_file = None',
'but adata doesnt already contain', colname)
sys.exit()
# what about 'X_pca_tcr' ie the kernel PCs??
# should we worry about those now?
if 'X_pca_tcr' not in adata.obsm_keys():
print('WARNING:: reading dataset without clones file',
'kernel PCs will not be set ie X_pca_tcr array',
'will be missing from adata.obsm!!!!!!!!!!!!!', sep='\n')
return adata ########################################## EARLY RETURN
barcodes = set( adata.obs.index )
print('total barcodes:',len(barcodes),adata.shape)
# read the kpcs, etc
bcmap_file = clones_file+'.barcode_mapping.tsv'
if kpca_file is None:
kpca_file = clones_file[:-4]+'_AB.dist_50_kpcs'
assert exists(clones_file)
assert exists(bcmap_file)
if not allow_missing_kpca_file:
assert exists(kpca_file)
print('reading:',clones_file)
tmpdata = open(clones_file,'r')
clones_file_header = tmpdata.readline()
tmpdata.close()
#clones_file_header = popen('head -n1 '+clones_file).readlines()[0]
clones_df = | pd.read_csv(clones_file, sep='\t') | pandas.read_csv |
import pandas as pd
def get_zipcode(lat, lon, all_l):
row = all_l[(all_l['latitude'] == lat) & (all_l['longitude'] == lon)]
print(row)
print("*")
if __name__ == "__main__":
root_path = "/Users/shravya/Documents/CMU/Interactive_Data_Science/Assignments/3/Code2/data/"
reviews = {'NYC': pd.read_csv(root_path + 'NYC_reviews.csv')}
NYC_listings = {'01': pd.read_csv(root_path + '2020/NYC/listings_01.csv'),
'02': pd.read_csv(root_path + '2020/NYC/listings_02.csv'),
'03': pd.read_csv(root_path + '2020/NYC/listings_03.csv'),
'04': pd.read_csv(root_path + '2020/NYC/listings_04.csv'),
'05': pd.read_csv(root_path + '2020/NYC/listings_05.csv'),
'06': | pd.read_csv(root_path + '2020/NYC/listings_06.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 11:42:15 2018
@author: akumler
This script contains all the hard code that produces the solar forecast. The
solar forecation application imports this module to get desired data.
This is the second version, removing the requirement that a previous observation
is needed in order to make the forecast (reconstruction). Other improvements
are added.
"""
import pandas as pd
import numpy as np
from math import *
from datetime import datetime
import math
from pvlib.solarposition import *
from pvlib.atmosphere import *
from pvlib.clearsky import *
from pvlib.irradiance import *
# from bird_clear_sky_model import *
from sklearn.metrics import *
# import seaborn as sns;
#
# sns.set()
# import skill_metrics as sm
from scipy import stats
from datetime import datetime
import time
from time import strptime, strftime, mktime, gmtime
from calendar import timegm
def valid_datetime(date_time):
"""
Checks to make sure the datetime received from the platform is valid.
Parameters
----------
date_time: 'Pandas DatetimeIndex'
Current time. Usually a contains a year, month, day, hour, and minute.
Returns
-------
valid_time: 'datetimeindex'
Current time. If 'valid_datetime' receives an invalid input, one is
assumed from the previous valid time given.
"""
if (date_time is None):
valid_time = np.array([pd.Timestamp.now()])
valid_time = pd.DatetimeIndex(valid_time).round('min')
return valid_time
elif (isinstance(date_time, datetime) == True):
valid_time = np.array([ | pd.to_datetime(date_time) | pandas.to_datetime |
import os
import json
import torch
import pandas as pd
import argparse
import habitat
import habitat_extensions
import tqdm
import habitat_sim
from config.default import get_config
from tools.generate_topdown_maps.gt_map_generator import DummyRLEnv, get_items_list
def get_obj_per_scene(env,label_idx):
#Get the semantic scene of the env
scene = env.habitat_env.sim.semantic_scene
#Map all objects and their position/dimension in the current floor
obj_pos = {}
for name in label_idx.keys():
obj_pos[name] = {obj.id : [obj.aabb.center,obj.aabb.sizes]
for obj in scene.objects
if name == obj.category.name()}
return obj_pos
def switch_to_next_scene(env, scene_id):
env.habitat_env.current_episode.scene_id = scene_id
env.habitat_env.reconfigure(env.habitat_env._config)
_ = env.habitat_env.task.reset(env.habitat_env.current_episode)
def main(args):
file_path = args.map_info
with open(file_path) as json_file:
all_maps_info = json.load(json_file)
_ , label_idx = get_items_list("data/mpcat40.tsv")
config_path = "tools/generate_topdown_maps/config/mp3d_train.yaml"
minDistance = 0
maxDistance = 2.5
config = get_config(config_path)
config = habitat_extensions.get_extended_config(config_path)
try:
env.close()
except NameError:
pass
env = DummyRLEnv(config=config)
env.seed(1234)
device = torch.device("cuda:0")
_ = env.reset()
scene_objects = {}
scene_ids = sorted(list(all_maps_info.keys()))
for scene_id in scene_ids:
scene_objects[scene_id] = {}
floor_id = 0
for scene_floor in all_maps_info[scene_id]:
scene_objects[scene_id][floor_id] = {}
floor_id += 1
for scene in tqdm.tqdm(scene_ids):
switch_to_next_scene(env, all_maps_info[scene][0]["scene_id"])
obj_pos = get_obj_per_scene(env,label_idx)
for floor_id in scene_objects[scene]:
floor_height = all_maps_info[scene][floor_id]["floor_height"]
floor_objects = {}
scene_objects[scene][floor_id] = floor_objects
for target_obj in obj_pos.keys():
object_class = target_obj
floor_objects[object_class] = 0
target_objects = obj_pos[object_class]
for obj_id in target_objects:
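                    # target_objects[obj_id] holds [aabb.center, aabb.sizes]; center[1]
                    # is the vertical coordinate compared against the floor height below.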
objectHeight = target_objects[obj_id][0][1]
dObjectToFloor = objectHeight - floor_height
if dObjectToFloor > minDistance and dObjectToFloor < maxDistance: #Check if Object is within 2.5m above the floor height
floor_objects[object_class] += 1
else:
continue
#Create dataframe out of scene_objects dictionary
df_scene_objects = pd.concat({k: | pd.DataFrame.from_dict(v, 'index') | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
from scipy.stats import hmean
import cirpy
import datetime
from matplotlib import pyplot as plt
import seaborn as sns
from data_loader import GraphCancerMolecules
sns.set()
sns.set_context('talk')
def read_in_cpdb():
cpdb_lit = pd.read_csv('../data/cpdb.lit.tab.txt', sep='\t')
cpdb_nci = pd.read_csv('../data/cpdb.ncintp.tab.txt', sep='\t')
cpdb_df = pd.concat([cpdb_lit, cpdb_nci])
print(f"number of unique chemcodes in rats cpdb {cpdb_df[cpdb_df['species']=='r']['chemcode'].nunique()}")
print(f"number of unique chemcodes in cpdb {cpdb_df['chemcode'].nunique()}")
cpdb_name = pd.read_csv('../data/cpdb_name.tsv', sep='\t')
assert not cpdb_name['chemcode'].duplicated().sum()
cpdb_df_len = len(cpdb_df)
cpdb_df = cpdb_df.merge(cpdb_name, on='chemcode')
assert cpdb_df_len == len(cpdb_df)
cpdb_df['td50_log'] = np.log10(cpdb_df['td50'])
return cpdb_df
def genereate_summary_cpdb_plots(cpdb_df):
plt.figure(figsize=(20,5))
vc = cpdb_df['chemcode'].value_counts()
print(f"Number of experiments per compound stats: \n{pd.Series(vc.values).describe()}")
print(f"number of compounds with more than 50 studies: {len(vc[vc>50])}")
sns.distplot(vc[vc<=50],kde=False, bins=50)
plt.xticks(np.arange(1,51, 1))
plt.title('Number of experiments per compound')
plt.xlim(0, 50)
plt.savefig('../plots/cpdb_num_experiments_per_compound.pdf')
plt.figure(figsize=(10,5))
plt.title('Distribution of TD50 values per animal')
ax = sns.violinplot(data=cpdb_df[cpdb_df['species'].isin(['m', 'r', 'h'])],x='species', y='td50_log')
sns.stripplot(data=cpdb_df[cpdb_df['species'].isin(['m', 'r', 'h'])],x='species', y='td50_log',linewidth=1, color='white',alpha=.5)
ax.set_xticklabels(['rat', 'mouse', 'hamster'])
plt.savefig('../plots/TD50_distribution_per_species.pdf')
def agregate_cpdb(cpdb_df):
# These cas IDS map to the same smiles strings. So first remap these cas IDS so don't have to do
# SMILES matching on all the rows
cas_map = {'14026-03-0': '36702-44-0', '104-46-1': '4180-23-8', '13073-35-3': '67-21-0', '1150-37-4': '1150-42-1',
'121-14-2': '25321-14-6', '72-20-8': '60-57-1', '319-84-6': '58-89-9', '319-85-7': '58-89-9',
'608-73-1': '58-89-9', '764-41-0': '110-57-6', '107-06-2':
'7572-29-4', '73-22-3': '54-12-6', '100-63-0': '59-88-1', '488-41-5': '10318-26-0', '9006-42-2': '12122-67-7'}
cpdb_df['cas'] = [cas_map[x] if x in cas_map.keys() else x for x in cpdb_df['cas'].values]
cpdb_g = cpdb_df[~cpdb_df['cas'].duplicated()][['name', 'cas', 'chemcode', 'species']]
cpdb_g_len = len(cpdb_g)
temp = cpdb_df[['cas', 'td50_log', 'td50', ]].groupby('cas').min().reset_index()
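    # Keep the minimum TD50 per CAS number across all experiments, then tag the
    # aggregated columns with a _min suffix before merging back onto cpdb_g.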
columns_t = temp.columns.tolist()
columns_t.remove('cas')
columns_t = {x: x + '_min' for x in columns_t}
temp.rename(columns=columns_t, inplace=True)
cpdb_g = | pd.merge(cpdb_g, temp, on='cas') | pandas.merge |
import json, logging, pickle, os, sys, time
from shapely import geometry
from pulp import *
import pandas as pd
import geopandas as gpd
import numpy as np
from geopy.distance import geodesic
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
tic = time.time()
from shapely.strtree import STRtree
import networkx as nx
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
gpd.options.use_pygeos=False
def milp_geodesic_network_satisficing(pts_A, pts_B, alpha,mipgap=0.0001,v=False):
pts_A_dict = {pt.name:pt for pt in pts_A}
pts_B_dict = {pt.name:pt for pt in pts_B}
A_names = [pt.name for pt in pts_A]
B_names = [pt.name for pt in pts_B]
Z = {pt.name:{} for pt in pts_A}
MW_A = {pt.name:pt.MW for pt in pts_A}
MW_B = {pt.name:pt.MW for pt in pts_B}
if v:
print ('generating Z..')
for pt_A in pts_A:
for pt_B in pts_B:
Z[pt_A.name][pt_B.name]=(geodesic([pt_A.y,pt_A.x], [pt_B.y,pt_B.x]).kilometers)**2
sum_Z = sum([Z[A_name][B_name] for A_name in A_names for B_name in B_names])
### declare model
model = LpProblem("Network Satisficing Problem",LpMinimize)
### Declare variables
# B -> Bipartite Network
B = LpVariable.dicts("Bipartite",(A_names,B_names),0,1,LpInteger)
# abs_diffs -> absolute value forcing variable
abs_diffs = LpVariable.dicts("abs_diffs",B_names,cat='Continuous')
### Declare constraints
# Contstraint - abs diffs edges
for B_name in B_names:
model += abs_diffs[B_name] >= (MW_B[B_name] - lpSum([MW_A[A_name]*B[A_name][B_name] for A_name in A_names]))/MW_B[B_name],"abs forcing pos {}".format(B_name)
model += abs_diffs[B_name] >= -1 * (MW_B[B_name] - lpSum([MW_A[A_name]*B[A_name][B_name] for A_name in A_names]))/MW_B[B_name], "abs forcing neg {}".format(B_name)
# Constraint - bipartite edges
for A_name in A_names:
model += lpSum([B[A_name][B_name] for B_name in B_names]) <= 1,"Bipartite Edges {}".format(A_name)
### Affine equations
# Impedence error
E_z = sum([Z[A_name][B_name]*B[A_name][B_name] for A_name in A_names for B_name in B_names])/sum_Z
# mw error
E_mw = sum([abs_diffs[B_name] for B_name in B_names])/len(B_names)
### Objective function
model += E_z*alpha + (1-alpha)*E_mw, "Loss"
if v:
print ('solving model...')
model.solve(pulp.GUROBI_CMD(options=[('MIPGap',str(mipgap)),('OutputFlag', str(0))]))
if v:
print(pulp.LpStatus[model.status])
return model, B, E_z, E_mw, Z
class MatchRegion:
def __init__(self, match):
self.match=match
if match=='wri':
self.ini_target_gdf= self.prep_wri()
elif match=='eia':
self.ini_target_gdf = self.prep_eia()
source_gdf = gpd.read_file(os.path.join(os.getcwd(),'data','ABCD_landcover.geojson')).reset_index()
source_gdf['capacity_mw'] = source_gdf['area']*44.2/1000/1000
source_gdf = source_gdf.rename(columns={'index':'unique_id'})
source_gdf.geometry = source_gdf.geometry.representative_point()
self.ini_source_gdf = source_gdf[['unique_id','iso-3166-1','capacity_mw','geometry']]
self.ne = gpd.read_file(os.path.join(os.getcwd(),'data','ne_10m_countries.gpkg'))
logger.info('Source gdf:')
print (self.ini_source_gdf)
logger.info('Target gdf:')
print (self.ini_target_gdf)
def prep_wri(self):
"""
output: gdf with lat/lon point geom, iso-3166-1, a unique_id, and MW capacity
"""
wri = pd.read_csv(os.path.join(os.getcwd(),'data','WRI_gppd.csv'))
iso2 = pd.read_csv(os.path.join(os.getcwd(),'data','iso2.csv'))
# attach country iso-3166-1 to wri
wri = wri.merge(iso2[['iso2','iso3']], how='left',left_on='country',right_on='iso3')
#filter solar PV
wri = wri[wri['fuel1']=='Solar']
# rename iso2
wri = wri.rename(columns={'iso2':'iso-3166-1','gppd_idnr':'unique_id'})
# combine coordinates
wri['coordinates'] = wri[['longitude','latitude']].values.tolist()
# convert to shapely obj
wri['geometry'] = wri['coordinates'].map(geometry.Point)
wri = gpd.GeoDataFrame(wri[['unique_id','iso-3166-1','capacity_mw']], geometry=wri['geometry'], crs={'init':'epsg:4326'})
return wri
def prep_eia(self):
"""
output: gdf with lat/lon point geom, iso-3166-1, a unique_id, and MW capacity
"""
# load file
eia = gpd.read_file(os.path.join(os.getcwd(),'data','eia_powerstations','PowerPlants_US_202001.shp'))
# add iso-3166-1
eia['iso-3166-1'] = 'US'
# filter solar
eia = eia[eia['PrimSource']=='solar']
# rename cols
eia = eia.rename(columns={'Plant_Code':'unique_id','Install_MW':'capacity_mw'})
# keep cols
eia = eia[['geometry','unique_id','capacity_mw','iso-3166-1']]
return eia
def get_components(self):
"""
use source and target gdfs to create a network, get the connected components of the network
"""
source_gdf = self.source_gdf.to_crs({'init':'epsg:3395'})
source_gdf['3395_x'] = source_gdf.geometry.x
source_gdf['3395_y'] = source_gdf.geometry.y
target_gdf = self.target_gdf.to_crs({'init':'epsg:3395'})
target_gdf['3395_x'] = target_gdf.geometry.x
target_gdf['3395_y'] = target_gdf.geometry.y
G = nx.Graph()
source_df = pd.DataFrame(source_gdf)
target_df = pd.DataFrame(target_gdf)
def _match_st(row):
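            # Planar distance test in the EPSG:3395 projection: targets within
            # self.buffer projected units (metres) of this source row are candidates.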
bool_ind = (((target_df['3395_x'] - row['3395_x'])**2 + (target_df['3395_y'] - row['3395_y'])**2)**(1/2))<self.buffer
return target_df[bool_ind]['unique_id'].values.tolist()
source_df['sjoin_ids'] = source_df.apply(lambda row: _match_st(row), axis=1)
logger.info('Non-matched df:')
print (source_df[source_df['sjoin_ids'].str.len()<1])
for ii_r,row in enumerate(source_df.iterrows()):
if ii_r %100==0:
print ('#', end='')
G.add_edges_from([(row[1]['unique_id'],ii) for ii in row[1]['sjoin_ids']])
        logger.info(f'n nodes {len(G.nodes)}')
        logger.info(f'n edges {len(G.edges)}')
self.G = G
def run_main(self,region, dist_buffer=10000, alpha=0.15, mipgap=0.0001):
self.buffer=dist_buffer
self.alpha=alpha
self.mipgap=mipgap
self.region=region
if region: # must be list(iso-3166-1), for now.
self.source_gdf = self.ini_source_gdf[self.ini_source_gdf['iso-3166-1'].isin(region)]
self.target_gdf = self.ini_target_gdf[self.ini_target_gdf['iso-3166-1'].isin(region)]
self.outpath = os.path.join(os.getcwd(),'data','_'.join(['match',self.match,'-'.join(self.region),str(self.buffer),str(self.alpha)]))
self.get_components()
        logger.info(f'n connected components: {len([_ for _ in nx.connected_components(self.G)])}')
self.source_gdf['match_id'] = ''
for ii_c, cc in enumerate(nx.connected_components(self.G)):
print (f'Running component: {ii_c}, len: {len(cc)}', end=' ')
B, E_z, E_mw = self.run_component(cc)
matches = 0
for kk, vv in B.items():
for kk2, vv2 in vv.items():
if vv2.value()>0:
matches+=1
self.source_gdf.loc[kk,'match_id']=kk2
print (f'found matches: {matches}, time: {time.time()-tic}')
self.source_gdf.to_file(self.outpath+'.gpkg', driver="GPKG")
def visualise(self, bounds = None):
logger.info('Visualising...')
fig, ax = plt.subplots(1,1,figsize=(72,72))
self.ne[self.ne['ISO_A2'].isin(self.region)].boundary.plot(ax=ax, color='grey')
# plot the scatter -> how to adjust sizes?
self.source_gdf.plot(ax=ax, marker='o',color='g',markersize=self.source_gdf['capacity_mw'])
self.target_gdf.plot(ax=ax, marker='o',color='r',markersize=self.target_gdf['capacity_mw'])
# plot the links
link_df = pd.DataFrame(self.source_gdf)
link_df = link_df[link_df['match_id']!='']
link_df = link_df.merge( | pd.DataFrame(self.target_gdf[['unique_id','geometry']]) | pandas.DataFrame |
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
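# SparseArray stores only the values that differ from fill_value, together with an
# integer or block sparse index (the "kind" parametrized above).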
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0., 2.],
dtype=dtype.subtype),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match='NA'):
arr.astype('Sparse[i8]')
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True],
dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray([True, False, False, True],
dtype=SparseDtype(bool, False))
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.to_dense()),
vals.astype(typ))
@pytest.mark.parametrize('array, dtype, expected', [
(SparseArray([0, 1]), 'float',
SparseArray([0., 1.], dtype=SparseDtype(float, 0.0))),
(SparseArray([0, 1]), bool, SparseArray([False, True])),
(SparseArray([0, 1], fill_value=1), bool,
SparseArray([False, True], dtype=SparseDtype(bool, True))),
pytest.param(
SparseArray([0, 1]), 'datetime64[ns]',
SparseArray(np.array([0, 1], dtype='datetime64[ns]'),
dtype=SparseDtype('datetime64[ns]',
pd.Timestamp('1970'))),
marks=[pytest.mark.xfail(reason="NumPy-7619")],
),
(SparseArray([0, 1, 10]), str,
SparseArray(['0', '1', '10'], dtype=SparseDtype(str, '0'))),
(SparseArray(['10', '20']), float, SparseArray([10.0, 20.0])),
(SparseArray([0, 1, 0]), object,
SparseArray([0, 1, 0], dtype=SparseDtype(object, 0))),
])
def test_astype_more(self, array, dtype, expected):
result = array.astype(dtype)
tm.assert_sp_array_equal(result, expected)
def test_astype_nan_raises(self):
arr = SparseArray([1.0, np.nan])
with pytest.raises(ValueError, match='Cannot convert non-finite'):
arr.astype(int)
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# XXX: this seems fine? You can construct an integer
# sparsearray with NaN fill value, why not update one?
# coerces to int
# msg = "unable to set fill_value 3\\.1 to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 3.1
assert arr.fill_value == 3.1
# msg = "unable to set fill_value nan to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
# msg = "unable to set fill_value 0 to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 0
assert arr.fill_value == 0
# msg = "unable to set fill_value nan to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
def test_set_fill_invalid_non_scalar(self, val):
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
msg = "fill_value must be a scalar"
with pytest.raises(ValueError, match=msg):
arr.fill_value = val
def test_copy(self):
arr2 = self.arr.copy()
assert arr2.sp_values is not self.arr.sp_values
assert arr2.sp_index is self.arr.sp_index
def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
@pytest.mark.parametrize('data,shape,dtype', [
([0, 0, 0, 0, 0], (5,), None),
([], (0,), None),
([0], (1,), None),
(['A', 'A', np.nan, 'B'], (4,), np.object)
])
def test_shape(self, data, shape, dtype):
# GH 21126
out = SparseArray(data, dtype=dtype)
assert out.shape == shape
@pytest.mark.parametrize("vals", [
[np.nan, np.nan, np.nan, np.nan, np.nan],
[1, np.nan, np.nan, 3, np.nan],
[1, np.nan, 0, 3, 0],
])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_dense_repr(self, vals, fill_value):
vals = np.array(vals)
arr = SparseArray(vals, fill_value=fill_value)
res = arr.to_dense()
tm.assert_numpy_array_equal(res, vals)
with tm.assert_produces_warning(FutureWarning):
res2 = arr.get_values()
tm.assert_numpy_array_equal(res2, vals)
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.to_dense()[i])
for i in range(len(self.arr)):
_checkit(i)
_checkit(-i)
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.to_dense()[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.to_dense()[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.to_dense()[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ], fill_value=0)
tm.assert_sp_array_equal(res, exp)
with pytest.raises(IndexError):
sparse[4:, :]
with pytest.raises(IndexError):
# check numpy compat
dense[4:, :]
def test_boolean_slice_empty(self):
arr = pd.SparseArray([0, 1, 2])
res = arr[[False, False, False]]
assert res.dtype == arr.dtype
@pytest.mark.parametrize("op", ["add", "sub", "mul",
"truediv", "floordiv", "pow"])
def test_binary_operators(self, op):
op = getattr(operator, op)
data1 = np.random.randn(20)
data2 = np.random.randn(20)
data1[::2] = np.nan
data2[::3] = np.nan
arr1 = SparseArray(data1)
arr2 = SparseArray(data2)
data1[::2] = 3
data2[::3] = 3
farr1 = SparseArray(data1, fill_value=3)
farr2 = SparseArray(data2, fill_value=3)
def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(op(first.to_dense(), second.to_dense()),
fill_value=first.fill_value)
assert isinstance(res, SparseArray)
assert_almost_equal(res.to_dense(), exp.to_dense())
res2 = op(first, second.to_dense())
assert isinstance(res2, SparseArray)
tm.assert_sp_array_equal(res, res2)
res3 = op(first.to_dense(), second)
assert isinstance(res3, SparseArray)
tm.assert_sp_array_equal(res, res3)
res4 = op(first, 4)
assert isinstance(res4, SparseArray)
# Ignore this if the actual op raises (e.g. pow).
try:
exp = op(first.to_dense(), 4)
exp_fv = op(first.fill_value, 4)
except ValueError:
pass
else:
assert_almost_equal(res4.fill_value, exp_fv)
assert_almost_equal(res4.to_dense(), exp)
with np.errstate(all="ignore"):
for first_arr, second_arr in [(arr1, arr2), (farr1, farr2)]:
_check_op(op, first_arr, second_arr)
def test_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
tm.assert_sp_array_equal(unpickled, obj)
_check_roundtrip(self.arr)
_check_roundtrip(self.zarr)
def test_generator_warnings(self):
sp_arr = SparseArray([1, 2, 3])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(action='always',
category=DeprecationWarning)
warnings.filterwarnings(action='always',
category=PendingDeprecationWarning)
for _ in sp_arr:
pass
assert len(w) == 0
def test_fillna(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0])
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan])
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)
| tm.assert_sp_array_equal(res, exp) | pandas.util.testing.assert_sp_array_equal |
# -*- coding: utf-8 -*-
# Name: pypandas
# Version: 0.1a3
# Owner: <NAME>
import pandas as pd
def employees_num(engine):
"""Get number of employees"""
df = pd.read_sql_table('employees', con=engine)
num = df.employee_id.count()
return num
def sales_num_per_category(engine):
"""Get sales per category"""
df1 = pd.read_sql_table('order_details', con=engine)
df1 = df1[['order_id', 'product_id']]
df2 = pd.read_sql_table('products', con=engine)
df2 = df2[['product_id', 'category_id']]
df3 = | pd.merge(df1, df2, on='product_id', how='outer') | pandas.merge |
import pandas as pd
from fairlens.metrics.correlation import distance_cn_correlation, distance_nn_correlation
from fairlens.sensitive.correlation import find_column_correlation, find_sensitive_correlations
pair_race = "race", "Ethnicity"
pair_age = "age", "Age"
pair_marital = "marital", "Family Status"
pair_gender = "gender", "Gender"
pair_nationality = "nationality", "Nationality"
def test_correlation():
col_names = ["gender", "random", "score"]
data = [
["male", 10, 60],
["female", 10, 80],
["male", 10, 60],
["female", 10, 80],
["male", 9, 59],
["female", 11, 80],
["male", 12, 61],
["female", 10, 83],
]
df = pd.DataFrame(data, columns=col_names)
res = {"score": [pair_gender]}
assert find_sensitive_correlations(df) == res
def test_double_correlation():
col_names = ["gender", "nationality", "random", "corr1", "corr2"]
data = [
["woman", "spanish", 715, 10, 20],
["man", "spanish", 1008, 20, 20],
["man", "french", 932, 20, 10],
["woman", "french", 1300, 10, 10],
]
df = pd.DataFrame(data, columns=col_names)
res = {"corr1": [pair_gender], "corr2": [pair_nationality]}
assert find_sensitive_correlations(df) == res
def test_multiple_correlation():
col_names = ["race", "age", "score", "entries", "marital", "credit", "corr1"]
data = [
["arabian", 21, 10, 2000, "married", 10, 60],
["carribean", 20, 10, 3000, "single", 10, 90],
["indo-european", 41, 10, 1900, "widowed", 10, 120],
["carribean", 40, 10, 2000, "single", 10, 90],
["indo-european", 42, 10, 2500, "widowed", 10, 120],
["arabian", 19, 10, 2200, "married", 10, 60],
]
df = pd.DataFrame(data, columns=col_names)
res = {"corr1": [pair_race, pair_marital]}
assert find_sensitive_correlations(df, corr_cutoff=0.9) == res
def test_common_correlation():
col_names = ["race", "age", "score", "entries", "marital", "credit", "corr1", "corr2"]
data = [
["arabian", 21, 10, 2000, "married", 10, 60, 120],
["carribean", 20, 10, 3000, "single", 10, 90, 130],
["indo-european", 41, 10, 1900, "widowed", 10, 120, 210],
["carribean", 40, 10, 2000, "single", 10, 90, 220],
["indo-european", 42, 10, 2500, "widowed", 10, 120, 200],
["arabian", 19, 10, 2200, "married", 10, 60, 115],
]
df = | pd.DataFrame(data, columns=col_names) | pandas.DataFrame |
import asyncio
import pickle
import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
from matplotlib import cm
from importlib import reload  # allows reloading of modules
from sklearn.decomposition import PCA
from scipy.optimize import curve_fit
import ipywidgets as widgets
from IPython.display import display, clear_output
import pmagpy.pmag as pmag
import pmagpy.ipmag as ipmag
from pmagpy import contribution_builder as cb
#The sortarai function from pmagpy, this will soon be modified so that additivity checks work.
model_circle_fast=pickle.load(open('model_circle_fast.pkl','rb'))
model_circle_slow=pickle.load(open('model_circle_slow.pkl','rb'))
def get_mad(IZZI):
"""Calculates the free Maximum Angle of Deviation (MAD) of Kirshvink et al (1980)"""
pca=PCA(n_components=3)
fit=pca.fit(IZZI.loc[:,'NRM_x':'NRM_z'].values).explained_variance_
MAD=np.degrees(np.arctan(np.sqrt((fit[2]+fit[1])/(fit[0]))))
return MAD
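# Illustrative note (not from the original module): the free MAD computed above is
# MAD = arctan(sqrt((lambda_2 + lambda_3) / lambda_1)) in degrees, where
# lambda_1 >= lambda_2 >= lambda_3 are the PCA eigenvalues of the NRM vector
# endpoints. For example, eigenvalues [0.98, 0.015, 0.005] give
# np.degrees(np.arctan(np.sqrt(0.02 / 0.98))) ~= 8.1 degrees; a straighter
# demagnetization trajectory concentrates variance in lambda_1 and lowers MAD.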
def TaubinSVD(x,y):
"""
Function from PmagPy
algebraic circle fit
input: list [[x_1, y_1], [x_2, y_2], ....]
output: a, b, r. a and b are the center of the fitting circle, and r is the radius
Algebraic circle fit by Taubin
<NAME>, "Estimation Of Planar Curves, Surfaces And Nonplanar
Space Curves Defined By Implicit Equations, With
Applications To Edge And Range Image Segmentation",
IEEE Trans. PAMI, Vol. 13, pages 1115-1138, (1991)
"""
X = np.array(list(map(float, x)))
Xprime=X
Y = np.array(list(map(float, y)))
Yprime=Y
XY = np.array(list(zip(X, Y)))
XY = np.array(XY)
X = XY[:,0] - np.mean(XY[:,0]) # norming points by x avg
Y = XY[:,1] - np.mean(XY[:,1]) # norming points by y avg
centroid = [np.mean(XY[:,0]), np.mean(XY[:,1])]
Z = X * X + Y * Y
Zmean = np.mean(Z)
Z0 = (Z - Zmean)/(2. * np.sqrt(Zmean))
ZXY = np.array([Z0, X, Y]).T
U, S, V = np.linalg.svd(ZXY, full_matrices=False) #
V = V.transpose()
A = V[:,2]
A[0] = A[0]/(2. * np.sqrt(Zmean))
A = np.concatenate([A, [(-1. * Zmean * A[0])]], axis=0)
a, b = (-1 * A[1:3]) / A[0] / 2 + centroid
r = np.sqrt(A[1]*A[1]+A[2]*A[2]-4*A[0]*A[3])/abs(A[0])/2
errors=[]
for i in list(range(0,len(Xprime)-1)):
errors.append((np.sqrt((Xprime[i]-a)**2+(Yprime[i]-b)**2)-r)**2)
sigma=np.sqrt((sum(errors))/(len(Xprime)-1))
return a,b,r,sigma
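# Usage sketch (hypothetical points, kept as a comment so importing this module
# has no side effects). Points sampled from a circle of radius 2 centred at
# (1, 3) should be recovered almost exactly; sigma reports the RMS radial
# misfit of the points used in the fit:
#   theta = np.linspace(0, np.pi, 10)
#   x = 1 + 2 * np.cos(theta)
#   y = 3 + 2 * np.sin(theta)
#   a, b, r, sigma = TaubinSVD(x, y)   # a ~= 1, b ~= 3, r ~= 2, sigma ~= 0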
def get_drat(IZZI,IZZI_trunc,P):
"""Calculates the difference ratio (DRAT) of pTRM checks
(Selkin and Tauxe, 2000) to check for alteration"""
IZZI_reduced=IZZI[IZZI.temp_step.isin(P.temp_step)]
a=np.sum((IZZI_trunc.PTRM-np.mean(IZZI_trunc.PTRM))*(IZZI_trunc.NRM-np.mean(IZZI_trunc.NRM)))
b=a/np.abs(a)*np.sqrt(np.sum((IZZI_trunc.NRM-np.mean(IZZI_trunc.NRM))**2)/np.sum((IZZI_trunc.PTRM-np.mean(IZZI_trunc.PTRM))**2))
yint=np.mean(IZZI_trunc.NRM)-b*np.mean(IZZI_trunc.PTRM)
line={'slope':b,'intercept':yint}
xprime=0.5*(IZZI_trunc.PTRM+(IZZI_trunc.NRM-line['intercept'])/line['slope'])
yprime=0.5*(IZZI_trunc.NRM+line['slope']*IZZI_trunc.PTRM+line['intercept'])
scalefactor=np.sqrt((min(xprime)-max(xprime))**2+(min(yprime)-max(yprime))**2)
absdiff=max(np.abs(P.PTRM.values-IZZI_reduced.PTRM.values)/scalefactor)*100
return(absdiff)
def calculate_anisotropy_correction(IZZI):
"""Calculates anisotropy correction factor for a
paleointensity interpretation, given an s tensor"""
#Convert the s tensor into a numpy array
strlist=IZZI['s_tensor'].iloc[0].split(':')
slist=[]
for stringo in strlist:
slist.append(float(stringo.strip()))
stensor=np.array([[slist[0],slist[3],slist[5]],[slist[3],slist[1],slist[4]],[slist[5],slist[4],slist[2]]])
#Fit a PCA to the IZZI directions
NRM_trunc_dirs=IZZI.loc[:,'NRM_x':'NRM_z']
pca=PCA(n_components=3)
pca=pca.fit(NRM_trunc_dirs)
#Calculate the anisotropy correction factor (see Standard Paleointensity Definitions)
vector=pca.components_[0]
vector=vector/np.sqrt(np.sum(vector**2))
ancvector=np.matmul(np.linalg.inv(stensor),vector)
ancvector=ancvector/np.sqrt(np.sum(ancvector**2))
labmag=np.matmul(stensor,np.array([0,0,-1]))
ancmag=np.matmul(stensor,ancvector)
c=np.sqrt(np.sum(labmag**2))/np.sqrt(np.sum(ancmag**2))
return(c)
def calculate_NLT_correction(IZZI,c):
"""Calculates the correction for non linear TRM for a paleointensity interpretation, given the anisotropy and cooling rate corrections"""
a=np.sum((IZZI.PTRM-np.mean(IZZI.PTRM))*(IZZI.NRM-np.mean(IZZI.NRM)))
b=a/np.abs(a)*np.sqrt(np.sum((IZZI.NRM-np.mean(IZZI.NRM))**2)/np.sum((IZZI.PTRM-np.mean(IZZI.PTRM))**2))
beta=IZZI['NLT_beta'].iloc[0]
correction=c*IZZI.correction.iloc[0]
B_lab=IZZI.B_lab.iloc[0]*1e6
total_correction=(np.arctanh(correction*np.abs(b)*np.tanh(beta*B_lab)))/(np.abs(b)*beta*B_lab)
return(total_correction)
def prep_data_for_fitting(IZZI_filtered,IZZI_original):
"""Returns the needed data for a paleointensity interpretation to perform the BiCEP method (Cych et al, in prep.), calculates all corrections for a specimen"""
specimen=IZZI_original.specimen.iloc[0] #Specimen name
methcodes='' #String For Method Codes
extracolumnsdict={} #Extra Column Information for MagIC export (corrections)
#Calculate Anisotropy Correction:
if len(IZZI_original.dropna(subset=['s_tensor']))>0:
c=calculate_anisotropy_correction(IZZI_filtered)
extracolumnsdict['int_corr_aniso']=c
#Get method code depending on anisotropy type (AARM or ATRM)
methcodes+=IZZI_original['aniso_type'].iloc[0]
else:
c=1
#Get Cooling Rate Correction
if IZZI_original.correction.iloc[0]!=1:
methcodes+=':LP-CR-TRM' #method code for cooling rate correction
extracolumnsdict['int_corr_cooling_rate']=IZZI_original.correction.iloc[0]
#Calculate nonlinear TRM Correction
if len(IZZI_original.dropna(subset=['NLT_beta']))>0:
methcodes+=':DA-NL' #method code for nonlinear TRM correction
total_correction=calculate_NLT_correction(IZZI_filtered,c) #total correction (combination of all three corrections)
extracolumnsdict['int_corr_nlt']=total_correction/(c*IZZI_original.correction.iloc[0]) #NLT correction is total correction/original correction.
else:
total_correction=c*IZZI_original.correction.iloc[0]
#Converting Arai plot data to useable form
NRM0=IZZI_original.NRM.iloc[0]
NRMS=IZZI_filtered.NRM.values/NRM0
PTRMS=IZZI_filtered.PTRM.values/NRM0/total_correction #We divide our pTRMs by the total correction, because we scale the pTRM values so that the maximum pTRM is one, this doesn't affect the fit and just gets scaled back when converting the circle tangent slopes back to intensities as would be expected, but it's easier to apply this here.
PTRMmax=max(IZZI_original.PTRM/NRM0/total_correction) #We scale by our maximum pTRM to perform the circle fit.
line=bestfit_line(IZZI_original.PTRM/NRM0/total_correction,IZZI_original.NRM/NRM0) #best fitting line to the pTRMs
scale=np.sqrt((line['intercept']/line['slope'])**2+(line['intercept'])**2) #Flag- is this ever used?
PTRMS=PTRMS/PTRMmax #Scales the pTRMs so the maximum pTRM is one
#We subtract the minimum pTRM and NRM to maintain aspect ratio and make circle fitting easier.
minPTRM=min(PTRMS)
minNRM=min(NRMS)
PTRMS=PTRMS-minPTRM
NRMS=NRMS-minNRM
#We perform the Taubin least squares circle fit to get values close to the Bayesian maximum likelihood to initialize our MCMC sampler at, this makes sampling a lot easier than initializing at a random point (which may have infinitely low probability).
x_c,y_c,R,sigma=TaubinSVD(PTRMS,NRMS) #Calculate x_c,y_c and R
dist_to_edge=abs(np.sqrt(x_c**2+y_c**2)-R) #Calculate D (dist_to_edge)
phi=np.radians(np.degrees(np.arctan(y_c/x_c))%180)
#Calculate (and ensure the sign of) k
if y_c<0:
k=-1/R
else:
k=1/R
B_lab=IZZI_filtered.B_lab.unique()[0]*1e6
return(scale,minPTRM,minNRM,PTRMmax,k,phi,dist_to_edge,sigma,PTRMS,NRMS,B_lab,methcodes,extracolumnsdict)
def BiCEP_fit(specimenlist,temperatures=None,n_samples=30000,priorstd=20,model=None,**kwargs):
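    """
    Runs the BiCEP circle-fit sampler for a list of specimens.
    Each specimen's Arai data are prepared with prep_data_for_fitting,
    optionally restricted to the per-specimen (lower, upper) temperature
    bounds supplied in `temperatures`, and the pooled data are passed to a
    compiled Stan model (model_circle_slow for fewer than 7 specimens,
    model_circle_fast otherwise, unless `model` is supplied). Returns the
    sampler fit object plus dictionaries of new method codes and extra
    correction columns keyed by specimen.
    """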
minPTRMs=[]
minNRMs=[]
IZZI_list=[]
B_lab_list=[]
klist=[]
NRM0s=[]
pTRMsList=np.array([])
NRMsList=np.array([])
lengths=[]
philist=[]
dist_to_edgelist=[]
B_ancs=[]
dmaxlist=[]
PTRMmaxlist=[]
centroidlist=[]
spec_old=''
newmethcodes={}
newcolumns={}
i=0
for specimen in specimenlist:
if spec_old==specimen:
i+=1
else:
i=0
spec_old=specimen
IZZI_original=temps[(temps.specimen==specimen)&((temps.steptype=='IZ')|(temps.steptype=="ZI"))]
        if temperatures is None:
IZZI_filtered=IZZI_original
else:
IZZI_filtered=IZZI_original[(IZZI_original.temp_step>=temperatures[specimen][i,0])&(IZZI_original.temp_step<=temperatures[specimen][i,1])]
scale,minPTRM,minNRM,PTRMmax,k,phi,dist_to_edge,sigma,PTRMS,NRMS,B_lab,methcodestr,extracolumnsdict=prep_data_for_fitting(IZZI_filtered,IZZI_original)
newcolumns[specimen]=extracolumnsdict
newmethcodes[specimen]=methcodestr
if len(IZZI_filtered)<=3:
print('Specimen Rejected- Too Few Points to make an interpretation')
NRM0=IZZI_filtered.NRM.iloc[0]
minPTRMs.append(minPTRM)
minNRMs.append(minNRM)
line=bestfit_line(IZZI_filtered.PTRM,IZZI_filtered.NRM)
B_anc=-line['slope']*B_lab*IZZI_filtered.correction.iloc[0]
B_ancs.append(B_anc)
Pi,Pj=np.meshgrid(PTRMS,PTRMS)
Ni,Nj=np.meshgrid(NRMS,NRMS)
dmax=np.amax(np.sqrt((Pi-Pj)**2+(Ni-Nj)**2))
centroid=np.sqrt(np.mean(PTRMS)**2+np.mean(NRMS)**2)
IZZI_list.append(IZZI_filtered)
B_lab_list.append(B_lab)
klist.append(k)
philist.append(phi)
dist_to_edgelist.append(dist_to_edge)
NRM0s.append(NRM0)
pTRMsList=np.append(pTRMsList,PTRMS)
NRMsList=np.append(NRMsList,NRMS)
lengths.append(int(len(PTRMS)))
dmaxlist.append(dmax)
PTRMmaxlist.append(PTRMmax)
centroidlist.append(centroid)
    if model is None:
if len(specimenlist)<7:
model_circle=model_circle_slow
else:
model_circle=model_circle_fast
else:
model_circle=model
fit_circle=model_circle.sampling (
data={'I':len(pTRMsList),'M':len(lengths),'PTRM':pTRMsList,'NRM':NRMsList,'N':lengths,'PTRMmax':PTRMmaxlist,'B_labs':B_lab_list,'dmax':np.sqrt(dmaxlist),'centroid':centroidlist,'priorstd':priorstd},iter=n_samples,warmup=int(n_samples/2),
init=[{'k_scale':np.array(klist)*np.array(dist_to_edgelist),'phi':philist,'dist_to_edge':dist_to_edgelist,'int_real':B_ancs}]*4,**kwargs)
return(fit_circle,newmethcodes,newcolumns)
def sufficient_statistics(ptrm, nrm):
"""
    inputs lists of ptrm and nrm data and computes the sufficient statistics
    needed for the line-fit computations
"""
corr = np.cov( np.stack((ptrm, nrm), axis=0) )
return {'xbar': np.mean(ptrm), 'ybar': np.mean(nrm), 'S2xx': corr[0,0], 'S2yy': corr[1,1], 'S2xy': corr[0,1] }
def bestfit_line(ptrm, nrm):
"""
    returns the slope and intercept of the best-fit line to a set of (pTRM, NRM) points, using the Bayesian maximum likelihood estimate
"""
stat = sufficient_statistics(ptrm, nrm)
w = .5*(stat['S2xx'] - stat['S2yy'])/stat['S2xy']
m = -w-np.sqrt(w**2+1)
b = stat['ybar']-m*stat['xbar']
return {'slope': m, 'intercept': b }
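# Usage sketch (made-up numbers, kept as a comment so nothing runs on import).
# For points scattered about NRM = 2 - PTRM the fit recovers a slope near -1
# and an intercept near 2; note that the quadratic is solved for the
# negative-slope root, matching the downward trend of an Arai plot:
#   ptrm = np.array([0.0, 0.5, 1.0, 1.5, 2.0])
#   nrm = np.array([2.05, 1.48, 1.02, 0.51, 0.0])
#   bestfit_line(ptrm, nrm)   # ~{'slope': -1.01, 'intercept': 2.03}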
def sortarai(datablock, s, Zdiff, **kwargs):
"""
sorts data block in to first_Z, first_I, etc.
Parameters
_________
datablock : Pandas DataFrame with Thellier-Tellier type data
s : specimen name
Zdiff : if True, take difference in Z values instead of vector difference
NB: this should always be False
**kwargs :
version : data model. if not 3, assume data model = 2.5
Returns
_______
araiblock : [first_Z, first_I, ptrm_check,
ptrm_tail, zptrm_check, GammaChecks]
field : lab field (in tesla)
"""
if 'version' in list(kwargs.keys()) and kwargs['version'] == 3:
dec_key, inc_key, csd_key = 'dir_dec', 'dir_inc', 'dir_csd'
Mkeys = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude','dir_csd']
meth_key = 'method_codes'
temp_key, dc_key = 'treat_temp', 'treat_dc_field'
dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi'
# convert dataframe to list of dictionaries
datablock = datablock.to_dict('records')
else:
dec_key, inc_key, csd_key = 'measurement_dec', 'measurement_inc','measurement_csd'
Mkeys = ['measurement_magn_moment', 'measurement_magn_volume',
'measurement_magn_mass', 'measurement_magnitude']
meth_key = 'magic_method_codes'
temp_key, dc_key = 'treatment_temp', 'treatment_dc_field'
dc_theta_key, dc_phi_key = 'treatment_dc_field_theta', 'treatment_dc_field_phi'
first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], []
field, phi, theta = "", "", ""
starthere = 0
Treat_I, Treat_Z, Treat_PZ, Treat_PI, Treat_M = [], [], [], [], []
ISteps, ZSteps, PISteps, PZSteps, MSteps = [], [], [], [], []
GammaChecks = [] # comparison of pTRM direction acquired and lab field
rec = datablock[0]
for key in Mkeys:
if key in list(rec.keys()) and rec[key] != "":
momkey = key
break
# first find all the steps
for k in range(len(datablock)):
rec = datablock[k]
temp = float(rec[temp_key])
methcodes = []
tmp = rec[meth_key].split(":")
for meth in tmp:
methcodes.append(meth.strip())
if 'LT-T-I' in methcodes and 'LP-TRM' not in methcodes and 'LP-PI-TRM' in methcodes:
Treat_I.append(temp)
ISteps.append(k)
if field == "":
field = float(rec[dc_key])
if phi == "":
phi = float(rec[dc_phi_key])
theta = float(rec[dc_theta_key])
# stick first zero field stuff into first_Z
if 'LT-NO' in methcodes:
Treat_Z.append(temp)
ZSteps.append(k)
if 'LT-T-Z' in methcodes:
Treat_Z.append(temp)
ZSteps.append(k)
if 'LT-PTRM-Z' in methcodes:
Treat_PZ.append(temp)
PZSteps.append(k)
if 'LT-PTRM-I' in methcodes:
Treat_PI.append(temp)
PISteps.append(k)
if 'LT-PTRM-MD' in methcodes:
Treat_M.append(temp)
MSteps.append(k)
if 'LT-NO' in methcodes:
dec = float(rec[dec_key])
inc = float(rec[inc_key])
str = float(rec[momkey])
if csd_key not in rec.keys():
sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
elif rec[csd_key]!=None:
sig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*str
else:
sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
first_I.append([273, 0., 0., 0., 0., 1])
first_Z.append([273, dec, inc, str, sig, 1]) # NRM step
for temp in Treat_I: # look through infield steps and find matching Z step
if temp in Treat_Z: # found a match
istep = ISteps[Treat_I.index(temp)]
irec = datablock[istep]
methcodes = []
tmp = irec[meth_key].split(":")
for meth in tmp:
methcodes.append(meth.strip())
# take last record as baseline to subtract
brec = datablock[istep - 1]
zstep = ZSteps[Treat_Z.index(temp)]
zrec = datablock[zstep]
# sort out first_Z records
if "LP-PI-TRM-IZ" in methcodes:
ZI = 0
else:
ZI = 1
dec = float(zrec[dec_key])
inc = float(zrec[inc_key])
str = float(zrec[momkey])
if csd_key not in rec.keys():
sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
elif rec[csd_key]!=None:
sig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*str
else:
sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
first_Z.append([temp, dec, inc, str, sig, ZI])
# sort out first_I records
idec = float(irec[dec_key])
iinc = float(irec[inc_key])
istr = float(irec[momkey])
X = pmag.dir2cart([idec, iinc, istr])
BL = pmag.dir2cart([dec, inc, str])
I = []
for c in range(3):
I.append((X[c] - BL[c]))
if I[2] != 0:
iDir = pmag.cart2dir(I)
if csd_key not in rec.keys():
isig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
elif rec[csd_key]!=None:
isig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*istr
else:
isig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*istr
isig = np.sqrt(isig**2+sig**2)
if Zdiff == 0:
first_I.append([temp, iDir[0], iDir[1], iDir[2], isig, ZI])
else:
                    first_I.append([temp, 0., 0., I[2], isig, ZI])
gamma = pmag.angle([iDir[0], iDir[1]], [phi, theta])
else:
first_I.append([temp, 0., 0., 0., 0., ZI])
gamma = 0.0
# put in Gamma check (infield trm versus lab field)
if 180. - gamma < gamma:
gamma = 180. - gamma
GammaChecks.append([temp - 273., gamma])
for temp in Treat_PI: # look through infield steps and find matching Z step
step = PISteps[Treat_PI.index(temp)]
rec = datablock[step]
dec = float(rec[dec_key])
inc = float(rec[inc_key])
str = float(rec[momkey])
brec = datablock[step - 1] # take last record as baseline to subtract
pdec = float(brec[dec_key])
pinc = float(brec[inc_key])
pint = float(brec[momkey])
X = pmag.dir2cart([dec, inc, str])
prevX = pmag.dir2cart([pdec, pinc, pint])
I = []
for c in range(3):
I.append(X[c] - prevX[c])
dir1 = pmag.cart2dir(I)
if csd_key not in rec.keys():
sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
psig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*dir1[2]
elif rec[csd_key]!=None:
sig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*str
psig=np.radians(float(brec[csd_key]))*np.sqrt(3)/np.sqrt(2)*dir1[2]
else:
sig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
psig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*dir1[2]
psig=np.sqrt(sig**2+psig**2)
if Zdiff == 0:
ptrm_check.append([temp, dir1[0], dir1[1], dir1[2], sig])
else:
            ptrm_check.append([temp, 0., 0., I[2], sig])
# in case there are zero-field pTRM checks (not the SIO way)
for temp in Treat_PZ:
step = PZSteps[Treat_PZ.index(temp)]
rec = datablock[step]
dec = float(rec[dec_key])
inc = float(rec[inc_key])
str = float(rec[momkey])
brec = datablock[step - 1]
pdec = float(brec[dec_key])
pinc = float(brec[inc_key])
pint = float(brec[momkey])
X = pmag.dir2cart([dec, inc, str])
prevX = pmag.dir2cart([pdec, pinc, pint])
I = []
for c in range(3):
I.append(X[c] - prevX[c])
dir2 = pmag.cart2dir(I)
if csd_key not in rec.keys():
sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
            psig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*dir2[2]
elif rec[csd_key]!=None:
sig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*str
psig= np.radians(float(brec[csd_key]))*np.sqrt(3)/np.sqrt(2)*dir2[2]
else:
sig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
            psig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*dir2[2]
psig=np.sqrt(sig**2+psig**2)
zptrm_check.append([temp, dir2[0], dir2[1], dir2[2],psig])
# get pTRM tail checks together -
for temp in Treat_M:
# tail check step - just do a difference in magnitude!
step = MSteps[Treat_M.index(temp)]
rec = datablock[step]
dec = float(rec[dec_key])
inc = float(rec[inc_key])
str = float(rec[momkey])
if csd_key not in rec.keys():
sig= np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
elif rec[csd_key]!=None:
sig = np.radians(float(rec[csd_key]))*np.sqrt(3)/np.sqrt(2)*str
else:
sig = np.radians(2)*np.sqrt(3)/np.sqrt(2)*str
if temp in Treat_Z:
step = ZSteps[Treat_Z.index(temp)]
brec = datablock[step]
pint = float(brec[momkey])
# X=dir2cart([dec,inc,str])
# prevX=dir2cart([pdec,pinc,pint])
# I=[]
# for c in range(3):I.append(X[c]-prevX[c])
# d=cart2dir(I)
# ptrm_tail.append([temp,d[0],d[1],d[2]])
# difference - if negative, negative tail!
ptrm_tail.append([temp, dec, inc, str, sig])
else:
print(
s, ' has a tail check with no first zero field step - check input file! for step', temp - 273.)
#
# final check
#
if len(first_Z) != len(first_I):
print(len(first_Z), len(first_I))
print(" Something wrong with this specimen! Better fix it or delete it ")
input(" press return to acknowledge message")
araiblock = (first_Z, first_I, ptrm_check,
ptrm_tail, zptrm_check, GammaChecks)
return araiblock, field
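# Usage sketch (specimen name and dataframe are placeholders, commented out so
# nothing runs on import). The six-element araiblock unpacks exactly as
# convert_intensity_measurements does below:
#   araiblock, field = sortarai(measurements[measurements.specimen == 'spc01'],
#                               'spc01', Zdiff=False, version=3)
#   first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks = araiblock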
def NLTsolver(fields,a,b):
"""Makes the non linear TRM correction"""
return(a* np.tanh(b*fields))
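# Fitting sketch (field and moment values are hypothetical, commented out so
# nothing runs on import). NLTsolver is intended for scipy.optimize.curve_fit;
# the fitted b presumably provides the NLT_beta consumed by
# calculate_NLT_correction, so fields should be in the units that function
# uses for B_lab (microtesla):
#   fields = np.array([10., 20., 40., 80.])     # lab field steps in uT
#   moments = np.array([0.9, 1.7, 3.0, 4.6])    # TRM gained at each step
#   (a_fit, beta), _ = curve_fit(NLTsolver, fields, moments)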
def convert_intensity_measurements(measurements):
"""Converts a measurements table with only intensity experiments into the internal data format used by the BiCEP method"""
specimens=list(measurements.specimen.unique())#This function constructs the 'temps' dataframe (used to plot Arai plots)
#this may take a while to run depending on the number of specimens.
#Constructs initial empty 'temps' dataframe
data_array=np.empty(shape=(16,0))
for specimen in specimens:
print('Working on:',specimen)
araiblock,field=sortarai(measurements[measurements.specimen==specimen],specimen, Zdiff=False,version=3) #Get arai data
sitename=measurements[measurements.specimen==specimen].site.unique()
first_Z,first_I,ptrm_check,ptrm_tail,zptrm_check,GammaChecks=araiblock #Split NRM and PTRM values into step types
B_lab=np.full(len(first_Z),field) #Lab field used
m=len(first_Z)
NRM_dec_max=first_Z[m-1][1]
NRM_inc_max=first_Z[m-1][2]
NRM_int_max=first_Z[m-1][3]
PTRM_dec_max=first_I[m-1][1]
PTRM_inc_max=first_I[m-1][2]
PTRM_int_max=first_I[m-1][3]
NRM_vector_max=pmag.dir2cart([NRM_dec_max,NRM_inc_max,NRM_int_max])
PTRM_vector_max=pmag.dir2cart([PTRM_dec_max,PTRM_inc_max,PTRM_int_max])
PTRM_vector_max=PTRM_vector_max-NRM_vector_max
NRMS=[first_Z[i][3] for i in list(range(len(first_Z)))]
first_Z=np.array(first_Z)
first_I=np.array(first_I)
if min(NRMS)/NRMS[0]<0.25:
if(len(first_Z))>1:
sample=np.full(len(first_Z),measurements[measurements.specimen==specimen]['sample'].unique()[0]) #Get sample name
site=np.full(len(first_Z),measurements[measurements.specimen==specimen].site.unique()[0]) #Get site name
specarray=np.full(len(first_Z),specimen)
temp_step=first_Z[:,0] #Gets the temperature in kelvin we use
NRM=first_Z[:,3] #NRM value (in first_Z dataframe)
zbinary=first_Z[:,5]#Is it a ZI or an IZ step?
zbinary=zbinary.astype('object')
zbinary[zbinary==1]='ZI'
zbinary[zbinary==0]='IZ'
steptype=zbinary
PTRM=first_I[:,3] #PTRM value (in first_I dataframe)
PTRM_sigma=first_I[:,4]
NRM_dec=first_Z[:,1]
NRM_inc=first_Z[:,2]
NRM_int=NRM
NRM_sigma=first_Z[:,4]
NRM_vector=pmag.dir2cart(np.array([NRM_dec,NRM_inc,NRM_int]).T)
PTRM_vector=pmag.dir2cart(np.array([first_I[:,1],first_I[:,2],first_I[:,3]]).T)
NRM_x=NRM_vector[:,0]
NRM_y=NRM_vector[:,1]
NRM_z=NRM_vector[:,2]
PTRM_x=PTRM_vector[:,0]
PTRM_y=PTRM_vector[:,1]
PTRM_z=PTRM_vector[:,2]
newarray=np.array([specarray,sample,site,NRM,PTRM,NRM_x,NRM_y,NRM_z,PTRM_x,PTRM_y,PTRM_z,NRM_sigma,PTRM_sigma,B_lab,steptype,temp_step])
data_array=np.concatenate((data_array,newarray),axis=1)
#Doing PTRM Checks Part
ptrm_check=np.array(ptrm_check)
temp_step=ptrm_check[:,0]
smallarray=data_array
sample=np.full(len(ptrm_check),measurements[measurements.specimen==specimen]['sample'].unique()[0]) #Get sample name
site=np.full(len(ptrm_check),measurements[measurements.specimen==specimen].site.unique()[0]) #Get site name
specarray=np.full(len(ptrm_check),specimen)
B_lab=np.full(len(ptrm_check),field)
PTRM=ptrm_check[:,3]
PTRM_sigma=ptrm_check[:,4]
intersect=data_array[:,(data_array[0]==specimen)&(np.in1d(data_array[-1].astype('float'),temp_step.astype('float')))]
NRM_vector=np.array([intersect[5],intersect[6],intersect[7]])
NRM_sigma=intersect[11]
PTRM_vector=pmag.dir2cart(np.array([ptrm_check[:,1],ptrm_check[:,2],ptrm_check[:,3]]).T)
NRM_x=NRM_vector[0]
NRM_y=NRM_vector[1]
NRM_z=NRM_vector[2]
PTRM_x=PTRM_vector[:,0]
PTRM_y=PTRM_vector[:,1]
PTRM_z=PTRM_vector[:,2]
NRM=intersect[3]
steptype=np.full(len(ptrm_check),'P')
if len(NRM)==len(PTRM):
newarray=np.array([specarray,sample,site,NRM,PTRM,NRM_x,NRM_y,NRM_z,PTRM_x,PTRM_y,PTRM_z,NRM_sigma,PTRM_sigma,B_lab,steptype,temp_step])
data_array=np.concatenate((data_array,newarray),axis=1)
else:
diff=np.setdiff1d(temp_step,intersect[-1])
for i in diff:
print('PTRM check at '+str(i)+'K has no corresponding infield measurement, ignoring')
newarray=np.array([specarray[temp_step!=diff],sample[temp_step!=diff],site[temp_step!=diff],NRM,PTRM[temp_step!=diff],NRM_x,NRM_y,NRM_z,PTRM_x[temp_step!=diff],PTRM_y[temp_step!=diff],PTRM_z[temp_step!=diff],NRM_sigma,PTRM_sigma[temp_step!=diff],B_lab[temp_step!=diff],steptype[temp_step!=diff],temp_step[temp_step!=diff]])
data_array=np.concatenate((data_array,newarray),axis=1)
#Add PTRM tail checks
ptrm_tail=np.array(ptrm_tail)
if len(ptrm_tail)>1:
temp_step=ptrm_tail[:,0]
sample=np.full(len(ptrm_tail),measurements[measurements.specimen==specimen]['sample'].unique()[0]) #Get sample name
site=np.full(len(ptrm_tail),measurements[measurements.specimen==specimen].site.unique()[0]) #Get site name
specarray=np.full(len(ptrm_tail),specimen)
B_lab=np.full(len(ptrm_tail),field)
intersect=data_array[:,(data_array[0]==specimen)&(np.in1d(data_array[-1].astype('float'),temp_step.astype('float')))&(data_array[-2]!='P')]
NRM=ptrm_tail[:,3]
NRM_sigma=ptrm_tail[:,4]
NRM_vector=pmag.dir2cart(np.array([ptrm_tail[:,1],ptrm_tail[:,2],ptrm_tail[:,3]]).T)
PTRM_vector=np.array([intersect[8],intersect[9],intersect[10]])
PTRM_sigma=intersect[12]
PTRM_x=PTRM_vector[0]
PTRM_y=PTRM_vector[1]
PTRM_z=PTRM_vector[2]
NRM_x=NRM_vector[:,0]
NRM_y=NRM_vector[:,1]
NRM_z=NRM_vector[:,2]
PTRM=intersect[4]
steptype=np.full(len(ptrm_tail),'T')
if len(PTRM)==len(NRM):
newarray=np.array([specarray,sample,site,NRM,PTRM,NRM_x,NRM_y,NRM_z,PTRM_x,PTRM_y,PTRM_z,NRM_sigma,PTRM_sigma,B_lab,steptype,temp_step])
data_array=np.concatenate((data_array,newarray),axis=1)
else:
diff=np.setdiff1d(temp_step,intersect[-1])
for i in diff:
print('PTRM tail check at '+str(i)+'K has no corresponding zero field measurement, ignoring')
newarray=np.array([specarray[temp_step!=diff],sample[temp_step!=diff],site[temp_step!=diff],NRM[temp_step!=diff],PTRM,NRM_x[temp_step!=diff],NRM_y[temp_step!=diff],NRM_z[temp_step!=diff],PTRM_x,PTRM_y,PTRM_z,NRM_sigma[temp_step!=diff],PTRM_sigma,B_lab[temp_step!=diff],steptype[temp_step!=diff],temp_step[temp_step!=diff]])
data_array=np.concatenate((data_array,newarray),axis=1)
else:
print(specimen,'in site',sitename[0],'Not included, not a thellier experiment')
else:
print(specimen,'in site',sitename[0],'Not included, demagnetization not completed')
temps=pd.DataFrame(data_array.T,columns=['specimen','sample','site','NRM','PTRM','NRM_x','NRM_y','NRM_z','PTRM_x','PTRM_y','PTRM_z','NRM_sigma','PTRM_sigma','B_lab','steptype','temp_step'])
return(temps)
def generate_arai_plot_table(outputname):
"""
Generates a DataFrame with points on an Araiplot. Inputs: outputname (must be string)
"""
#This cell constructs the 'measurements' dataframe with samples and sites added
status,measurements=cb.add_sites_to_meas_table('./')
measurements=measurements[measurements.specimen.str.contains('#')==False]
measurements_old=measurements
measurements=measurements[measurements.experiment.str.contains('LP-PI-TRM')]
temps=convert_intensity_measurements(measurements)
temps['correction']=1
temps['s_tensor']=np.nan
temps['aniso_type']=np.nan
spec=pd.read_csv('specimens.txt',skiprows=1,sep='\t')
#Create the anisotropy tensors if they don't already exist.
print("Couldn't find Anisotropy Tensors, Generating...")
#Tensor for ATRM
ipmag.atrm_magic('measurements.txt',output_spec_file='specimens_atrm.txt')
spec_atrm=pd.read_csv('specimens_atrm.txt',sep='\t',skiprows=1)
for specimen in spec_atrm.specimen.unique():
temps.loc[temps.specimen==specimen,'s_tensor']=spec_atrm.loc[spec_atrm.specimen==specimen,'aniso_s'].iloc[0]
temps.loc[temps.specimen==specimen,'aniso_type']=':LP-AN-TRM'
#Tensor for AARM
ipmag.aarm_magic('measurements.txt',output_spec_file='specimens_aarm.txt')
spec_aarm=pd.read_csv('specimens_aarm.txt',sep='\t',skiprows=1)
for specimen in spec_aarm.specimen.unique():
temps.loc[temps.specimen==specimen,'s_tensor']=spec_aarm.loc[spec_aarm.specimen==specimen,'aniso_s'].iloc[0]
temps.loc[temps.specimen==specimen,'aniso_type']=':LP-AN-ARM'
#Add Anisotropy tensors to specimen tables.
if len(spec_atrm.specimen.unique())>0:
cols = spec.columns.difference(spec_atrm.columns)
cols=np.append(cols.values,'specimen')
spec_1=pd.merge(spec.loc[:,cols],spec_atrm,how='right',left_on='specimen',right_on='specimen')
if len(spec_aarm.specimen.unique())>0:
spec_2=pd.merge(spec.loc[:,cols],spec_aarm,how='right',left_on='specimen',right_on='specimen')
spec= | pd.concat([spec_2,spec_1]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 11 16:42:34 2020
@author: <NAME>
"""
import requests
import pandas as pd
def getData(statistic, scope):
apiUrl = ''
if(scope == 'global'):
apiUrl = 'https://covidapi.info/api/v1/global/count'
else:
apiUrl = 'https://covidapi.info/api/v1/country/{}'.format(scope)
#Get the JSON data from the Covid API
response = requests.get(apiUrl)
json = response.json()['result']
#Convert the JSON data to a pandas dataframe
response_data = pd.DataFrame.from_dict(json)
#Transpose the dataframe and reset the index so timestamp is now index
df = pd.DataFrame(data=response_data).T
df = df.reset_index(level=None, drop=False, col_level=0)
#Set the new column names
df.columns = ['Date Time', 'Confirmed', 'Deaths', 'Recovered']
#calculate the concurrent number of infections
df['Concurrent'] = df.apply(lambda x: (x['Confirmed'] - x['Recovered'] - x['Deaths']), axis=1)
#calculate the rates
df['Death_Rate'] = df['Deaths'].diff()
df['Confirmed_Rate'] = df['Confirmed'].diff()
df['Recovered_Rate'] = df['Recovered'].diff()
df['Concurrent_Rate'] = df['Concurrent'].diff()
#set the first value of these rates to 0, as they are NaN
    df.loc[0, 'Death_Rate'] = 0
    df.loc[0, 'Confirmed_Rate'] = 0
    df.loc[0, 'Recovered_Rate'] = 0
    df.loc[0, 'Concurrent_Rate'] = 0
#Make a new data frame with the inputted statistic
uni_data = | pd.DataFrame() | pandas.DataFrame |
# Author: <NAME>
# Email: <EMAIL>
import numpy as np
import pandas as pd
import os
import seaborn
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import sklearn.metrics as metrics
import matplotlib.pylab as pylab
from glob import glob
from Python_Files.hydrolibs import rasterops as rops
from Python_Files.hydrolibs import vectorops as vops
from Python_Files.hydrolibs.sysops import makedirs, make_proper_dir_name
from datetime import datetime
from sklearn.linear_model import LinearRegression
def create_gw_time_series(actual_gw_file_dir, pred_gw_file_dir, grace_dir, actual_gw_pattern='GW*.tif',
pred_gw_pattern='pred*.tif', grace_pattern='GRACE*.tif', make_trend=False,
out_dir='../Outputs/'):
"""
Create time series data for actual and predicted GW values (annual mean) along with GRACE
:param actual_gw_file_dir: Actual GW pumping raster directory
:param pred_gw_file_dir: Predicted GW pumping raster directory
:param grace_dir: GRACE raster directory
:param actual_gw_pattern: Actual GW pumping raster file pattern
:param pred_gw_pattern: Predicted GW pumping raster file pattern
:param grace_pattern: GRACE raster file pattern
:param make_trend: Make trend data for the monthly grace values
:param out_dir: Output directory for storing the CSV files
:return: Dataframe containing year and corresponding mean values
"""
actual_gw_raster_dict = rops.create_raster_dict(actual_gw_file_dir, pattern=actual_gw_pattern)
pred_gw_raster_dict = rops.create_raster_dict(pred_gw_file_dir, pattern=pred_gw_pattern)
grace_yearly_raster_dict = rops.create_yearly_avg_raster_dict(grace_dir, pattern=grace_pattern)
grace_monthly_raster_dict = rops.create_monthly_avg_raster_dict(grace_dir, pattern=grace_pattern)
years = sorted(list(pred_gw_raster_dict.keys()))
dt_list = sorted(list(grace_monthly_raster_dict.keys()))
mean_actual_gw = {}
mean_pred_gw = {}
mean_grace = {}
mean_monthly_grace = {}
for year in years:
mean_actual_gw[year] = np.nanmean(actual_gw_raster_dict[year])
mean_pred_gw[year] = np.nanmean(pred_gw_raster_dict[year])
mean_grace[year] = np.nanmean(grace_yearly_raster_dict[year])
for dt in dt_list:
mean_monthly_grace[dt] = grace_monthly_raster_dict[dt]
df1 = {'YEAR': years, 'Actual_GW': list(mean_actual_gw.values()), 'Pred_GW': list(mean_pred_gw.values()),
'GRACE': list(mean_grace.values())}
df1 = pd.DataFrame(data=df1)
df1.to_csv(out_dir + 'ts_yearly.csv', index=False)
time = [datetime.strftime(t, '%b %Y') for t in dt_list]
grace_monthly_list = list(mean_monthly_grace.values())
df2 = {'Time': time, 'GRACE': grace_monthly_list}
if make_trend:
grace_fit = get_trend(dt_list, grace_monthly_list)
df2['Trend'] = grace_fit
df2 = pd.DataFrame(data=df2)
df2.to_csv(out_dir + 'grace_monthly.csv', index=False)
return df1, df2
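# Usage sketch (directory names below are placeholders, commented out so
# importing the module has no side effects):
#   df_yearly, df_monthly = create_gw_time_series(
#       '../Inputs/Actual_GW/', '../Outputs/Pred_GW/', '../Inputs/GRACE/',
#       make_trend=True, out_dir='../Outputs/')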
def create_gw_forecast_time_series(actual_gw_file_dir_list, pred_gw_file_dir_list, grace_csv, gw_name_list=None,
use_gws=True, actual_gw_pattern='GW*.tif', pred_gw_pattern='pred*.tif',
out_dir='../Outputs/', exclude_years=(), forecast_years=()):
"""
Create GW and GRACE dataframes
:param actual_gw_file_dir_list: Actual GW pumping raster directory list
:param pred_gw_file_dir_list: Predicted GW pumping raster directory list
:param grace_csv: GRACE TWS CSV file
:param gw_name_list: List of GMD names
:param use_gws: Set False to use entire GW raster for analysis
:param actual_gw_pattern: Actual GW pumping raster file pattern
:param pred_gw_pattern: Predicted GW pumping raster file pattern
:param out_dir: Output directory for storing the CSV files
:param exclude_years: Exclude these years from analysis
:param forecast_years: Set these years as forecast years
:return: Two dataframes, one with the GW pumping values and the other containing the monthly GRACE values
"""
grace_df = pd.read_csv(grace_csv)
grace_df = grace_df.dropna(axis=0)
grace_df['GRACE'] = grace_df['GRACE'] * 10
grace_df['DT'] = pd.to_datetime(grace_df['DT']).dt.date
gw_df = | pd.DataFrame() | pandas.DataFrame |
"""
Module : Main
Description : The main entry point for the program.
Copyright : (c) CHER_WEI_YUAN, 29DEC2021
License : -
Maintainer : <EMAIL>
Portability : POSIX
This program reads the output of GeneScan in csv format, removes
peaks with small or relatively small areas, and calculates, for
each sample, the percentage of the total area that each peak covers.
The hard filtering procedure here has a low threshold (in ambiguous
situations, many peaks will not be removed), and the decision to keep
or remove peaks is left to the user.
##### Development for isoform assignment to peaks
# Note all sizes are in base pair (bp)
# Log shift and command inputs
# Plot error-shift line graph as visual quality control
# Position of df is important: pos 1 is "Sample File Name"
# Need to read exon_df
"""
PROGRAM_NAME = "genescanner"
PROGRAM_VERSION = "1.0.2"
EXIT_COLUMN_HEADER_ERROR = 1
DIRECTORY_MISSING_ERROR = 2
PERMISSION_ERROR = 3
MISSING_EXON_INFO_ERROR = 4
import sys
from os import mkdir
from os import path
from logging import basicConfig
from logging import info
from logging import INFO
from logging import error
from argparse import ArgumentParser
import pandas as pd
from itertools import chain
from seaborn import FacetGrid
from seaborn import scatterplot
from matplotlib.pyplot import bar
from itertools import combinations
from bisect import bisect_left
from numpy import arange
from copy import deepcopy
def parse_args():
"""
Parse command line arguments.
"""
description = "Reads the output of GeneScan from a SINGLE EXPERIMENT in csv format, remove peaks with small area, and calculates, for each sample, the percentage of the total area that each peak covers."
epilog = "Example usage: genescanner \
--exon_df /mnt/c/mydata/exons.csv \
--outdir /mnt/c/output_folder \
--prefix output \
--filter 1 \
/mnt/c/mydata/genescan.csv"
parser = ArgumentParser(description=description,
epilog=epilog)
parser.add_argument('input',
type=str,
help="Input GeneScan datasheet from a single run in CSV format. A single run can contain many samples but they share the same capillary injection")
parser.add_argument('--exon_df',
type=str,
default = None,
help='Datasheet containing exon sizes per sample in CSV format')
parser.add_argument('--outdir',
type=str,
default='stdout',
help='Name of output directory')
parser.add_argument('--prefix',
type=str,
default='out',
help='Prefix name of output files')
parser.add_argument('--version',
action='version',
version=str(PROGRAM_VERSION))
parser.add_argument('--resolveAmbiguousPeaks',
dest='resolve_peaks',
action='store_true',
help = 'Keep highest of multiple peaks close to each other. "Closeness" is defined as peaks within --peak_gap base pair of each other. If an ambiguous cluster of peaks has more than --cluster_size peaks, they will not be resolved.)')
parser.add_argument('--not_resolveAmbiguousPeaks',
dest='resolve_peaks',
action='store_false',
help = 'Do not clean peaks by only keeping the highest of closely clustered peaks.')
parser.set_defaults(resolve_peaks = False)
parser.add_argument('--peak_gap',
type=float,
default = 1.7,
help='DEFAULT = 1.7. A pair of peaks within peak_gap of each other will be processed to give one peak')
parser.add_argument('--cluster_size',
type=int,
default = 3,
help='DEFAULT = 3. The maximum number of peaks within peak_gap of each other that will be processed together. Only one peak with largest area will remain.')
parser.add_argument('--filter',
type=float,
default = 0.0,
help='DEFAULT = 0.0. Float. Remove all peaks with percentage area lower than filter. Percentage area refers to the area of the peak over the area of all peaks of the same sample.')
parser.add_argument('--Error_filter',
type=float,
default = 15.0,
                        help='DEFAULT = 15.0. Float. Entries in the output dataframe with Error equal to or greater than the specified value will be removed.')
parser.add_argument('--shift_range',
                        nargs=3,
                        type=float,
                        default=[-50, 50, 0.25],
                        help='DEFAULT = [-50, 50, 0.25]. Range of candidate shift values to scan, given as MIN MAX STEP. If the GeneScan results show a drastic shift, consider widening this range.')
parser.add_argument('--shift',
default=False,
help='Float. You can provide shift value if it is known.')
return parser.parse_args()
def exit_with_error(message, exit_status):
'''Print an error message to stderr, prefixed by the program name and 'ERROR'.
Then exit program with supplied exit status.
Arguments:
message: an error message as a string.
exit_status: a positive integer representing the exit status of the
program.
'''
error(message)
print("{} ERROR: {}, exiting".format(PROGRAM_NAME, message), file=sys.stderr)
sys.exit(exit_status)
def loadDf(input_file):
"""
Parameters
----------
input_file : TYPE String
DESCRIPTION. Directory and name of GeneScan output sheet in csv.
(e.g. ./input/data.csv)
Returns
-------
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet.
"""
# Load dataframe
df = pd.read_csv(input_file)
# Sort dataframe by sample name and size
df = df.sort_values(by=['Sample File Name', 'Size']).\
reset_index(drop = True, inplace = False)
# Clean sample names by removing leading and trailing white space
df["Sample File Name"] = df["Sample File Name"].str.rstrip().str.lstrip()
# Clean df by making sure dtype is correct
df = df.astype({'Sample File Name': str,
'Size': float,
'Height': float,
'Area': float})
# Check if df column names are correct
expected = ['Sample File Name',
'Size','Height','Area']
for i in expected:
if i not in list(df.columns):
exit_with_error(f"Unexpected column header detected. Rename columns to {expected} and retry",
EXIT_COLUMN_HEADER_ERROR)
return df
def getSampleNames(df):
"""
Parameters
----------
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet.
Returns
-------
sample_names : TYPE List
DESCRIPTION. List of unique sample names from df.
"""
sample_names = list(set(df.loc[:,"Sample File Name"]))
return sample_names
def findPeakCluster(index, build_list, df, peak_gap):
"""
Recursively finds members of a peak cluster starting from the peak with
the smallest size.
Parameters
----------
index : TYPE Integer
DESCRIPTION. The index of df that corresponds to the
rows (i.e. peaks) that are clustered (within peak_gap
of each other) and awaiting to be processed to give
fewer peaks.
build_list : TYPE List
DESCRIPTION. List of index of peaks in peak clusters
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet.
    peak_gap : TYPE Float
DESCRIPTION. User-supplied. A pair of peaks within peak_gap of
each other will be processed to give one peak.
Returns
-------
TYPE List
A list of index corresponding to peaks in a peak cluster.
"""
# Return build_list if we reach the end of dataframe
if index == max(df.index):
return list(set(build_list))
# Stop recursion when next peak is not within peak_gap of current peak
elif df.loc[index + 1, "Size"] - df.loc[index, "Size"] > peak_gap:
return list(set(build_list))
# Recursion to next peak
else:
build_list += [index, index + 1]
return findPeakCluster(index + 1, build_list, df, peak_gap)
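# Worked illustration (sizes are made up, commented out so nothing runs on
# import). With peak_gap = 1.7, sizes 100.0, 101.2 and 102.5 chain into one
# cluster because every consecutive gap is <= 1.7 bp, while 110.0 starts a new
# cluster; the recursion therefore returns the first three row indexes:
#   df = pd.DataFrame({"Sample File Name": ["s1"] * 4,
#                      "Size": [100.0, 101.2, 102.5, 110.0],
#                      "Height": [10, 200, 15, 50],
#                      "Area": [30.0, 900.0, 40.0, 200.0]})
#   findPeakCluster(0, [], df, 1.7)   # -> [0, 1, 2]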
def findMountainRanges(df, sample_names, peak_gap):
"""
Finds a list of peak_cluster (each peak cluster is a list of index
corresponding to peaks within peak_gap of each other) called
mountain_ranges (i.e. the collection of all peak clusters in df).
Parameters
----------
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet.
sample_names : TYPE List
DESCRIPTION. List of unique sample names from df.
Returns
-------
mountain_ranges : TYPE List
DESCRIPTION. A master list containing list of indexes of
continuous peaks within peak_gap of each other.
"""
mountain_ranges = []
# Loop through samples
for i in sample_names:
dfSample = df.loc[df.iloc[:,1] == i, :]
# Loop through rows in subset df
# and run findPeakCluster
for index, row in dfSample.iterrows():
# Skip rows checked by findPeakCluster
if index in chain(*mountain_ranges):
pass
else:
mountain_ranges += [findPeakCluster(index,
[],
dfSample,
peak_gap)]
# Remove duplicate empty nested lists
mountain_ranges = [x for x in mountain_ranges if x != []]
return mountain_ranges
def cleanMountainRanges(df, mountain_ranges, cluster_size):
"""
Generates a list of row index to remove from dataframe
"""
remove = []
for mountains in mountain_ranges:
if len(mountains) == 2:
a = df.loc[mountains[0], "Area"]
b = df.loc[mountains[1], "Area"]
if a > b:
remove += [[mountains[1]]]
elif b > a:
remove += [[mountains[0]]]
# Keep both peaks if they are equal
elif a == b:
pass
elif len(mountains) > 2 and len(mountains) <= cluster_size:
# Keep list of all areas of peaks in cluster
lst = []
for i in range(len(mountains)):
lst += [df.loc[mountains[i], "Area"]]
# Remove index corresponding to largest area
# and add remaining index to remove
mountains.pop(lst.index(max(lst)))
remove += [ mountains ]
remove = list(chain(*remove))
return remove
def RemoveArtefacts(df, remove):
"""
Create df for output to user
Parameters
----------
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet.
remove : TYPE List
DESCRIPTION. Flattened list of index to remove from df
Returns
-------
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet with dirty peaks
removed.
"""
return df.drop(labels = remove, axis = 0, inplace = False)
def labelArtefacts(df, remove):
"""
Create df for plotting
Parameters
----------
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet.
remove : TYPE List
DESCRIPTION. Flattened list of index to remove from df
Returns
-------
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet with dirty peaks
to be removed marked as "Removed".
"""
df["Status"] = "Kept"
if remove == []:
pass
else:
for i in remove:
df.loc[i, "Status"] = "Removed"
return df
def AddPercentage(processed_df, sample_names):
"""
Parameters
----------
processed_df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet from RemoveArtefacts.
sample_names : TYPE List
DESCRIPTION. List of unique sample names from df.
Returns
-------
df_out : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned and processed GeneScan datasheet.
"""
data_list = []
# Loop through samples
for i in sample_names:
dfSample = processed_df.loc[processed_df.iloc[:,1] == i, :]
# Calculate percentage per peak (row)
# and keep only useful rows
total_area = sum(dfSample.loc[:,"Area"])
for index, row in dfSample.iterrows():
name = row["Sample File Name"]
size = row["Size"]
height = row["Height"]
area = row["Area"]
percentage = int(round((row["Area"]/ total_area) * 100, 0))
data_list.append([name, size, height, area, percentage])
# Make output dataframe
df_out = pd.DataFrame(data_list, columns = \
["Sample File Name",
"Size",
"Height",
"Area",
"Percentage"])
return df_out
def filterAreaPercent(df_out, filter_threshold):
return df_out.query(f"Percentage > {filter_threshold}").\
sort_values(by=['Sample File Name', 'Size'], ascending = True)
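# --- Illustrative usage sketch (not part of the original script) ---
# Hedged example of chaining the helpers above into one cleaning pipeline.
# The input path, peak_gap, cluster_size and filter_threshold values are
# assumptions; the column layout (sample names in the second column, plus
# "Size", "Height", "Area") follows the GeneScan conventions used here.
def _example_peak_cleaning_pipeline(csv_path="genescan_cleaned.csv"):
    # Read a cleaned GeneScan datasheet (hypothetical path).
    df = pd.read_csv(csv_path)
    # Unique sample names are taken from the second column of the datasheet.
    sample_names = df.iloc[:, 1].unique().tolist()
    # Group neighbouring peaks, pick the rows to drop, then summarise.
    mountain_ranges = findMountainRanges(df, sample_names, peak_gap=2)
    remove = cleanMountainRanges(df, mountain_ranges, cluster_size=4)
    cleaned = RemoveArtefacts(df, remove)
    labelled = labelArtefacts(df.copy(), remove)  # "Kept"/"Removed" for plots
    summary = filterAreaPercent(AddPercentage(cleaned, sample_names),
                                filter_threshold=5)
    return labelled, summary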
def plot(df_before, df_after, prefix, outdir):
# Sort dataframe by sample name and size
df_before = df_before.sort_values(by=['Sample File Name', 'Size'])
df_after = df_after.sort_values(by=['Sample File Name', 'Size'])
# Add new column to differentiate before and after
df_before["Processed"] = "Before"
df_after["Processed"] = "After"
# Combine df
df_combine = | pd.concat([df_before, df_after]) | pandas.concat |
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize
from sklearn.ensemble import RandomForestRegressor
from timeit import default_timer as timer
import shap
import stratx.partdep
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy import nan
def check(X, y, colname,
expected_xranges, expected_slopes,
expected_pdpx, expected_pdpy,
expected_ignored=0,
min_samples_leaf=15):
leaf_xranges, leaf_slopes, slope_counts_at_x, dx, slope_at_x, pdpx, pdpy, ignored = \
stratx.partdep.partial_dependence(X, y, colname,
min_samples_leaf=min_samples_leaf,
min_slopes_per_x=1)
# print(leaf_xranges, leaf_slopes, slope_counts_at_x, dx, slope_at_x, pdpx, pdpy)
assert ignored==expected_ignored, f"Expected ignored {expected_ignored} got {ignored}"
    assert len(leaf_xranges)==len(expected_xranges), f"Expected ranges {expected_xranges}"
assert np.isclose(leaf_xranges, np.array(expected_xranges)).all(), f"Expected ranges {expected_xranges} got {leaf_xranges}"
assert len(leaf_slopes)==len(expected_slopes), f"Expected slopes {expected_slopes}"
assert np.isclose(leaf_slopes, np.array(expected_slopes)).all(), f"Expected slopes {expected_slopes} got {leaf_slopes}"
assert len(pdpx)==len(expected_pdpx), f"Expected pdpx {expected_pdpx}"
assert np.isclose(pdpx, np.array(expected_pdpx)).all(), f"Expected pdpx {expected_pdpx} got {pdpx}"
assert len(pdpy)==len(expected_pdpy), f"Expected pdpy {expected_pdpy}"
assert np.isclose(pdpy, np.array(expected_pdpy)).all(), f"Expected pdpy {expected_pdpy} got {pdpy}"
def test_binary_one_region():
df = pd.DataFrame()
df['x1'] = [1, 1]
df['x2'] = [65, 60]
df['y'] = [100, 130]
X = df.drop('y', axis=1)
y = df['y']
expected_xranges = np.array([[60, 65]])
expected_slopes = np.array([-6])
expected_pdpx = np.array([60,65])
expected_pdpy = np.array([0,-30])
check(X, y, "x2",
expected_xranges, expected_slopes,
expected_pdpx, expected_pdpy,
min_samples_leaf=2)
def test_one_region():
df = pd.DataFrame()
df['x1'] = [1, 1, 1]
df['x2'] = [100,101,102]
df['y'] = [10, 11, 12]
X = df.drop('y', axis=1)
y = df['y']
expected_xranges = np.array([[100, 101],
[101, 102]])
expected_slopes = np.array([1, 1])
expected_pdpx = np.array([100,101,102])
expected_pdpy = np.array([0, 1, 2])
check(X, y, "x2",
expected_xranges, expected_slopes,
expected_pdpx, expected_pdpy,
min_samples_leaf=3)
def test_disjoint_regions():
"""
What happens when we have two disjoint regions in x_j space?
Does the 2nd start with 0 again with cumsum?
"""
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@File: pretreatment_llr_cal.py
@Author: haifeng
@Since: python3.9
@Version: V1.0
@Date: 2021/9/13 21:38
@Description:
-------------------------------------------------
@Change:
2021/9/13 21:38
-------------------------------------------------
"""
import math
import sys
import pandas
import numpy
from sklearn import model_selection, preprocessing
from sklearn.preprocessing import MinMaxScaler
def get_array(file_name):
dataframe = pandas.read_csv(file_name)
text_array = | pandas.DataFrame(dataframe['text']) | pandas.DataFrame |
"""
Module containing the main gtfs_tripify method, logify, which takes an update stream as input
and returns a logbook as output. Processing is done in two steps. First, the update stream is
broken up by a unique_trip_id, which is inferred from the trip_id field and contextual information
about trips with the same route_id which are aligned with one another. This is complicated by the
fact that (1) trip_id values are not globally unique and are instead recycled by multiple trains
over the course of a day and (2) end-to-end runs may have their trip_id reassigned without
warning. Then once the messages are aligned they are first transformed into action logs, and then
those action logs are compacted into logs in a logbook.
"""
import itertools
from collections import defaultdict
import uuid
import numpy as np
import pandas as pd
from gtfs_tripify.utils import synthesize_route, finish_trip
from gtfs_tripify.ops import (
drop_invalid_messages, drop_duplicate_messages, drop_nonsequential_messages, parse_feed
)
########################
# INTERMEDIATE PARSERS #
########################
# dictify: GTFS-RT Protobuf -> <dict>
# actionify: (dict<trip_message>, dict<vehicle_message>, int<timestamp>) -> DataFrame<action_log>
# tripify: list<action_logs> -> DataFrame<trip_log>
def dictify(buffer):
"""
Parses a GTFS-Realtime Protobuf into a Python dict, which is more ergonomic to work with.
Fields not in the GTFS-RT schema are ignored.
"""
update = {
'header': {'gtfs_realtime_version': buffer.header.gtfs_realtime_version,
'timestamp': buffer.header.timestamp},
'entity': []
}
# Helper functions for determining GTFS-RT message types.
def is_vehicle_update(message):
return str(message.trip_update.trip.route_id) == '' and str(message.alert) == ''
def is_alert(message):
return str(message.alert) != ''
def is_trip_update(message):
return not is_vehicle_update(message) and not is_alert(message)
# Helper function for mapping dictionary-encoded statuses into human-readable strings.
def munge_status(status_code):
statuses = {
0: 'INCOMING_AT',
1: 'STOPPED_AT',
2: 'IN_TRANSIT_TO'
}
return statuses[status_code]
for message in buffer.entity:
if is_trip_update(message):
parsed_message = {
'id': message.id,
'trip_update': {
'trip': {
'trip_id': message.trip_update.trip.trip_id,
'start_date': message.trip_update.trip.start_date,
'route_id': message.trip_update.trip.route_id
},
'stop_time_update': [
{
'stop_id': _update.stop_id,
'arrival': np.nan if str(_update.arrival) == ""
else _update.arrival.time,
'departure': np.nan if str(_update.departure) == ""
else _update.departure.time
} for _update in message.trip_update.stop_time_update]
},
'type': 'trip_update'
}
update['entity'].append(parsed_message)
elif is_vehicle_update(message):
parsed_message = {
'id': message.id,
'vehicle': {
'trip': {
'trip_id': message.vehicle.trip.trip_id,
'start_date': message.vehicle.trip.start_date,
'route_id': message.vehicle.trip.route_id
},
'current_stop_sequence': message.vehicle.current_stop_sequence,
'current_status': munge_status(message.vehicle.current_status),
'timestamp': message.vehicle.timestamp,
'stop_id': message.vehicle.stop_id
},
'type': 'vehicle_update'
}
update['entity'].append(parsed_message)
else: # is_alert
parsed_message = {
'id': message.id,
'alert': {
'header_text': {
'translation': {
'text': message.alert.header_text.translation[0].text
}
},
'informed_entity': [
{
'trip_id': _trip.trip.trip_id,
'route_id': _trip.trip.route_id
} for _trip in message.alert.informed_entity]
},
'type': 'alert'
}
update['entity'].append(parsed_message)
return update
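# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of feeding dictify() with a parsed GTFS-Realtime feed. The
# feed URL is a placeholder assumption; the protobuf bindings come from the
# gtfs-realtime-bindings package (google.transit.gtfs_realtime_pb2).
def _example_dictify_from_url(url="https://example.com/gtfs-rt/feed.pb"):
    import requests
    from google.transit import gtfs_realtime_pb2

    buffer = gtfs_realtime_pb2.FeedMessage()
    buffer.ParseFromString(requests.get(url).content)  # raw protobuf bytes
    update = dictify(buffer)
    # update['entity'] now holds plain dicts typed as 'trip_update',
    # 'vehicle_update', or 'alert'.
    return update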
def actionify(trip_message, vehicle_message, timestamp):
"""
Parses the trip update and vehicle update messages (if there is one; may be None) for a
particular trip into an action log.
"""
# If a vehicle message is not None, the trip is already in progress.
inp = vehicle_message is not None
# The base of the log entry is the same for all possible entries.
base = np.array([trip_message['trip_update']['trip']['trip_id'],
trip_message['trip_update']['trip']['route_id'], timestamp])
vehicle_status = vehicle_message['vehicle']['current_status'] if inp else 'QUEUED'
loglist = []
def log_arrival(stop_id, arrival_time):
loglist.append(
np.append(base.copy(), np.array(['EXPECTED_TO_ARRIVE_AT', stop_id, arrival_time]))
)
def log_departure(stop_id, departure_time):
loglist.append(
np.append(base.copy(), np.array(['EXPECTED_TO_DEPART_AT', stop_id, departure_time]))
)
def log_stop(stop_id, arrival_time):
loglist.append(
np.append(base.copy(), np.array(['STOPPED_AT', stop_id, arrival_time]))
)
def log_skip(stop_id, skip_time):
loglist.append(
np.append(base.copy(), np.array(['EXPECTED_TO_SKIP', stop_id, skip_time]))
)
for s_i, stop_time_update in enumerate(trip_message['trip_update']['stop_time_update']):
first_station = s_i == 0
last_station = s_i == len(trip_message['trip_update']['stop_time_update']) - 1
stop_id = stop_time_update['stop_id']
arrival_time = stop_time_update['arrival']
departure_time = stop_time_update['departure']
# First station, vehicle status is STOPPED_AT.
if first_station and vehicle_status == 'STOPPED_AT':
log_stop(stop_id, arrival_time)
# First station, vehicle status is QUEUED.
elif first_station and vehicle_status == 'QUEUED':
log_departure(stop_id, departure_time)
# First station, vehicle status is IN_TRANSIT_TO or INCOMING_AT, both arrival and
# departure fields are non-null.
# Intermediate station, both arrival and departure fields are non-null.
elif ((first_station and
vehicle_status in ['IN_TRANSIT_TO', 'INCOMING_AT'] and
pd.notnull(arrival_time) and pd.notnull(departure_time)) or
(not first_station and
not last_station and
pd.notnull(arrival_time) and | pd.notnull(departure_time) | pandas.notnull |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def oracle_url() -> str:
conn = os.environ["ORACLE_URL"]
return conn
@pytest.mark.xfail
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_on_non_select(oracle_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
read_sql(oracle_url, query)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_complex_join(oracle_url: str) -> None:
query = "SELECT a.test_int, b.test_date, c.test_num_int FROM test_table a left join test_types b on a.test_int = b.test_num_int cross join (select test_num_int from test_types) c where c.test_num_int < 3"
df = read_sql(oracle_url, query)
df = df.sort_values("TEST_INT").reset_index(drop=True)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 5, 2333], dtype="Int64"),
"TEST_DATE": pd.Series(
["2019-05-21", None, None, "2020-05-21", "2020-05-21", None],
dtype="datetime64[ns]",
),
"TEST_NUM_INT": pd.Series([1, 1, 1, 1, 1, 1], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_aggregation(oracle_url: str) -> None:
query = "select avg(test_int), test_char from test_table group by test_char"
df = read_sql(oracle_url, query)
df = df.sort_values("AVG(TEST_INT)").reset_index(drop=True)
expected = pd.DataFrame(
data={
"AVG(TEST_INT)": pd.Series([1, 2, 5, 1168.5], dtype="float64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 ", "str05", None], dtype="object"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_partition_on_aggregation(oracle_url: str) -> None:
query = "select sum(test_int) cid, test_char from test_table group by test_char"
df = read_sql(oracle_url, query, partition_on="cid", partition_num=3)
df = df.sort_values("CID").reset_index(drop=True)
expected = pd.DataFrame(
index=range(4),
data={
"CID": pd.Series([1, 2, 5, 2337], dtype="float64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 ", "str05", None], dtype="object"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_aggregation2(oracle_url: str) -> None:
query = "select DISTINCT(test_char) from test_table"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_CHAR": pd.Series(["str05", "str1 ", "str2 ", None], dtype="object"),
},
)
df.sort_values(by="TEST_CHAR", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_partition_on_aggregation2(oracle_url: str) -> None:
query = "select MAX(test_int) MAX, MIN(test_int) MIN from test_table"
df = read_sql(oracle_url, query, partition_on="MAX", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"MAX": pd.Series([2333], dtype="float64"),
"MIN": pd.Series([1], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_manual_partition(oracle_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(oracle_url, query=queries)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, "str05", None], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, -4.44, None, None], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_without_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 2333, 4, 5], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, None, "str05"], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, None, -4.44, None], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_limit_without_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table where rownum <= 3"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 ", None], dtype="object"),
"TEST_FLOAT": pd.Series([1.1, 2.2, None], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_limit_large_without_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table where rownum < 10"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 2333, 4, 5], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, None, "str05"], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, None, -4.44, None], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
oracle_url,
query,
partition_on="test_int",
partition_range=(0, 5001),
partition_num=3,
)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, "str05", None], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, -4.44, None, None], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition_without_partition_range(oracle_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 1"
df = read_sql(
oracle_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2], dtype="Int64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 "], dtype="object"),
"TEST_FLOAT": pd.Series([1.1, 2.2], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition_and_selection(oracle_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
oracle_url,
query,
partition_on="test_int",
partition_range=(1, 2333),
partition_num=3,
)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, "str05", None], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, -4.44, None, None], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition_and_spja(oracle_url: str) -> None:
query = "select test_table.test_int cid, SUM(test_types.test_num_float) sfloat from test_table, test_types where test_table.test_int=test_types.test_num_int group by test_table.test_int"
df = read_sql(oracle_url, query, partition_on="cid", partition_num=2)
expected = pd.DataFrame(
data={
"CID": pd.Series([1, 5], dtype="Int64"),
"SFLOAT": pd.Series([2.3, -0.2], dtype="float64"),
},
)
df.sort_values(by="CID", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_types(oracle_url: str) -> None:
query = "SELECT * FROM test_types"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_NUM_INT": pd.Series([1, 5, 5, None], dtype="Int64"),
"TEST_INT": pd.Series([-10, 22, 22, 100], dtype="Int64"),
"TEST_NUM_FLOAT": pd.Series([2.3, -0.1, -0.1, None], dtype="float64"),
"TEST_FLOAT": pd.Series([2.34, 123.455, 123.455, None], dtype="float64"),
"TEST_BINARY_FLOAT": pd.Series(
[-3.456, 3.1415926535, 3.1415926535, None], dtype="float64"
),
"TEST_BINARY_DOUBLE": pd.Series(
[9999.99991, -111111.2345, -111111.2345, None], dtype="float64"
),
"TEST_CHAR": pd.Series(["char1", "char2", "char2", None], dtype="object"),
"TEST_VARCHAR": pd.Series(
["varchar1", "varchar222", "varchar222", None], dtype="object"
),
"TEST_NCHAR": pd.Series(
["y123 ", "aab123", "aab123", None], dtype="object"
),
"TEST_NVARCHAR": pd.Series(
["aK>?KJ@#$%", ")>KDS)(F*&%J", ")>KDS)(F*&%J", None], dtype="object"
),
"TEST_DATE": pd.Series(
["2019-05-21", "2020-05-21", "2020-05-21", None], dtype="datetime64[ns]"
),
"TEST_TIMESTAMP": pd.Series(
[
"2019-05-21 01:02:33",
"2020-05-21 01:02:33",
"2020-05-21 01:02:33",
None,
],
dtype="datetime64[ns]",
),
"TEST_TIMESTAMPTZ": pd.Series(
[
"1999-12-01 11:00:00",
"1899-12-01 11:00:00",
"1899-12-01 11:00:00",
None,
],
dtype="datetime64[ns]",
),
"TEST_CLOB": pd.Series(
["13ab", "13ab", "13ab", None], dtype="object"
),
"TEST_BLOB": pd.Series(
[ b'9\xaf', b'9\xaf', b'9\xaf', None], dtype="object"
),
}
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_empty_result(oracle_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([], dtype="Int64"),
"TEST_CHAR": pd.Series([], dtype="object"),
"TEST_FLOAT": | pd.Series([], dtype="float64") | pandas.Series |
from __future__ import division
import pytest
import numpy as np
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat)
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
def test_constructors_other(self):
# all-nan
result = IntervalIndex.from_intervals([np.nan])
expected = np.array([np.nan], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
# empty
result = IntervalIndex.from_intervals([])
expected = np.array([], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
def test_constructors_errors(self):
# scalar
with pytest.raises(TypeError):
IntervalIndex(5)
# not an interval
with pytest.raises(TypeError):
IntervalIndex([0, 1])
with pytest.raises(TypeError):
IntervalIndex.from_intervals([0, 1])
# invalid closed
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed
with pytest.raises(ValueError):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 10], [3, 5])
with pytest.raises(ValueError):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# no point in nesting periods in an IntervalIndex
with pytest.raises(ValueError):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
def test_constructors_datetimelike(self):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx)
expected = IntervalIndex.from_breaks(idx.values)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self):
index = self.index
assert len(index) == 2
assert index.size == 2
assert index.shape == (2, )
tm.assert_index_equal(index.left, Index([0, 1]))
tm.assert_index_equal(index.right, Index([1, 2]))
tm.assert_index_equal(index.mid, Index([0.5, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.index_with_nan
assert len(index) == 3
assert index.size == 3
assert index.shape == (3, )
tm.assert_index_equal(index.left, Index([0, np.nan, 1]))
tm.assert_index_equal(index.right, Index([1, np.nan, 2]))
tm.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), np.nan,
Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self):
index = self.index
assert not index.hasnans
tm.assert_numpy_array_equal(index.isna(),
np.array([False, False]))
tm.assert_numpy_array_equal(index.notna(),
np.array([True, True]))
index = self.index_with_nan
assert index.hasnans
tm.assert_numpy_array_equal(index.notna(),
np.array([True, False, True]))
tm.assert_numpy_array_equal(index.isna(),
np.array([False, True, False]))
def test_copy(self):
actual = self.index.copy()
assert actual.equals(self.index)
actual = self.index.copy(deep=True)
assert actual.equals(self.index)
assert actual.left is not self.index.left
def test_ensure_copied_data(self):
# exercise the copy flag in the constructor
# not copying
index = self.index
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self):
idx = self.index
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert not idx.equals(idx.astype(object))
assert not idx.equals(np.array(idx))
assert not idx.equals(list(idx))
assert not idx.equals([1, 2])
assert not idx.equals(np.array([1, 2]))
assert not idx.equals(pd.date_range('20130101', periods=2))
def test_astype(self):
idx = self.index
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
| tm.assert_index_equal(result, idx) | pandas.util.testing.assert_index_equal |
# python3 GeneratorSales.py
# -*- coding: utf-8 -*-
# ===========================================================================================
# Created by: <NAME>
# Description:
# # Generates sales data and writes it into a json document.
# # This json document is located in ../Datasets/Sales/sales.json
# ===========================================================================================
import json
import pandas as pd
import random
import time
import DataProcessing.DataGenerators.Configuration.Season as seas
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
    Print total progress in console
:param
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
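# --- Illustrative usage sketch (not part of the original script) ---
# Hedged example of driving printProgressBar from a loop; the item count and
# the time.sleep() pacing are arbitrary stand-ins for real work.
def _example_progress_bar(total=100):
    printProgressBar(0, total, prefix='Progress:', suffix='Complete', length=50)
    for i in range(total):
        time.sleep(0.01)  # stand-in for real work
        printProgressBar(i + 1, total, prefix='Progress:', suffix='Complete', length=50)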
def generateSalesData(hasToBeGenerated=False, numberOfDataToGenerate=10000):
    '''
    Generates sales data, saves it to a json file and returns a dataframe
    :param hasToBeGenerated: (bool)
        true: sales data gets freshly generated, default false: use previously generated .json
    :param numberOfDataToGenerate: (int)
        defines number of sales to generate, default: 10000
    :return: salesDataFrame: (pandas.dataframe)
        dataframe with date and soldArticles list
    '''
if hasToBeGenerated:
# Read all articles as dataframe from articles.csv
df = pd.read_csv("../Datasets/Articles/articles.csv")
# List to contain the dictionaries. Will be the json to save in a file later.
finalJSON = []
# Generating dataframe with columns date and soldArticles
columns = ['date', 'soldArticles']
salesDataFrame = pd.DataFrame(columns=columns)
        # Generates a list of datetimes and converts them into dates.
        # Starts at 1 January 2016 and ends at 30 September 2021
dates = | pd.date_range(start="2016-01-01", end="2021-09-30") | pandas.date_range |
#!/usr/bin/env python
from __future__ import print_function, division
import os
import tarfile
import pandas as pd
import numpy as np
import gzip
import shutil
import itertools
import multiprocessing as mp
import astropy.units as u
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.io import ascii, fits
from astropy import utils, io
from astroquery.vizier import Vizier
from astroquery.irsa import Irsa
from astroquery.vsa import Vsa
from astroquery.ukidss import Ukidss
from astroquery.sdss import SDSS
from dl import queryClient as qc
try:
from urllib2 import urlopen #python2
from httplib import IncompleteRead
from urllib2 import HTTPError
except ImportError:
from urllib.request import urlopen #python3
from urllib.error import HTTPError
from http.client import IncompleteRead
#SIA
from pyvo.dal import sia
import pyvo
from qso_toolbox import utils as ut
from qso_toolbox import vlass_quicklook
# ------------------------------------------------------------------------------
# Supported surveys, data releases, bands
# ------------------------------------------------------------------------------
astroquery_dict = {
'tmass': {'service': 'irsa', 'catalog': 'fp_psc',
'ra': 'ra', 'dec': 'dec', 'mag_name':
'TMASS_J', 'mag': 'j_m', 'distance':
'dist', 'data_release': None},
'nomad': {'service': 'vizier', 'catalog': 'NOMAD',
'ra': 'RAJ2000', 'dec': 'DECJ2000',
'mag_name': 'R', 'mag': 'Rmag', 'distance':
'distance', 'data_release': None},
'vhsdr6': {'service': 'vsa', 'catalog': 'VHS',
'ra': 'ra', 'dec': 'dec',
'data_release': 'VHSDR6', 'mag_name': 'VHS_J',
'mag': 'jAperMag3', 'distance': 'distance'},
# new, needs to be tested!
'vikingdr5': {'service': 'vsa', 'catalog': 'VIKING',
'ra': 'ra', 'dec': 'dec',
'data_release': 'VIKINGDR5', 'mag_name': 'VHS_J',
'mag': 'jAperMag3', 'distance': 'distance'}
# ,
# 'sdss': {'service': 'sdss', 'catalog': 'VIKING',
# 'ra': 'ra', 'dec': 'dec',
# 'data_release': 'VIKINGDR5', 'mag_name': 'VHS_J',
# 'mag': 'jAperMag3', 'distance': 'distance'}
}
datalab_offset_dict = {'des_dr1.main': {'ra': 'ra', 'dec': 'dec',
'mag': 'mag_auto_z',
'mag_name': 'mag_auto_z'}}
# To add more surveys from the VISTA Science Archive, this dictionary can be
# expanded:
vsa_info_dict = {'vhsdr6': ('VHS', 'VHSDR6', 'tilestack'),
# new, needs to be tested
'vikingdr5': ('VIKING', 'VIKINGDR5', 'tilestack')}
# Surveys as serviced by VSA, append list if necessary (see VSA dictionary
# above)
vsa_survey_list = ['vhsdr6', 'vikingdr5']
# all surveys that directly allow to download fits files
unzipped_download_list = ['desdr1', 'desdr2', 'ps1', 'vhsdr6', 'vikingdr5',
'2MASS', 'DSS2', 'skymapper', 'ukidss']
# ------------------------------------------------------------------------------
# Input table manipulation
# ------------------------------------------------------------------------------
# copied from http://docs.astropy.org/en/stable/_modules/astropy/io/fits/column.html
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
fits_to_numpy = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8',
'E': 'f4',
'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'}
def fits_to_hdf(filename):
""" Convert fits data table to hdf5 data table.
:param filename:
:return:
"""
hdu = fits.open(filename)
filename = os.path.splitext(filename)[0]
df = pd.DataFrame()
format_list = ['D', 'J']
dtype_dict = {}
# Cycle through all columns in the fits file
for idx, column in enumerate(hdu[1].data.columns):
# Check whether the column is in a multi-column format
if len(column.format) > 1 and column.format[-1] in format_list:
n_columns = int(column.format[:-1])
# unWISE specific solution
if column.name[:6] == 'unwise' and n_columns == 2:
passbands = ['w1', 'w2']
for jdx, passband in enumerate(passbands):
new_column_name = column.name + '_' + passband
print(new_column_name)
df[new_column_name] = hdu[1].data[column.name][:, jdx]
numpy_type = fits_to_numpy[column.format[-1]]
dtype_dict.update({new_column_name: numpy_type})
# SOLUTIONS FOR OTHER SURVEYS MAY BE APPENDED HERE
# else for single columns
else:
print(column.name)
df[column.name] = hdu[1].data[column.name]
numpy_type = fits_to_numpy[column.format[-1]]
dtype_dict.update({column.name: numpy_type})
# update the dtype for the DataFrame
print(dtype_dict)
# df = df.astype(dtype_dict)
df.to_hdf(filename+'.hdf5', 'data', format='table')
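# --- Illustrative usage sketch (not part of the original package) ---
# Hedged example of converting a FITS table and reading the result back; the
# file name is a placeholder. fits_to_hdf() writes the HDF5 file next to the
# input, swapping the extension for .hdf5 and storing the table under 'data'.
def _example_fits_to_hdf(fits_path="catalog.fits"):
    fits_to_hdf(fits_path)
    hdf_path = os.path.splitext(fits_path)[0] + '.hdf5'
    return pd.read_hdf(hdf_path, 'data')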
def check_if_table_is_pandas_dataframe(table):
"""
Check whether the supplied table is a pandas Dataframe and convert to it
if necessary.
This function also returns the original file type. Current file types
implemented include:
- astropy tables
- fits record arrays
:param table: object
:return: pd.DataFrame, string
"""
if type(table) == pd.DataFrame:
return table, 'pandas_dataframe'
elif type(table) == Table:
return table.to_pandas(), 'astropy_table'
elif type(table) == fits.fitsrec.FITS_rec:
return Table(table).to_pandas(), 'fits_rec'
def convert_table_to_format(table, format):
""" Convert a pandas Dataframe back to an original format.
Conversions to the following file types are possible:
-astropy table
:param table: pd.DataFrame
:param format: string
:return: object
"""
if format == 'astropy_table':
return Table.from_pandas(table)
elif format == 'fits_rec':
print('Warning: You entered a fits record array. However, this code '
'does not support this data type. Your table is returned as an'
'astropy table!')
return Table.from_pandas(table)
else:
return table
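# --- Illustrative usage sketch (not part of the original package) ---
# Hedged example of the round trip used throughout this module: normalise an
# arbitrary input table to a pandas DataFrame, operate on it, then hand it
# back to the caller in the format it arrived in.
def _example_table_roundtrip(table):
    df, original_format = check_if_table_is_pandas_dataframe(table)
    # ... operate on the DataFrame here ...
    return convert_table_to_format(df, original_format)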
def convert_urltable_to_pandas(data, sep=',', header=0, skip_header=1,
skip_footer=1, linesep='\n'):
"""
:param data:
:param sep:
:param header:
:param skip_header:
:param skip_footer:
:param linesep:
:return:
"""
data_string = data.read().decode('utf-8').split(linesep)
if data_string[0] == 'no rows found':
return None
else:
df = pd.DataFrame(columns=data_string[header].split(sep))
for dat in data_string[skip_header:-skip_footer]:
df = df.append(pd.Series(dat.split(sep),
index=data_string[0].split(sep)),
ignore_index=True)
return df
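# --- Illustrative usage sketch (not part of the original package) ---
# Hedged example of parsing a small comma-separated response without a live
# HTTP request: io.BytesIO stands in for the file-like object returned by
# urlopen, and the payload mirrors the header/body/footer layout expected by
# the default skip_header/skip_footer arguments.
def _example_convert_urltable():
    import io
    payload = b"ra,dec,mag\n150.1,2.2,21.3\n150.2,2.3,20.9\n"
    return convert_urltable_to_pandas(io.BytesIO(payload))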
# ------------------------------------------------------------------------------
# Download catalog data / Offset star queries
# ------------------------------------------------------------------------------
def query_region_astroquery(ra, dec, radius, service, catalog,
data_release=None):
""" Returns the catalog data of sources within a given radius of a defined
position using astroquery.
:param ra: float
Right ascension
:param dec: float
Declination
:param radius: float
Region search radius in arcseconds
:param service: string
Astroquery class used to query the catalog of choice
:param catalog: string
Catalog to query
:param data_release:
If needed by astroquery the specified data release (e.g. needed for VSA)
:return: pandas.core.frame.DataFrame
Returns the dataframe with the returned matches
"""
target_coord = SkyCoord(ra=ra, dec=dec, unit=(u.deg, u.deg), frame='icrs')
if service == 'vizier':
result = Vizier.query_region(target_coord, radius=radius * u.arcsecond,
catalog=catalog, spatial='Cone')
result = result[0]
elif service == 'irsa':
result = Irsa.query_region(target_coord, radius=radius * u.arcsecond,
catalog=catalog, spatial='Cone')
elif service == 'vsa':
result = Vsa.query_region(target_coord, radius=radius * u.arcsecond,
programme_id=catalog, database=data_release)
# elif service == 'sdss':
# result = SDSS.query_region(target_coord, radius=radius * u.arcsecond,
# programme_id=catalog, database=data_release)
else:
raise KeyError('Astroquery class not recognized. Implemented classes '
'are: Vizier, Irsa, VSA')
return result.to_pandas()
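# --- Illustrative usage sketch (not part of the original package) ---
# Hedged example of a 2MASS cone search around a made-up position; the
# coordinates and radius are arbitrary and the call needs network access to
# the IRSA service.
def _example_query_tmass_region():
    return query_region_astroquery(ra=150.0, dec=2.2, radius=60,
                                   service='irsa', catalog='fp_psc')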
def get_astroquery_offset(target_name, target_ra, target_dec, radius, catalog,
quality_query=None, n=3, verbosity=0):
"""Return the n nearest offset stars specified by the quality criteria
around a given target using astroquery.
:param target_name: string
Identifier for the target
:param target_ra: float
Target right ascension
    :param target_dec: float
        Target declination
:param radius: float
Maximum search radius in arcseconds
:param catalog: string
Catalog (and data release) to retrieve the offset star data from. See
astroquery_dict for implemented catalogs.
:param quality_query: string
A string written in pandas query syntax to apply quality criteria on
potential offset stars around the target.
:param n: int
Number of offset stars to retrieve. (Maximum: n=5)
:param verbosity:
Verbosity > 0 will print verbose statements during the execution.
:return: pandas.core.frame.DataFrame
Returns the dataframe with the retrieved offset stars for the given
target.
"""
service = astroquery_dict[catalog]['service']
cat = astroquery_dict[catalog]['catalog']
ra = astroquery_dict[catalog]['ra']
dec = astroquery_dict[catalog]['dec']
mag = astroquery_dict[catalog]['mag']
mag_name = astroquery_dict[catalog]['mag_name']
distance = astroquery_dict[catalog]['distance']
dr = astroquery_dict[catalog]['data_release']
df = query_region_astroquery(target_ra, target_dec, radius, service, cat,
dr).copy()
if quality_query is not None:
df.query(quality_query, inplace=True)
if df.shape[0] > 0:
# Sort DataFrame by match distance
df.sort_values(distance, ascending=True, inplace=True)
# Keep only the first three entries
offset_df = df[:n]
# Build the offset DataFrame
offset_df.loc[:, 'target_name'] = target_name
offset_df.loc[:, 'target_ra'] = target_ra
offset_df.loc[:, 'target_dec'] = target_dec
offset_df.loc[:, 'offset_ra'] = df[ra]
offset_df.loc[:, 'offset_dec'] = df[dec]
for jdx, idx in enumerate(offset_df.index):
abc_dict = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4:'E'}
letter = abc_dict[jdx]
offset_df.loc[idx, 'offset_name'] = target_name + '_offset_' + \
letter
offset_df.loc[
idx, 'offset_shortname'] = target_name + '_offset_' + letter
offset_df.loc[:, mag_name] = df[mag]
# GET THIS INTO A SEPARATE FUNCTION
target_coords = SkyCoord(ra=target_ra, dec=target_dec,
unit=(u.deg, u.deg),
frame='icrs')
offset_coords = SkyCoord(ra=offset_df.offset_ra.values,
dec=offset_df.offset_dec, unit=(u.deg, u.deg),
frame='icrs')
# Calculate position angles and separations (East of North)
pos_angles = offset_coords.position_angle(target_coords).to(u.deg)
separations = offset_coords.separation(target_coords).to(u.arcsecond)
dra, ddec = offset_coords.spherical_offsets_to(target_coords)
# UNTIL HERE
if verbosity > 1:
print('Offset delta ra: {}'.format(dra))
print('Offset delta dec: {}'.format(ddec))
print('Offset separation: {}'.format(separations))
print('Offset position angle: {}'.format(pos_angles))
offset_df.loc[:, 'separation'] = separations.value
offset_df.loc[:, 'pos_angle'] = pos_angles.value
offset_df.loc[:, 'dra_offset'] = dra.to(u.arcsecond).value
offset_df.loc[:, 'ddec_offset'] = ddec.to(u.arcsecond).value
return offset_df[
['target_name', 'target_ra', 'target_dec', 'offset_name',
'offset_shortname', 'offset_ra', 'offset_dec',
mag, 'separation', 'pos_angle', 'dra_offset',
'ddec_offset']]
else:
print("Offset star for {} not found.".format(target_name))
return pd.DataFrame()
def get_offset_stars_astroquery(df, target_name_column, target_ra_column,
target_dec_column, radius, catalog='tmass', n=3,
quality_query=None, verbosity=0):
"""Get offset stars for all targets in the input DataFrame using astroquery.
:param df: pandas.core.frame.DataFrame
Dataframe with targets to retrieve offset stars for
:param target_name_column: string
Name of the target identifier column
:param target_ra_column: string
Right ascension column name
:param target_dec_column: string
Declination column name
:param radius: float
Maximum search radius in arcseconds
:param catalog: string
Catalog (and data release) to retrieve the offset star data from. See
astroquery_dict for implemented catalogs.
:param n: int
Number of offset stars to retrieve. (Maximum: n=5)
:param quality_query: string
A string written in pandas query syntax to apply quality criteria on
potential offset stars around the target.
:param verbosity:
Verbosity > 0 will print verbose statements during the execution.
:return: pandas.core.frame.DataFrame
Returns the dataframe with the retrieved offset stars for all targets
in the input dataframe.
"""
offset_df = pd.DataFrame()
for idx in df.index:
target_name = df.loc[idx, target_name_column]
target_ra = df.loc[idx, target_ra_column]
target_dec = df.loc[idx, target_dec_column]
temp_df = get_astroquery_offset(target_name, target_ra, target_dec, radius, catalog,
quality_query=quality_query, n=n, verbosity=verbosity)
offset_df = offset_df.append(temp_df, ignore_index=True)
offset_df.to_csv('temp_offset_df.csv', index=False)
return offset_df
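# --- Illustrative usage sketch (not part of the original package) ---
# Hedged example of building a one-row target table and requesting 2MASS
# offset stars for it; the target name, coordinates and J-band quality cut
# are arbitrary assumptions, and the call needs network access.
def _example_get_offset_stars():
    targets = pd.DataFrame({'name': ['J1000+0213'],
                            'ra': [150.0],
                            'dec': [2.22]})
    return get_offset_stars_astroquery(targets, 'name', 'ra', 'dec',
                                       radius=120, catalog='tmass', n=3,
                                       quality_query='j_m < 18')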
def get_offset_stars_datalab(df, target_name_column, target_ra_column,
target_dec_column, radius, survey='des_dr1', table='main',
n=3, where=None, verbosity=0):
"""Get offset stars for all targets in the input DataFrame using the
NOAO datalab.
:param df: pandas.core.frame.DataFrame
Dataframe with targets to retrieve offset stars for
:param target_name_column: string
Name of the target identifier column
:param target_ra_column: string
Right ascension column name
:param target_dec_column: string
Declination column name
:param radius: float
Maximum search radius in arcseconds
:param survey: string
Survey keyword for the datalab query.
:param table: string
Table keyword for the datalab query.
:param n: int
Number of offset stars to retrieve. (Maximum: n=5)
:param where: string
A string written in ADQL syntax to apply quality criteria on
potential offset stars around the target.
:param verbosity:
Verbosity > 0 will print verbose statements during the execution.
:return: pandas.core.frame.DataFrame
Returns the dataframe with the retrieved offset stars for all targets
in the input dataframe.
"""
offset_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import datetime
import yfinance as yf
from pandas_datareader import data as pdr
from flask import current_app
from stk_predictor.extensions import db
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
def get_ticker_from_yahoo(ticker, start_date, end_date):
yf.pdr_override()
try:
new_trading_df = pdr.get_data_yahoo(
ticker, start_date, end_date, interval='1d')
new_trading_df = new_trading_df.drop(
['Open', 'High', 'Low', 'Adj Close'], axis=1)
        new_trading_df = new_trading_df.dropna(axis='index')
new_trading_df = new_trading_df.reset_index()
new_trading_df.columns = ['trading_date',
'intraday_close', 'intraday_volumes']
his_trading_df = pd.read_sql('aapl', db.engine, index_col='id')
df = pd.concat([his_trading_df, new_trading_df]
).drop_duplicates('trading_date')
df = df.sort_values(by='trading_date')
df = df.reset_index(drop=True)
if len(df) > 0:
df.to_sql("aapl", db.engine, if_exists='replace', index_label='id')
return df
else:
# t = pd.read_sql('aapl', db.engine, index_col='id')
return None
except Exception as ex:
raise RuntimeError(
"Catch Excetion when retrieve data from Yahoo...", ex)
return None
def get_news_from_finviz(ticker):
"""Request news headline from finviz, according to
company ticker's name
Parameters
-----------
ticker: str
the stock ticker name
Return
----------
df : pd.DataFrame
        return the latest 2 days of news headlines.
"""
current_app.logger.info("Job >> Enter Finviz news scrape step...")
base_url = 'https://finviz.com/quote.ashx?t={}'.format(ticker)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36'
}
parsed_news = []
try:
res = requests.get(base_url, headers=headers)
if res.status_code == 200:
texts = res.text
soup = BeautifulSoup(texts)
news_tables = soup.find(id="news-table")
for x in news_tables.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
parsed_news.append([date, time, text])
# filter the recent day news
df = pd.DataFrame(parsed_news, columns=['date', 'time', 'texts'])
df['date'] = pd.to_datetime(df.date).dt.date
one_day_period = (datetime.datetime.today() -
datetime.timedelta(days=1)).date()
df_sub = df[df.date >= one_day_period]
return df_sub
else:
raise RuntimeError("HTTP response Error {}".format(
res.status_code)) from None
except Exception as ex:
current_app.logger.info("Exception in scrape Finviz.", ex)
raise RuntimeError("Exception in scrape Finviz.") from ex
def prepare_trading_dataset(df):
"""Prepare the trading data set.
    Time series analysis incorporates previous data for future prediction,
    so we need to retrieve historical data to generate features.
    Parameters
    -----------
    df: DataFrame
        the stock ticker trading data, including trading-date, close-price, volumes.
        Feature engineering uses rolling windows of up to 400 trading days,
        so at least 400 rows of history are expected.
Return
----------
array_lstm : np.array
return the array with 3 dimensions shape -> [samples, 1, features]
"""
if len(df) == 0:
raise RuntimeError(
"Encounter Error in >>make_dataset.prepare_trading_dataset<<... \
Did not catch any news.") from None
else:
df['log_ret_1d'] = np.log(df['intraday_close'] / df['intraday_close'].shift(1))
df['log_ret_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).sum()
df['log_ret_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).sum()
df['log_ret_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).sum()
df['log_ret_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).sum()
df['log_ret_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).sum()
df['log_ret_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).sum()
df['log_ret_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).sum()
df['log_ret_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).sum()
df['log_ret_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).sum()
df['log_ret_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).sum()
df['log_ret_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).sum()
df['log_ret_36w'] = pd.Series(df['log_ret_1d']).rolling(window=180).sum()
df['log_ret_40w'] = pd.Series(df['log_ret_1d']).rolling(window=200).sum()
df['log_ret_44w'] = pd.Series(df['log_ret_1d']).rolling(window=220).sum()
df['log_ret_48w'] = pd.Series(df['log_ret_1d']).rolling(window=240).sum()
df['log_ret_52w'] = pd.Series(df['log_ret_1d']).rolling(window=260).sum()
df['log_ret_56w'] = pd.Series(df['log_ret_1d']).rolling(window=280).sum()
df['log_ret_60w'] = pd.Series(df['log_ret_1d']).rolling(window=300).sum()
df['log_ret_64w'] = pd.Series(df['log_ret_1d']).rolling(window=320).sum()
df['log_ret_68w'] = pd.Series(df['log_ret_1d']).rolling(window=340).sum()
df['log_ret_72w'] = pd.Series(df['log_ret_1d']).rolling(window=360).sum()
df['log_ret_76w'] = pd.Series(df['log_ret_1d']).rolling(window=380).sum()
df['log_ret_80w'] = pd.Series(df['log_ret_1d']).rolling(window=400).sum()
df['vol_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).std()*np.sqrt(5)
df['vol_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).std()*np.sqrt(10)
df['vol_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).std()*np.sqrt(15)
df['vol_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).std()*np.sqrt(20)
df['vol_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).std()*np.sqrt(40)
df['vol_12w'] = | pd.Series(df['log_ret_1d']) | pandas.Series |
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
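        # e.g. '09:00' appears once, '10:00' twice, ..., '18:00' ten times,
        # which is why value_counts below descends from 10 to 1.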
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012',
'2011'], name='pidx', freq='A')
pexpected = PeriodIndex(
['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp,
check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx',
freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'],
name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(
['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
def test_nat_new(self):
idx = pd.period_range('2011-01', freq='M', periods=5, name='x')
result = idx._nat_new()
exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with tm.assertRaises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
        idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                              '2011-01-01 12:00'], name='xxx', freq='H')
        tm.assert_index_equal(idx.shift(0), idx)
        exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                              '2011-01-01 15:00'], name='xxx', freq='H')
        tm.assert_index_equal(idx.shift(3), exp)
        exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                              '2011-01-01 09:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(-3), exp)
def test_repeat(self):
index = pd.period_range('2001-01-01', periods=2, freq='D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], freq='D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.period_range('2001-01-01', periods=2, freq='2D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], freq='2D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')
exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01',
'NaT', 'NaT', 'NaT',
'2003-01', '2003-01', '2003-01'], freq='M')
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
def test_nat(self):
self.assertIs(pd.PeriodIndex._na_value, pd.NaT)
self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT)
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for freq in ['D', 'M']:
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq=freq)
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq='H')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
            # same internal i8 values, different freq
idx3 = pd.PeriodIndex._simple_new(idx.asi8, freq='H')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestPeriodIndexSeriesMethods(tm.TestCase):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
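    # The tests below run each op through _check so that PeriodIndex and the
    # equivalent Series are exercised together and stay behaviourally in sync.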
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assertRaisesRegexp(TypeError, msg):
obj + ng
with tm.assertRaises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assertRaisesRegexp(TypeError, msg):
obj - ng
with tm.assertRaises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
self.assertIs(np.add(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.add(ng, obj)
with tm.assertRaises(TypeError):
np.subtract(obj, ng)
if _np_version_under1p10:
self.assertIs(np.subtract(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'NaT', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='2M', name='idx')
expected = PeriodIndex(['2011-07', '2011-08',
'NaT', '2011-10'], freq='2M', name='idx')
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
f = lambda x: x + offsets.Day()
exp = PeriodIndex(['2011-01-02', '2011-02-02', '2011-03-02',
'2011-04-02'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x + offsets.Day(2)
exp = PeriodIndex(['2011-01-03', '2011-02-03', '2011-03-03',
'2011-04-03'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x - offsets.Day(2)
exp = PeriodIndex(['2010-12-30', '2011-01-30', '2011-02-27',
'2011-03-30'], freq='D', name='idx')
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
s = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj + offsets.Hour(2)
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
offsets.Hour(2) + obj
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj - offsets.Hour(2)
def test_pi_sub_period(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, -11, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period('2012-01', freq='M'))
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, 11, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period('2012-01', freq='M'), idx)
if _np_version_under1p10:
self.assertIs(result, NotImplemented)
else:
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
tm.assert_index_equal(pd.Period('NaT', freq='M') - idx, exp)
def test_pi_sub_pdnat(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
exp = pd.TimedeltaIndex([pd.NaT] * 4, name='idx')
tm.assert_index_equal(pd.NaT - idx, exp)
tm.assert_index_equal(idx - pd.NaT, exp)
def test_pi_sub_period_nat(self):
# GH 13071
idx = PeriodIndex(['2011-01', 'NaT', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, np.nan, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, np.nan, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
tm.assert_index_equal(pd.Period('NaT', freq='M') - idx, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
f = lambda x: x == pd.Period('2011-03', freq='M')
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period('2011-03', freq='M')
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') != x
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period('2011-03', freq='M')
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(['2011-01', 'NaT', '2011-03',
'2011-04'], freq='M', name='idx')
f = lambda x: x == pd.Period('2011-03', freq='M')
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') == x
self._check(idx, f, exp)
f = lambda x: x == tslib.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: tslib.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period('2011-03', freq='M')
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') != x
self._check(idx, f, exp)
f = lambda x: x != tslib.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: tslib.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period('2011-03', freq='M')
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > tslib.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: tslib.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
class TestSeriesPeriod(tm.TestCase):
def setUp(self):
self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
def test_ops_series_timedelta(self):
# GH 13043
s = pd.Series([pd.Period('2015-01-01', freq='D'),
pd.Period('2015-01-02', freq='D')], name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Period('2015-01-02', freq='D'),
pd.Period('2015-01-03', freq='D')], name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
tm.assert_series_equal(s + pd.tseries.offsets.Day(), exp)
tm.assert_series_equal(pd.tseries.offsets.Day() + s, exp)
def test_ops_series_period(self):
# GH 13043
s = pd.Series([pd.Period('2015-01-01', freq='D'),
pd.Period('2015-01-02', freq='D')], name='xxx')
self.assertEqual(s.dtype, object)
p = pd.Period('2015-01-10', freq='D')
# dtype will be object because of original dtype
exp = pd.Series([9, 8], name='xxx', dtype=object)
tm.assert_series_equal(p - s, exp)
tm.assert_series_equal(s - p, -exp)
s2 = pd.Series([pd.Period('2015-01-05', freq='D'),
pd.Period('2015-01-04', freq='D')], name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([4, 2], name='xxx', dtype=object)
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
class TestFramePeriod(tm.TestCase):
def test_ops_frame_period(self):
# GH 13043
df = pd.DataFrame({'A': [pd.Period('2015-01', freq='M'),
pd.Period('2015-02', freq='M')],
'B': [pd.Period('2014-01', freq='M'),
pd.Period('2014-02', freq='M')]})
self.assertEqual(df['A'].dtype, object)
self.assertEqual(df['B'].dtype, object)
p = pd.Period('2015-03', freq='M')
# dtype will be object because of original dtype
exp = pd.DataFrame({'A': np.array([2, 1], dtype=object),
'B': np.array([14, 13], dtype=object)})
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -exp)
df2 = pd.DataFrame({'A': [pd.Period('2015-05', freq='M'),
pd.Period('2015-06', freq='M')],
'B': [pd.Period('2015-05', freq='M'),
pd.Period('2015-06', freq='M')]})
self.assertEqual(df2['A'].dtype, object)
self.assertEqual(df2['B'].dtype, object)
exp = pd.DataFrame({'A': np.array([4, 4], dtype=object),
'B': np.array([16, 16], dtype=object)})
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -exp)
class TestPeriodIndexComparisons(tm.TestCase):
def test_pi_pi_comp(self):
for freq in ['M', '2M', '3M']:
base = PeriodIndex(['2011-01', '2011-02',
'2011-03', '2011-04'], freq=freq)
p = Period('2011-02', freq=freq)
exp = np.array([False, True, False, False])
self.assert_numpy_array_equal(base == p, exp)
self.assert_numpy_array_equal(p == base, exp)
exp = np.array([True, False, True, True])
self.assert_numpy_array_equal(base != p, exp)
self.assert_numpy_array_equal(p != base, exp)
exp = np.array([False, False, True, True])
self.assert_numpy_array_equal(base > p, exp)
self.assert_numpy_array_equal(p < base, exp)
exp = np.array([True, False, False, False])
self.assert_numpy_array_equal(base < p, exp)
self.assert_numpy_array_equal(p > base, exp)
exp = np.array([False, True, True, True])
self.assert_numpy_array_equal(base >= p, exp)
self.assert_numpy_array_equal(p <= base, exp)
exp = np.array([True, True, False, False])
self.assert_numpy_array_equal(base <= p, exp)
self.assert_numpy_array_equal(p >= base, exp)
idx = PeriodIndex(['2011-02', '2011-01', '2011-03',
'2011-05'], freq=freq)
exp = np.array([False, False, True, False])
self.assert_numpy_array_equal(base == idx, exp)
exp = np.array([True, True, False, True])
self.assert_numpy_array_equal(base != idx, exp)
exp = np.array([False, True, False, False])
self.assert_numpy_array_equal(base > idx, exp)
exp = np.array([True, False, False, True])
self.assert_numpy_array_equal(base < idx, exp)
exp = np.array([False, True, True, False])
self.assert_numpy_array_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
self.assert_numpy_array_equal(base <= idx, exp)
# different base freq
msg = "Input has different freq=A-DEC from PeriodIndex"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
base <= Period('2011', freq='A')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
Period('2011', freq='A') >= base
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='A')
base <= idx
# different mult
msg = "Input has different freq=4M from PeriodIndex"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
base <= Period('2011', freq='4M')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
Period('2011', freq='4M') >= base
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='4M')
base <= idx
def test_pi_nat_comp(self):
for freq in ['M', '2M', '3M']:
idx1 = PeriodIndex(
['2011-01', '2011-02', 'NaT', '2011-05'], freq=freq)
result = idx1 > Period('2011-02', freq=freq)
exp = np.array([False, False, False, True])
self.assert_numpy_array_equal(result, exp)
            result = Period('2011-02', freq=freq) < idx1
            self.assert_numpy_array_equal(result, exp)
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog
import os
import sys
import traceback
from itertools import groupby
import pandas as pd
import numpy as np
sys.path.append("..")
from idw import idw
from cfm import cfm
from knncad import knn
# Ui_MainWindow is the pyuic5-generated form class for this window; the module
# name below is an assumption and should match the generated UI file.
from ui_mainwindow import Ui_MainWindow
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args):
super().__init__(*args)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# Set up progress bar with cancel button
self.progressbar = QtWidgets.QProgressBar()
self.statusBar().showMessage("Ready")
self.statusBar().addPermanentWidget(self.progressbar)
self.progressbar.setGeometry(30, 40, 200, 20)
self.progressbar.setValue(0)
self.progressbar.hide()
self.cancelBtn = QtWidgets.QPushButton("Cancel")
self.cancelBtn.clicked.connect(self.cancel_pressed)
self.statusBar().addPermanentWidget(self.cancelBtn)
self.cancelBtn.hide()
self.cancelling = False
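        # `cancelling` is polled inside the long-running loops (see idw_run)
        # so the Cancel button can interrupt work between files.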
# Set up connections for IDW
self.stations = {}
self.files = []
self.ui.browseBtn.clicked.connect(self.idw_set_input_folder)
self.ui.outBrowseBtn.clicked.connect(self.idw_set_output_folder)
self.ui.resetBtn.clicked.connect(self.idw_reset_input)
self.ui.runBtn.clicked.connect(self.idw_run)
# Set up Connections for CFM
self.ui.observedFileBrowse.clicked.connect(self.cfm_get_obs_file)
self.ui.historicalFileBrowse.clicked.connect(self.cfm_get_his_file)
self.ui.futureFileBrowse.clicked.connect(self.cfm_get_fut_file)
self.ui.runButton.clicked.connect(self.cfm_run)
self.ui.outBrowse.clicked.connect(self.cfm_get_out_path)
self.ui.cfmResetBtn.clicked.connect(self.cfm_reset_input)
# Set up Connections for KNN
self.ui.knnBrowse.clicked.connect(self.knn_get_input_file)
self.ui.knnAddFile.clicked.connect(self.knn_add_file)
self.ui.knnRemoveFile.clicked.connect(self.knn_remove_file)
self.ui.knnOutputBrowse.clicked.connect(self.knn_set_output_folder)
self.ui.knnRun.clicked.connect(self.knn_run)
self.ui.knnResetInput.clicked.connect(self.knn_reset_input)
tableHHeader = self.ui.knnTableWidget.horizontalHeader()
tableHHeader.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)
tableHHeader.setVisible(True)
def cancel_pressed(self):
self.cancelling = True
self.statusBar().showMessage("Cancelling...")
def idw_set_input_folder(self):
path = QFileDialog.getExistingDirectory(None, "Select Directory")
self.ui.pathLineEdit.setText(path)
def idw_set_output_folder(self):
path = QFileDialog.getExistingDirectory(None, "Select Directory")
self.ui.outputLineEdit.setText(path)
def idw_reset_input(self):
self.files = []
self.stations = {}
self.ui.pathLineEdit.setText("")
self.ui.outputLineEdit.setText("")
self.ui.varEdit.setText("")
self.ui.alphaSpin.setValue(2)
self.ui.pointsSpin.setValue(4)
self.ui.extraLineEdit.setText("")
self.ui.startDateEdit.setDate(QtCore.QDate(1975, 1, 1))
self.ui.endDateEdit.setDate(QtCore.QDate(2005, 1, 1))
self.ui.northSpin.setValue(0)
self.ui.eastSpin.setValue(0)
self.ui.southSpin.setValue(0)
self.ui.westSpin.setValue(0)
self.ui.timeGroupBox.setChecked(False)
self.ui.spatialGroupBox.setChecked(False)
R = self.ui.stationTable.rowCount()
C = self.ui.stationTable.columnCount()
for r in range(R):
for c in range(C):
cell = self.ui.stationTable.item(r, c)
if cell:
cell.setText("")
def idw_get_stations(self):
stations = {}
R = self.ui.stationTable.rowCount()
for i in range(R):
name_cell = self.ui.stationTable.item(i, 0)
lat_cell = self.ui.stationTable.item(i, 1)
lon_cell = self.ui.stationTable.item(i, 2)
if not name_cell:
break
elif not name_cell.text():
break
name = name_cell.text()
try:
lat = float(lat_cell.text())
lon = float(lon_cell.text())
stations[name] = (lat, lon)
            except ValueError as e:
                # QMessageBox.warning expects a string message, not an exception
                QtWidgets.QMessageBox.warning(self, "Input Error", str(e))
return stations
def idw_get_input(self):
path = self.ui.pathLineEdit.text()
out = self.ui.outputLineEdit.text()
varname = self.ui.varEdit.text()
stations = self.idw_get_stations()
alpha = self.ui.alphaSpin.value()
points = self.ui.pointsSpin.value()
options = self.ui.extraLineEdit.text()
files = []
for fl in os.listdir(path):
if fl.endswith('.nc') and varname + '_' in fl:
files.append(fl)
if self.ui.spatialGroupBox.isChecked():
n = self.ui.northSpin.value()
e = self.ui.eastSpin.value()
s = self.ui.southSpin.value()
w = self.ui.westSpin.value()
extent = [n, e, s, w]
else:
extent = None
if self.ui.timeGroupBox.isChecked():
sDate = self.ui.startDateEdit.date().toPyDate()
eDate = self.ui.endDateEdit.date().toPyDate()
period = [(sDate.year, sDate.month, sDate.day),
(eDate.year, eDate.month, eDate.day)]
else:
period = None
kwargs = {}
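        # Extra options are parsed below as comma-separated key=value pairs
        # with integer values, e.g. "chunks=10,workers=4" ->
        # {'chunks': 10, 'workers': 4} (keys shown are only illustrative).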
if options:
for o in options.split(','):
k, v = o.split('=')
kwargs[k.strip()] = int(v)
return (path, out, varname, stations, alpha, points, kwargs, files,
extent, period)
def idw_run(self):
pars = self.idw_get_input()
(path, out, varname, stations, alpha, points, kwargs, files,
extent, period) = pars
def file_splitter(fname):
*first, _ = fname.split('_')
return first
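        # file_splitter drops the last underscore-separated chunk, so files that
        # differ only in that trailing part (e.g. the time slice in an assumed
        # naming pattern like 'tasmax_model_rcp45_2006-2010.nc') are grouped
        # and interpolated together.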
self.statusBar().showMessage("Interpolating...")
self.progressbar.setRange(0, len(files))
self.progressbar.setValue(0)
self.progressbar.show()
self.cancelBtn.show()
try:
            # groupby only merges consecutive items, so sort the listing first
            # to keep files that share the same prefix in one group.
            for k, group in groupby(sorted(files), key=file_splitter):
if self.cancelling:
self.cancelling = False
break
grouped_files = [os.path.join(path, g) for g in group]
df = idw.idw(grouped_files,
varname,
stations,
extent=extent,
period=period,
alpha=alpha,
k=points,
**kwargs)
df.to_csv(os.path.join(out, 'idw_' + '_'.join(k)) + '.csv')
new = self.progressbar.value() + len(grouped_files)
self.progressbar.setValue(new)
except Exception as e:
msg = traceback.format_exc(5)
QtWidgets.QMessageBox.critical(self, "Error", msg)
finally:
self.statusBar().showMessage("Ready")
self.progressbar.reset()
self.progressbar.hide()
self.cancelBtn.hide()
def cfm_reset_input(self):
self.ui.cfmVarNameEdit.setText("")
self.ui.observedFileEdit.setText("")
self.ui.historicalFileEdit.setText("")
self.ui.futureFileEdit.setText("")
self.ui.outLineEdit.setText("")
self.ui.scalingComboBox.setCurrentIndex(0)
self.ui.binsSpinBox.setValue(25)
def cfm_get_obs_file(self):
fl, _ = QFileDialog.getOpenFileName(self, "Select Observed File",
filter="CSV files (*.csv)")
self.ui.observedFileEdit.setText(fl)
def cfm_get_his_file(self):
fl, _ = QFileDialog.getOpenFileName(self,
"Select Historical GCM File",
filter="CSV files (*.csv)")
self.ui.historicalFileEdit.setText(fl)
def cfm_get_fut_file(self):
fl, _ = QFileDialog.getOpenFileName(self, "Select Future GCM File",
filter="CSV files (*.csv)")
self.ui.futureFileEdit.setText(fl)
def cfm_get_out_path(self):
fl = QFileDialog.getExistingDirectory(self, "Select Output Folder")
self.ui.outLineEdit.setText(fl)
def cfm_run(self):
self.statusBar().showMessage("Loading Files...")
self.progressbar.setRange(0, 4)
self.progressbar.show()
self.cancelBtn.show()
try:
varname = self.ui.cfmVarNameEdit.text()
obs_fl = self.ui.observedFileEdit.text()
            obs = pd.read_csv(obs_fl, index_col=[0, 1, 2], header=[0, 1])